Commit 621d07b2 authored by Matthew Auld, committed by Chris Wilson

drm/i915/selftests: rein in igt_write_huge

Rather than repeat the test for each engine, which takes a long time,
let's try alternating between the engines in some randomized
order.

v2: fix gen2 blunder
    fix !order blunder
    more cunning permutation construction!
Suggested-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20171123135421.17967-1-matthew.auld@intel.com
parent 6a44e177
...@@ -27,6 +27,7 @@ ...@@ -27,6 +27,7 @@
#include <linux/prime_numbers.h> #include <linux/prime_numbers.h>
#include "mock_drm.h" #include "mock_drm.h"
#include "i915_random.h"
static const unsigned int page_sizes[] = { static const unsigned int page_sizes[] = {
I915_GTT_PAGE_SIZE_2M, I915_GTT_PAGE_SIZE_2M,
...@@ -1044,7 +1045,10 @@ static int igt_write_huge(struct i915_gem_context *ctx, ...@@ -1044,7 +1045,10 @@ static int igt_write_huge(struct i915_gem_context *ctx,
{ {
struct drm_i915_private *i915 = to_i915(obj->base.dev); struct drm_i915_private *i915 = to_i915(obj->base.dev);
struct i915_address_space *vm = ctx->ppgtt ? &ctx->ppgtt->base : &i915->ggtt.base; struct i915_address_space *vm = ctx->ppgtt ? &ctx->ppgtt->base : &i915->ggtt.base;
static struct intel_engine_cs *engines[I915_NUM_ENGINES];
struct intel_engine_cs *engine; struct intel_engine_cs *engine;
I915_RND_STATE(prng);
IGT_TIMEOUT(end_time);
struct i915_vma *vma; struct i915_vma *vma;
unsigned int flags = PIN_USER | PIN_OFFSET_FIXED; unsigned int flags = PIN_USER | PIN_OFFSET_FIXED;
unsigned int max_page_size; unsigned int max_page_size;
...@@ -1052,6 +1056,8 @@ static int igt_write_huge(struct i915_gem_context *ctx, ...@@ -1052,6 +1056,8 @@ static int igt_write_huge(struct i915_gem_context *ctx,
u64 max; u64 max;
u64 num; u64 num;
u64 size; u64 size;
int *order;
int i, n;
int err = 0; int err = 0;
GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj)); GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
...@@ -1067,19 +1073,32 @@ static int igt_write_huge(struct i915_gem_context *ctx, ...@@ -1067,19 +1073,32 @@ static int igt_write_huge(struct i915_gem_context *ctx,
if (IS_ERR(vma)) if (IS_ERR(vma))
return PTR_ERR(vma); return PTR_ERR(vma);
n = 0;
for_each_engine(engine, i915, id) { for_each_engine(engine, i915, id) {
IGT_TIMEOUT(end_time);
if (!intel_engine_can_store_dword(engine)) { if (!intel_engine_can_store_dword(engine)) {
pr_info("store-dword-imm not supported on engine=%u\n", pr_info("store-dword-imm not supported on engine=%u\n", id);
id);
continue; continue;
} }
engines[n++] = engine;
}
if (!n)
return 0;
/*
* To keep things interesting when alternating between engines in our
* randomized order, lets also make feeding to the same engine a few
* times in succession a possibility by enlarging the permutation array.
*/
order = i915_random_order(n * I915_NUM_ENGINES, &prng);
if (!order)
return -ENOMEM;
/* /*
* Try various offsets until we timeout -- we want to avoid * Try various offsets until we timeout -- we want to avoid
* issues hidden by effectively always using offset = 0. * issues hidden by effectively always using offset = 0.
*/ */
i = 0;
for_each_prime_number_from(num, 0, max) { for_each_prime_number_from(num, 0, max) {
u64 offset = num * max_page_size; u64 offset = num * max_page_size;
u32 dword; u32 dword;
...@@ -1108,6 +1127,9 @@ static int igt_write_huge(struct i915_gem_context *ctx, ...@@ -1108,6 +1127,9 @@ static int igt_write_huge(struct i915_gem_context *ctx,
dword = offset_in_page(num) / 4; dword = offset_in_page(num) / 4;
engine = engines[order[i] % n];
i = (i + 1) % (n * I915_NUM_ENGINES);
err = gpu_write(vma, ctx, engine, dword, num + 1); err = gpu_write(vma, ctx, engine, dword, num + 1);
if (err) { if (err) {
pr_err("gpu-write failed at offset=%llx", offset); pr_err("gpu-write failed at offset=%llx", offset);
...@@ -1122,19 +1144,18 @@ static int igt_write_huge(struct i915_gem_context *ctx, ...@@ -1122,19 +1144,18 @@ static int igt_write_huge(struct i915_gem_context *ctx,
i915_vma_unpin(vma); i915_vma_unpin(vma);
if (num > 0 && if (igt_timeout(end_time,
igt_timeout(end_time,
"%s timed out on engine=%u at offset=%llx, max_page_size=%x\n", "%s timed out on engine=%u at offset=%llx, max_page_size=%x\n",
__func__, id, offset, max_page_size)) __func__, engine->id, offset, max_page_size))
break; break;
} }
}
out_vma_unpin: out_vma_unpin:
if (i915_vma_is_pinned(vma)) if (i915_vma_is_pinned(vma))
i915_vma_unpin(vma); i915_vma_unpin(vma);
out_vma_close: out_vma_close:
i915_vma_close(vma); i915_vma_close(vma);
kfree(order);
return err; return err;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment