Commit 4746fd5c authored by Chris Wilson

drm/i915/selftests: Trim blitter block size

Reduce the amount of work we do to verify client blt correctness as
currently our 0.5s subtests take about 15s on slower devices!

v2: Grow the maximum block size until we run out of time
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200210231047.810929-1-chris@chris-wilson.co.uk
parent 0fde0b1d
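
The v2 strategy is visible in the hunks below: each thread starts its target block size at a single page, clamps it to a per-thread slice of the address space, and doubles it every pass until the test's time budget expires, so slow devices stop early at small blocks while fast devices still exercise large ones. A minimal standalone sketch of that sizing loop follows (plain userspace C; time_left(), PAGE_SZ and the budget value are illustrative stand-ins for the selftest's IGT_TIMEOUT/jiffies machinery and the ce->vm->total budget, not the kernel code itself):

    /* Sketch of the v2 "grow the block size until we run out of time" loop. */
    #include <stdint.h>
    #include <stdio.h>
    #include <time.h>

    #define PAGE_SZ 4096ull

    static int time_left(time_t end)
    {
        return time(NULL) < end;
    }

    int main(void)
    {
        time_t end = time(NULL) + 1;          /* ~1s budget, like IGT_TIMEOUT */
        uint64_t max = (1ull << 32) >> 4;     /* per-thread slice of the VM */
        uint64_t total = PAGE_SZ;             /* start small ... */

        do {
            uint64_t sz = total < max ? total : max;

            printf("blitting up to %llu bytes\n", (unsigned long long)sz);
            /* ... fill/copy and verify a block of up to sz bytes here ... */

            total <<= 1;                      /* ... and double each pass */
        } while (time_left(end));

        return 0;
    }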
@@ -210,6 +210,7 @@ static int igt_fill_blt_thread(void *arg)
 	struct intel_context *ce;
 	unsigned int prio;
 	IGT_TIMEOUT(end);
+	u64 total, max;
 	int err;
 
 	ctx = thread->ctx;
@@ -225,24 +226,28 @@ static int igt_fill_blt_thread(void *arg)
 	ce = i915_gem_context_get_engine(ctx, BCS0);
 	GEM_BUG_ON(IS_ERR(ce));
 
+	/*
+	 * If we have a tiny shared address space, like for the GGTT
+	 * then we can't be too greedy.
+	 */
+	max = ce->vm->total;
+	if (i915_is_ggtt(ce->vm) || thread->ctx)
+		max = div_u64(max, thread->n_cpus);
+	max >>= 4;
+
+	total = PAGE_SIZE;
 	do {
-		const u32 max_block_size = S16_MAX * PAGE_SIZE;
+		/* Aim to keep the runtime under reasonable bounds! */
+		const u32 max_phys_size = SZ_64K;
 		u32 val = prandom_u32_state(prng);
-		u64 total = ce->vm->total;
 		u32 phys_sz;
 		u32 sz;
 		u32 *vaddr;
 		u32 i;
 
-		/*
-		 * If we have a tiny shared address space, like for the GGTT
-		 * then we can't be too greedy.
-		 */
-		if (i915_is_ggtt(ce->vm))
-			total = div64_u64(total, thread->n_cpus);
-
-		sz = min_t(u64, total >> 4, prandom_u32_state(prng));
-		phys_sz = sz % (max_block_size + 1);
+		total = min(total, max);
+		sz = i915_prandom_u32_max_state(total, prng) + 1;
+		phys_sz = sz % max_phys_size;
 		sz = round_up(sz, PAGE_SIZE);
 		phys_sz = round_up(phys_sz, PAGE_SIZE);
 
@@ -276,13 +281,14 @@ static int igt_fill_blt_thread(void *arg)
 		if (err)
 			goto err_unpin;
 
-		i915_gem_object_lock(obj);
-		err = i915_gem_object_set_to_cpu_domain(obj, false);
-		i915_gem_object_unlock(obj);
+		err = i915_gem_object_wait(obj, 0, MAX_SCHEDULE_TIMEOUT);
 		if (err)
 			goto err_unpin;
 
-		for (i = 0; i < huge_gem_object_phys_size(obj) / sizeof(u32); ++i) {
+		for (i = 0; i < huge_gem_object_phys_size(obj) / sizeof(u32); i += 17) {
+			if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
+				drm_clflush_virt_range(&vaddr[i], sizeof(vaddr[i]));
+
 			if (vaddr[i] != val) {
 				pr_err("vaddr[%u]=%x, expected=%x\n", i,
 				       vaddr[i], val);
@@ -293,6 +299,8 @@ static int igt_fill_blt_thread(void *arg)
 		i915_gem_object_unpin_map(obj);
 		i915_gem_object_put(obj);
+
+		total <<= 1;
 	} while (!time_after(jiffies, end));
 
 	goto err_flush;
@@ -319,6 +327,7 @@ static int igt_copy_blt_thread(void *arg)
 	struct intel_context *ce;
 	unsigned int prio;
 	IGT_TIMEOUT(end);
+	u64 total, max;
 	int err;
 
 	ctx = thread->ctx;
@@ -334,20 +343,28 @@ static int igt_copy_blt_thread(void *arg)
 	ce = i915_gem_context_get_engine(ctx, BCS0);
 	GEM_BUG_ON(IS_ERR(ce));
 
+	/*
+	 * If we have a tiny shared address space, like for the GGTT
+	 * then we can't be too greedy.
+	 */
+	max = ce->vm->total;
+	if (i915_is_ggtt(ce->vm) || thread->ctx)
+		max = div_u64(max, thread->n_cpus);
+	max >>= 4;
+
+	total = PAGE_SIZE;
 	do {
-		const u32 max_block_size = S16_MAX * PAGE_SIZE;
+		/* Aim to keep the runtime under reasonable bounds! */
+		const u32 max_phys_size = SZ_64K;
 		u32 val = prandom_u32_state(prng);
-		u64 total = ce->vm->total;
 		u32 phys_sz;
 		u32 sz;
 		u32 *vaddr;
 		u32 i;
 
-		if (i915_is_ggtt(ce->vm))
-			total = div64_u64(total, thread->n_cpus);
-
-		sz = min_t(u64, total >> 4, prandom_u32_state(prng));
-		phys_sz = sz % (max_block_size + 1);
+		total = min(total, max);
+		sz = i915_prandom_u32_max_state(total, prng) + 1;
+		phys_sz = sz % max_phys_size;
 		sz = round_up(sz, PAGE_SIZE);
 		phys_sz = round_up(phys_sz, PAGE_SIZE);
 
@@ -397,13 +414,14 @@ static int igt_copy_blt_thread(void *arg)
 		if (err)
 			goto err_unpin;
 
-		i915_gem_object_lock(dst);
-		err = i915_gem_object_set_to_cpu_domain(dst, false);
-		i915_gem_object_unlock(dst);
+		err = i915_gem_object_wait(dst, 0, MAX_SCHEDULE_TIMEOUT);
 		if (err)
 			goto err_unpin;
 
-		for (i = 0; i < huge_gem_object_phys_size(dst) / sizeof(u32); ++i) {
+		for (i = 0; i < huge_gem_object_phys_size(dst) / sizeof(u32); i += 17) {
+			if (!(dst->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
+				drm_clflush_virt_range(&vaddr[i], sizeof(vaddr[i]));
+
 			if (vaddr[i] != val) {
 				pr_err("vaddr[%u]=%x, expected=%x\n", i,
 				       vaddr[i], val);
@@ -416,6 +434,8 @@ static int igt_copy_blt_thread(void *arg)
 		i915_gem_object_put(src);
 		i915_gem_object_put(dst);
+
+		total <<= 1;
 	} while (!time_after(jiffies, end));
 
 	goto err_flush;