Commit e9768bfe authored by Chris Wilson

drm/i915/selftests: Teach requests to use all available engines

The request selftests straddle the boundary between checking the driver
and the hardware. They are subject to the quirks of the underlying HW,
but operate on top of the backend abstractions. The tests focus on the
scheduler elements and so should check for interactions of the scheduler
across all exposed engines.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20191016125236.17960-1-chris@chris-wilson.co.uk
parent 0b23e2a6
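The conversion below follows one pattern in every test: count the engines exposed to userspace with the new num_uabi_engines() helper, swap the fixed I915_NUM_ENGINES-sized stack arrays for kcalloc'd per-engine arrays, and walk the engines with for_each_uabi_engine() while keeping a running index. A minimal sketch of that shape, assuming a made-up wrapper name (run_on_each_uabi_engine() and its per-engine body are placeholders for illustration, not code from this patch):

    /* Hypothetical sketch of the allocate-and-iterate pattern used throughout. */
    static int run_on_each_uabi_engine(struct drm_i915_private *i915)
    {
            const unsigned int nengines = num_uabi_engines(i915);
            struct intel_engine_cs *engine;
            struct i915_request **request;
            unsigned int idx;

            /* kcalloc() zeroes the array, so unused slots stay NULL for cleanup. */
            request = kcalloc(nengines, sizeof(*request), GFP_KERNEL);
            if (!request)
                    return -ENOMEM;

            idx = 0;
            for_each_uabi_engine(engine, i915) {
                    /* per-engine work goes here, e.g. i915_request_create() */
                    request[idx] = NULL;
                    idx++;
            }

            idx = 0;
            for_each_uabi_engine(engine, i915) {
                    if (request[idx])
                            i915_request_put(request[idx]);
                    idx++;
            }

            kfree(request);
            return 0;
    }

Sizing the arrays at runtime means the tests no longer depend on the compile-time I915_NUM_ENGINES bound and naturally cover however many engines the device actually exposes.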
@@ -37,6 +37,18 @@
 #include "mock_drm.h"
 #include "mock_gem_device.h"
 
+static unsigned int num_uabi_engines(struct drm_i915_private *i915)
+{
+        struct intel_engine_cs *engine;
+        unsigned int count;
+
+        count = 0;
+        for_each_uabi_engine(engine, i915)
+                count++;
+
+        return count;
+}
+
 static int igt_add_request(void *arg)
 {
         struct drm_i915_private *i915 = arg;
@@ -281,7 +293,7 @@ static int __igt_breadcrumbs_smoketest(void *arg)
          * that the fences were marked as signaled.
          */
 
-        requests = kmalloc_array(total, sizeof(*requests), GFP_KERNEL);
+        requests = kcalloc(total, sizeof(*requests), GFP_KERNEL);
         if (!requests)
                 return -ENOMEM;
@@ -422,12 +434,11 @@ static int mock_breadcrumbs_smoketest(void *arg)
          * See __igt_breadcrumbs_smoketest();
          */
 
-        threads = kmalloc_array(ncpus, sizeof(*threads), GFP_KERNEL);
+        threads = kcalloc(ncpus, sizeof(*threads), GFP_KERNEL);
         if (!threads)
                 return -ENOMEM;
 
-        t.contexts =
-                kmalloc_array(t.ncontexts, sizeof(*t.contexts), GFP_KERNEL);
+        t.contexts = kcalloc(t.ncontexts, sizeof(*t.contexts), GFP_KERNEL);
         if (!t.contexts) {
                 ret = -ENOMEM;
                 goto out_threads;
@@ -511,15 +522,15 @@ static int live_nop_request(void *arg)
         struct drm_i915_private *i915 = arg;
         struct intel_engine_cs *engine;
         struct igt_live_test t;
-        unsigned int id;
         int err = -ENODEV;
 
-        /* Submit various sized batches of empty requests, to each engine
+        /*
+         * Submit various sized batches of empty requests, to each engine
          * (individually), and wait for the batch to complete. We can check
          * the overhead of submitting requests to the hardware.
          */
-        for_each_engine(engine, i915, id) {
+        for_each_uabi_engine(engine, i915) {
                 unsigned long n, prime;
                 IGT_TIMEOUT(end_time);
                 ktime_t times[2] = {};
@@ -539,7 +550,8 @@ static int live_nop_request(void *arg)
                         if (IS_ERR(request))
                                 return PTR_ERR(request);
 
-                        /* This space is left intentionally blank.
+                        /*
+                         * This space is left intentionally blank.
                          *
                          * We do not actually want to perform any
                          * action with this request, we just want
@@ -657,10 +669,10 @@ static int live_empty_request(void *arg)
         struct intel_engine_cs *engine;
         struct igt_live_test t;
         struct i915_vma *batch;
-        unsigned int id;
         int err = 0;
 
-        /* Submit various sized batches of empty requests, to each engine
+        /*
+         * Submit various sized batches of empty requests, to each engine
          * (individually), and wait for the batch to complete. We can check
          * the overhead of submitting requests to the hardware.
          */
@@ -669,7 +681,7 @@ static int live_empty_request(void *arg)
         if (IS_ERR(batch))
                 return PTR_ERR(batch);
 
-        for_each_engine(engine, i915, id) {
+        for_each_uabi_engine(engine, i915) {
                 IGT_TIMEOUT(end_time);
                 struct i915_request *request;
                 unsigned long n, prime;
@@ -801,63 +813,73 @@ static int recursive_batch_resolve(struct i915_vma *batch)
 static int live_all_engines(void *arg)
 {
         struct drm_i915_private *i915 = arg;
+        const unsigned int nengines = num_uabi_engines(i915);
         struct intel_engine_cs *engine;
-        struct i915_request *request[I915_NUM_ENGINES];
+        struct i915_request **request;
         struct igt_live_test t;
         struct i915_vma *batch;
-        unsigned int id;
+        unsigned int idx;
         int err;
 
-        /* Check we can submit requests to all engines simultaneously. We
+        /*
+         * Check we can submit requests to all engines simultaneously. We
          * send a recursive batch to each engine - checking that we don't
          * block doing so, and that they don't complete too soon.
          */
 
+        request = kcalloc(nengines, sizeof(*request), GFP_KERNEL);
+        if (!request)
+                return -ENOMEM;
+
         err = igt_live_test_begin(&t, i915, __func__, "");
         if (err)
-                return err;
+                goto out_free;
 
         batch = recursive_batch(i915);
         if (IS_ERR(batch)) {
                 err = PTR_ERR(batch);
                 pr_err("%s: Unable to create batch, err=%d\n", __func__, err);
-                return err;
+                goto out_free;
         }
 
-        for_each_engine(engine, i915, id) {
-                request[id] = i915_request_create(engine->kernel_context);
-                if (IS_ERR(request[id])) {
-                        err = PTR_ERR(request[id]);
+        idx = 0;
+        for_each_uabi_engine(engine, i915) {
+                request[idx] = i915_request_create(engine->kernel_context);
+                if (IS_ERR(request[idx])) {
+                        err = PTR_ERR(request[idx]);
                         pr_err("%s: Request allocation failed with err=%d\n",
                                __func__, err);
                         goto out_request;
                 }
 
-                err = engine->emit_bb_start(request[id],
+                err = engine->emit_bb_start(request[idx],
                                             batch->node.start,
                                             batch->node.size,
                                             0);
                 GEM_BUG_ON(err);
-                request[id]->batch = batch;
+                request[idx]->batch = batch;
 
                 i915_vma_lock(batch);
-                err = i915_request_await_object(request[id], batch->obj, 0);
+                err = i915_request_await_object(request[idx], batch->obj, 0);
                 if (err == 0)
-                        err = i915_vma_move_to_active(batch, request[id], 0);
+                        err = i915_vma_move_to_active(batch, request[idx], 0);
                 i915_vma_unlock(batch);
                 GEM_BUG_ON(err);
 
-                i915_request_get(request[id]);
-                i915_request_add(request[id]);
+                i915_request_get(request[idx]);
+                i915_request_add(request[idx]);
+                idx++;
         }
 
-        for_each_engine(engine, i915, id) {
-                if (i915_request_completed(request[id])) {
+        idx = 0;
+        for_each_uabi_engine(engine, i915) {
+                if (i915_request_completed(request[idx])) {
                         pr_err("%s(%s): request completed too early!\n",
                                __func__, engine->name);
                         err = -EINVAL;
                         goto out_request;
                 }
+                idx++;
         }
 
         err = recursive_batch_resolve(batch);
@@ -866,10 +888,11 @@ static int live_all_engines(void *arg)
                 goto out_request;
         }
 
-        for_each_engine(engine, i915, id) {
+        idx = 0;
+        for_each_uabi_engine(engine, i915) {
                 long timeout;
 
-                timeout = i915_request_wait(request[id], 0,
+                timeout = i915_request_wait(request[idx], 0,
                                             MAX_SCHEDULE_TIMEOUT);
                 if (timeout < 0) {
                         err = timeout;
@@ -878,43 +901,56 @@ static int live_all_engines(void *arg)
                         goto out_request;
                 }
 
-                GEM_BUG_ON(!i915_request_completed(request[id]));
-                i915_request_put(request[id]);
-                request[id] = NULL;
+                GEM_BUG_ON(!i915_request_completed(request[idx]));
+                i915_request_put(request[idx]);
+                request[idx] = NULL;
+                idx++;
         }
 
         err = igt_live_test_end(&t);
 
 out_request:
-        for_each_engine(engine, i915, id)
-                if (request[id])
-                        i915_request_put(request[id]);
+        idx = 0;
+        for_each_uabi_engine(engine, i915) {
+                if (request[idx])
+                        i915_request_put(request[idx]);
+                idx++;
+        }
         i915_vma_unpin(batch);
         i915_vma_put(batch);
+out_free:
+        kfree(request);
         return err;
 }
 
 static int live_sequential_engines(void *arg)
 {
         struct drm_i915_private *i915 = arg;
-        struct i915_request *request[I915_NUM_ENGINES] = {};
+        const unsigned int nengines = num_uabi_engines(i915);
+        struct i915_request **request;
         struct i915_request *prev = NULL;
         struct intel_engine_cs *engine;
         struct igt_live_test t;
-        unsigned int id;
+        unsigned int idx;
         int err;
 
-        /* Check we can submit requests to all engines sequentially, such
+        /*
+         * Check we can submit requests to all engines sequentially, such
          * that each successive request waits for the earlier ones. This
          * tests that we don't execute requests out of order, even though
          * they are running on independent engines.
          */
 
+        request = kcalloc(nengines, sizeof(*request), GFP_KERNEL);
+        if (!request)
+                return -ENOMEM;
+
         err = igt_live_test_begin(&t, i915, __func__, "");
         if (err)
-                return err;
+                goto out_free;
 
-        for_each_engine(engine, i915, id) {
+        idx = 0;
+        for_each_uabi_engine(engine, i915) {
                 struct i915_vma *batch;
 
                 batch = recursive_batch(i915);
@@ -922,66 +958,69 @@ static int live_sequential_engines(void *arg)
                         err = PTR_ERR(batch);
                         pr_err("%s: Unable to create batch for %s, err=%d\n",
                                __func__, engine->name, err);
-                        return err;
+                        goto out_free;
                 }
 
-                request[id] = i915_request_create(engine->kernel_context);
-                if (IS_ERR(request[id])) {
-                        err = PTR_ERR(request[id]);
+                request[idx] = i915_request_create(engine->kernel_context);
+                if (IS_ERR(request[idx])) {
+                        err = PTR_ERR(request[idx]);
                         pr_err("%s: Request allocation failed for %s with err=%d\n",
                                __func__, engine->name, err);
                         goto out_request;
                 }
 
                 if (prev) {
-                        err = i915_request_await_dma_fence(request[id],
+                        err = i915_request_await_dma_fence(request[idx],
                                                            &prev->fence);
                         if (err) {
-                                i915_request_add(request[id]);
+                                i915_request_add(request[idx]);
                                 pr_err("%s: Request await failed for %s with err=%d\n",
                                        __func__, engine->name, err);
                                 goto out_request;
                         }
                 }
 
-                err = engine->emit_bb_start(request[id],
+                err = engine->emit_bb_start(request[idx],
                                             batch->node.start,
                                             batch->node.size,
                                             0);
                 GEM_BUG_ON(err);
-                request[id]->batch = batch;
+                request[idx]->batch = batch;
 
                 i915_vma_lock(batch);
-                err = i915_request_await_object(request[id], batch->obj, false);
+                err = i915_request_await_object(request[idx],
+                                                batch->obj, false);
                 if (err == 0)
-                        err = i915_vma_move_to_active(batch, request[id], 0);
+                        err = i915_vma_move_to_active(batch, request[idx], 0);
                 i915_vma_unlock(batch);
                 GEM_BUG_ON(err);
 
-                i915_request_get(request[id]);
-                i915_request_add(request[id]);
+                i915_request_get(request[idx]);
+                i915_request_add(request[idx]);
 
-                prev = request[id];
+                prev = request[idx];
+                idx++;
         }
 
-        for_each_engine(engine, i915, id) {
+        idx = 0;
+        for_each_uabi_engine(engine, i915) {
                 long timeout;
 
-                if (i915_request_completed(request[id])) {
+                if (i915_request_completed(request[idx])) {
                         pr_err("%s(%s): request completed too early!\n",
                                __func__, engine->name);
                         err = -EINVAL;
                         goto out_request;
                 }
 
-                err = recursive_batch_resolve(request[id]->batch);
+                err = recursive_batch_resolve(request[idx]->batch);
                 if (err) {
                         pr_err("%s: failed to resolve batch, err=%d\n",
                                __func__, err);
                         goto out_request;
                 }
 
-                timeout = i915_request_wait(request[id], 0,
+                timeout = i915_request_wait(request[idx], 0,
                                             MAX_SCHEDULE_TIMEOUT);
                 if (timeout < 0) {
                         err = timeout;
@@ -990,30 +1029,35 @@ static int live_sequential_engines(void *arg)
                         goto out_request;
                 }
 
-                GEM_BUG_ON(!i915_request_completed(request[id]));
+                GEM_BUG_ON(!i915_request_completed(request[idx]));
+                idx++;
         }
 
         err = igt_live_test_end(&t);
 
 out_request:
-        for_each_engine(engine, i915, id) {
+        idx = 0;
+        for_each_uabi_engine(engine, i915) {
                 u32 *cmd;
 
-                if (!request[id])
+                if (!request[idx])
                         break;
 
-                cmd = i915_gem_object_pin_map(request[id]->batch->obj,
+                cmd = i915_gem_object_pin_map(request[idx]->batch->obj,
                                               I915_MAP_WC);
                 if (!IS_ERR(cmd)) {
                         *cmd = MI_BATCH_BUFFER_END;
                         intel_gt_chipset_flush(engine->gt);
-                        i915_gem_object_unpin_map(request[id]->batch->obj);
+                        i915_gem_object_unpin_map(request[idx]->batch->obj);
                 }
 
-                i915_vma_put(request[id]->batch);
-                i915_request_put(request[id]);
+                i915_vma_put(request[idx]->batch);
+                i915_request_put(request[idx]);
+                idx++;
         }
+out_free:
+        kfree(request);
 
         return err;
 }
@@ -1079,9 +1123,10 @@ static int live_parallel_engines(void *arg)
                 __live_parallel_engineN,
                 NULL,
         };
+        const unsigned int nengines = num_uabi_engines(i915);
         struct intel_engine_cs *engine;
-        enum intel_engine_id id;
         int (* const *fn)(void *arg);
+        struct task_struct **tsk;
         int err = 0;
 
         /*
@@ -1089,42 +1134,49 @@ static int live_parallel_engines(void *arg)
          * tests that we load up the system maximally.
          */
 
+        tsk = kcalloc(nengines, sizeof(*tsk), GFP_KERNEL);
+        if (!tsk)
+                return -ENOMEM;
+
         for (fn = func; !err && *fn; fn++) {
-                struct task_struct *tsk[I915_NUM_ENGINES] = {};
                 struct igt_live_test t;
+                unsigned int idx;
 
                 err = igt_live_test_begin(&t, i915, __func__, "");
                 if (err)
                         break;
 
-                for_each_engine(engine, i915, id) {
-                        tsk[id] = kthread_run(*fn, engine,
-                                              "igt/parallel:%s",
-                                              engine->name);
-                        if (IS_ERR(tsk[id])) {
-                                err = PTR_ERR(tsk[id]);
+                idx = 0;
+                for_each_uabi_engine(engine, i915) {
+                        tsk[idx] = kthread_run(*fn, engine,
+                                               "igt/parallel:%s",
+                                               engine->name);
+                        if (IS_ERR(tsk[idx])) {
+                                err = PTR_ERR(tsk[idx]);
                                 break;
                         }
 
-                        get_task_struct(tsk[id]);
+                        get_task_struct(tsk[idx++]);
                 }
 
-                for_each_engine(engine, i915, id) {
+                idx = 0;
+                for_each_uabi_engine(engine, i915) {
                         int status;
 
-                        if (IS_ERR_OR_NULL(tsk[id]))
-                                continue;
+                        if (IS_ERR(tsk[idx]))
+                                break;
 
-                        status = kthread_stop(tsk[id]);
+                        status = kthread_stop(tsk[idx]);
                         if (status && !err)
                                 err = status;
 
-                        put_task_struct(tsk[id]);
+                        put_task_struct(tsk[idx++]);
                 }
 
                 if (igt_live_test_end(&t))
                         err = -EIO;
         }
 
+        kfree(tsk);
         return err;
 }
@@ -1168,16 +1220,16 @@ max_batches(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
 static int live_breadcrumbs_smoketest(void *arg)
 {
         struct drm_i915_private *i915 = arg;
-        struct smoketest t[I915_NUM_ENGINES];
-        unsigned int ncpus = num_online_cpus();
+        const unsigned int nengines = num_uabi_engines(i915);
+        const unsigned int ncpus = num_online_cpus();
         unsigned long num_waits, num_fences;
         struct intel_engine_cs *engine;
         struct task_struct **threads;
         struct igt_live_test live;
-        enum intel_engine_id id;
         intel_wakeref_t wakeref;
         struct drm_file *file;
-        unsigned int n;
+        struct smoketest *smoke;
+        unsigned int n, idx;
         int ret = 0;
 
         /*
@@ -1196,28 +1248,31 @@ static int live_breadcrumbs_smoketest(void *arg)
                 goto out_rpm;
         }
 
-        threads = kcalloc(ncpus * I915_NUM_ENGINES,
-                          sizeof(*threads),
-                          GFP_KERNEL);
-        if (!threads) {
+        smoke = kcalloc(nengines, sizeof(*smoke), GFP_KERNEL);
+        if (!smoke) {
                 ret = -ENOMEM;
                 goto out_file;
         }
 
-        memset(&t[0], 0, sizeof(t[0]));
-        t[0].request_alloc = __live_request_alloc;
-        t[0].ncontexts = 64;
-        t[0].contexts = kmalloc_array(t[0].ncontexts,
-                                      sizeof(*t[0].contexts),
-                                      GFP_KERNEL);
-        if (!t[0].contexts) {
+        threads = kcalloc(ncpus * nengines, sizeof(*threads), GFP_KERNEL);
+        if (!threads) {
+                ret = -ENOMEM;
+                goto out_smoke;
+        }
+
+        smoke[0].request_alloc = __live_request_alloc;
+        smoke[0].ncontexts = 64;
+        smoke[0].contexts = kcalloc(smoke[0].ncontexts,
+                                    sizeof(*smoke[0].contexts),
+                                    GFP_KERNEL);
+        if (!smoke[0].contexts) {
                 ret = -ENOMEM;
                 goto out_threads;
         }
 
-        for (n = 0; n < t[0].ncontexts; n++) {
-                t[0].contexts[n] = live_context(i915, file);
-                if (!t[0].contexts[n]) {
+        for (n = 0; n < smoke[0].ncontexts; n++) {
+                smoke[0].contexts[n] = live_context(i915, file);
+                if (!smoke[0].contexts[n]) {
                         ret = -ENOMEM;
                         goto out_contexts;
                 }
@@ -1227,42 +1282,47 @@ static int live_breadcrumbs_smoketest(void *arg)
         if (ret)
                 goto out_contexts;
 
-        for_each_engine(engine, i915, id) {
-                t[id] = t[0];
-                t[id].engine = engine;
-                t[id].max_batch = max_batches(t[0].contexts[0], engine);
-                if (t[id].max_batch < 0) {
-                        ret = t[id].max_batch;
+        idx = 0;
+        for_each_uabi_engine(engine, i915) {
+                smoke[idx] = smoke[0];
+                smoke[idx].engine = engine;
+                smoke[idx].max_batch =
+                        max_batches(smoke[0].contexts[0], engine);
+                if (smoke[idx].max_batch < 0) {
+                        ret = smoke[idx].max_batch;
                         goto out_flush;
                 }
 
                 /* One ring interleaved between requests from all cpus */
-                t[id].max_batch /= num_online_cpus() + 1;
+                smoke[idx].max_batch /= num_online_cpus() + 1;
                 pr_debug("Limiting batches to %d requests on %s\n",
-                         t[id].max_batch, engine->name);
+                         smoke[idx].max_batch, engine->name);
 
                 for (n = 0; n < ncpus; n++) {
                         struct task_struct *tsk;
 
                         tsk = kthread_run(__igt_breadcrumbs_smoketest,
-                                          &t[id], "igt/%d.%d", id, n);
+                                          &smoke[idx], "igt/%d.%d", idx, n);
                         if (IS_ERR(tsk)) {
                                 ret = PTR_ERR(tsk);
                                 goto out_flush;
                         }
 
                         get_task_struct(tsk);
-                        threads[id * ncpus + n] = tsk;
+                        threads[idx * ncpus + n] = tsk;
                 }
+                idx++;
         }
 
         msleep(jiffies_to_msecs(i915_selftest.timeout_jiffies));
 
 out_flush:
+        idx = 0;
         num_waits = 0;
         num_fences = 0;
-        for_each_engine(engine, i915, id) {
+        for_each_uabi_engine(engine, i915) {
                 for (n = 0; n < ncpus; n++) {
-                        struct task_struct *tsk = threads[id * ncpus + n];
+                        struct task_struct *tsk = threads[idx * ncpus + n];
                         int err;
 
                         if (!tsk)
@@ -1275,17 +1335,20 @@ static int live_breadcrumbs_smoketest(void *arg)
                         put_task_struct(tsk);
                 }
 
-                num_waits += atomic_long_read(&t[id].num_waits);
-                num_fences += atomic_long_read(&t[id].num_fences);
+                num_waits += atomic_long_read(&smoke[idx].num_waits);
+                num_fences += atomic_long_read(&smoke[idx].num_fences);
+                idx++;
         }
         pr_info("Completed %lu waits for %lu fences across %d engines and %d cpus\n",
                 num_waits, num_fences, RUNTIME_INFO(i915)->num_engines, ncpus);
 
         ret = igt_live_test_end(&live) ?: ret;
 out_contexts:
-        kfree(t[0].contexts);
+        kfree(smoke[0].contexts);
 out_threads:
         kfree(threads);
+out_smoke:
+        kfree(smoke);
 out_file:
         mock_file_free(i915, file);
 out_rpm: