Commit d8af05ff authored by Chris Wilson

drm/i915: Allow sharing the idle-barrier from other kernel requests

By placing our idle-barriers in the i915_active fence tree, we expose
those for reuse by other components that are issuing requests along the
kernel_context. Reusing the proto-barrier active_node is perfectly fine
as the new request implies a context-switch, and so an opportune point
to run the idle-barrier. However, the proto-barrier is not equivalent
to a normal active_node and care must be taken to avoid dereferencing the
ERR_PTR used as its request marker.
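
Below is a minimal, stand-alone sketch of the "ERR_PTR used as its request marker" idea described above: a slot that normally holds a real request pointer instead carries an error-pointer sentinel for a proto-barrier, and must be tested before it is dereferenced. It mimics the kernel's ERR_PTR()/IS_ERR() helpers in userspace; every other name here (demo_request, demo_node, node_is_barrier) is illustrative only and is not an i915 identifier.

#include <stdint.h>
#include <stdio.h>

#define MAX_ERRNO 4095

/* Userspace stand-ins for the kernel's ERR_PTR()/IS_ERR() helpers. */
static inline void *ERR_PTR(long error) { return (void *)error; }
static inline int IS_ERR(const void *ptr)
{
	return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
}

struct demo_request { int seqno; };

struct demo_node {
	struct demo_request *request; /* real request, or ERR_PTR() marker */
};

/* A proto-barrier carries an error pointer instead of a real request. */
static int node_is_barrier(const struct demo_node *node)
{
	return IS_ERR(node->request);
}

int main(void)
{
	struct demo_request rq = { .seqno = 1 };
	struct demo_node normal = { .request = &rq };
	struct demo_node barrier = { .request = ERR_PTR(-1) };

	/* Only dereference the request when the node is not a proto-barrier. */
	if (!node_is_barrier(&normal))
		printf("normal node tracks request %d\n", normal.request->seqno);
	if (node_is_barrier(&barrier))
		printf("barrier node holds a marker, not a request\n");
	return 0;
}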

v2: Comment the more egregious cheek
v3: A glossary!
Reported-by: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
Fixes: ce476c80 ("drm/i915: Keep contexts pinned until after the next kernel context switch")
Fixes: a9877da2 ("drm/i915/oa: Reconfigure contexts on the fly")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190802100015.1281-1-chris@chris-wilson.co.uk
parent 576f0586
@@ -162,23 +162,41 @@ static int __intel_context_active(struct i915_active *active)
 	if (err)
 		goto err_ring;
 
+	return 0;
+
+err_ring:
+	intel_ring_unpin(ce->ring);
+err_put:
+	intel_context_put(ce);
+	return err;
+}
+
+int intel_context_active_acquire(struct intel_context *ce)
+{
+	int err;
+
+	err = i915_active_acquire(&ce->active);
+	if (err)
+		return err;
+
 	/* Preallocate tracking nodes */
 	if (!i915_gem_context_is_kernel(ce->gem_context)) {
 		err = i915_active_acquire_preallocate_barrier(&ce->active,
 							      ce->engine);
-		if (err)
-			goto err_state;
+		if (err) {
+			i915_active_release(&ce->active);
+			return err;
+		}
 	}
 
 	return 0;
+}
 
-err_state:
-	__context_unpin_state(ce->state);
-err_ring:
-	intel_ring_unpin(ce->ring);
-err_put:
-	intel_context_put(ce);
-	return err;
+void intel_context_active_release(struct intel_context *ce)
+{
+	/* Nodes preallocated in intel_context_active() */
+	i915_active_acquire_barrier(&ce->active);
+	i915_active_release(&ce->active);
 }
 
 void
@@ -301,3 +319,7 @@ struct i915_request *intel_context_create_request(struct intel_context *ce)
 	return rq;
 }
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftest_context.c"
+#endif
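
The hunk above follows a convention used throughout i915: the selftest source is #included at the bottom of the implementation file, so the tests share its translation unit and can reach its static functions. A stand-alone sketch of that pattern follows; the file and macro names (ENABLE_SELFTEST, selftest_demo.c) are hypothetical, and the program builds and runs as-is when the macro is left undefined.

/* demo.c: a static helper normally invisible to other translation units. */
#include <stdio.h>

static int add(int a, int b)
{
	return a + b;
}

int main(void)
{
	printf("%d\n", add(2, 3));
	return 0;
}

/*
 * Pulling the test source into this translation unit lets it call add()
 * directly; nothing extra is compiled unless ENABLE_SELFTEST is defined.
 */
#if defined(ENABLE_SELFTEST)
#include "selftest_demo.c"
#endif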
@@ -104,17 +104,8 @@ static inline void intel_context_exit(struct intel_context *ce)
 	ce->ops->exit(ce);
 }
 
-static inline int intel_context_active_acquire(struct intel_context *ce)
-{
-	return i915_active_acquire(&ce->active);
-}
-
-static inline void intel_context_active_release(struct intel_context *ce)
-{
-	/* Nodes preallocated in intel_context_active() */
-	i915_active_acquire_barrier(&ce->active);
-	i915_active_release(&ce->active);
-}
+int intel_context_active_acquire(struct intel_context *ce);
+void intel_context_active_release(struct intel_context *ce);
 
 static inline struct intel_context *intel_context_get(struct intel_context *ce)
 {
......
@@ -90,7 +90,7 @@ static bool switch_to_kernel_context(struct intel_engine_cs *engine)
 	/* Check again on the next retirement. */
 	engine->wakeref_serial = engine->serial + 1;
 
-	i915_request_add_barriers(rq);
+	i915_request_add_active_barriers(rq);
 	__i915_request_commit(rq);
 
 	return false;
......
new file: selftest_context.c

/*
 * SPDX-License-Identifier: GPL-2.0
 *
 * Copyright © 2019 Intel Corporation
 */

#include "i915_selftest.h"
#include "intel_gt.h"

#include "gem/selftests/mock_context.h"
#include "selftests/igt_flush_test.h"
#include "selftests/mock_drm.h"
static int request_sync(struct i915_request *rq)
{
	long timeout;
	int err = 0;

	i915_request_get(rq);

	i915_request_add(rq);
	timeout = i915_request_wait(rq, 0, HZ / 10);
	if (timeout < 0)
		err = timeout;
	else
		i915_request_retire_upto(rq);

	i915_request_put(rq);

	return err;
}
static int context_sync(struct intel_context *ce)
{
	struct intel_timeline *tl = ce->ring->timeline;
	int err = 0;

	do {
		struct i915_request *rq;
		long timeout;

		rcu_read_lock();
		rq = rcu_dereference(tl->last_request.request);
		if (rq)
			rq = i915_request_get_rcu(rq);
		rcu_read_unlock();
		if (!rq)
			break;

		timeout = i915_request_wait(rq, 0, HZ / 10);
		if (timeout < 0)
			err = timeout;
		else
			i915_request_retire_upto(rq);

		i915_request_put(rq);
	} while (!err);

	return err;
}
static int __live_active_context(struct intel_engine_cs *engine,
				 struct i915_gem_context *fixme)
{
	struct intel_context *ce;
	int pass;
	int err;

	/*
	 * We keep active contexts alive until after a subsequent context
	 * switch as the final write from the context-save will be after
	 * we retire the final request. We track when we unpin the context,
	 * under the presumption that the final pin is from the last request,
	 * and instead of immediately unpinning the context, we add a task
	 * to unpin the context from the next idle-barrier.
	 *
	 * This test makes sure that the context is kept alive until a
	 * subsequent idle-barrier (emitted when the engine wakeref hits 0
	 * with no more outstanding requests).
	 */

	if (intel_engine_pm_is_awake(engine)) {
		pr_err("%s is awake before starting %s!\n",
		       engine->name, __func__);
		return -EINVAL;
	}

	ce = intel_context_create(fixme, engine);
	if (!ce)
		return -ENOMEM;

	for (pass = 0; pass <= 2; pass++) {
		struct i915_request *rq;

		rq = intel_context_create_request(ce);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			goto err;
		}

		err = request_sync(rq);
		if (err)
			goto err;

		/* Context will be kept active until after an idle-barrier. */
		if (i915_active_is_idle(&ce->active)) {
			pr_err("context is not active; expected idle-barrier (%s pass %d)\n",
			       engine->name, pass);
			err = -EINVAL;
			goto err;
		}

		if (!intel_engine_pm_is_awake(engine)) {
			pr_err("%s is asleep before idle-barrier\n",
			       engine->name);
			err = -EINVAL;
			goto err;
		}
	}

	/* Now make sure our idle-barriers are flushed */
	err = context_sync(engine->kernel_context);
	if (err)
		goto err;

	if (!i915_active_is_idle(&ce->active)) {
		pr_err("context is still active!");
		err = -EINVAL;
	}

	if (intel_engine_pm_is_awake(engine)) {
		struct drm_printer p = drm_debug_printer(__func__);

		intel_engine_dump(engine, &p,
				  "%s is still awake after idle-barriers\n",
				  engine->name);
		GEM_TRACE_DUMP();

		err = -EINVAL;
		goto err;
	}

err:
	intel_context_put(ce);
	return err;
}
static int live_active_context(void *arg)
{
	struct intel_gt *gt = arg;
	struct intel_engine_cs *engine;
	struct i915_gem_context *fixme;
	enum intel_engine_id id;
	struct drm_file *file;
	int err = 0;

	file = mock_file(gt->i915);
	if (IS_ERR(file))
		return PTR_ERR(file);

	mutex_lock(&gt->i915->drm.struct_mutex);

	fixme = live_context(gt->i915, file);
	if (!fixme) {
		err = -ENOMEM;
		goto unlock;
	}

	for_each_engine(engine, gt->i915, id) {
		err = __live_active_context(engine, fixme);
		if (err)
			break;

		err = igt_flush_test(gt->i915, I915_WAIT_LOCKED);
		if (err)
			break;
	}

unlock:
	mutex_unlock(&gt->i915->drm.struct_mutex);
	mock_file_free(gt->i915, file);
	return err;
}
static int __remote_sync(struct intel_context *ce, struct intel_context *remote)
{
	struct i915_request *rq;
	int err;

	err = intel_context_pin(remote);
	if (err)
		return err;

	rq = intel_context_create_request(ce);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto unpin;
	}

	err = intel_context_prepare_remote_request(remote, rq);
	if (err) {
		i915_request_add(rq);
		goto unpin;
	}

	err = request_sync(rq);

unpin:
	intel_context_unpin(remote);
	return err;
}
static int __live_remote_context(struct intel_engine_cs *engine,
				 struct i915_gem_context *fixme)
{
	struct intel_context *local, *remote;
	int pass;
	int err;

	/*
	 * Check that our idle barriers do not interfere with normal
	 * activity tracking. In particular, check that operating
	 * on the context image remotely (intel_context_prepare_remote_request),
	 * which inserts foreign fences into intel_context.active, does not
	 * clobber the idle-barrier.
	 */

	remote = intel_context_create(fixme, engine);
	if (!remote)
		return -ENOMEM;

	local = intel_context_create(fixme, engine);
	if (!local) {
		err = -ENOMEM;
		goto err_remote;
	}

	for (pass = 0; pass <= 2; pass++) {
		err = __remote_sync(local, remote);
		if (err)
			break;

		err = __remote_sync(engine->kernel_context, remote);
		if (err)
			break;

		if (i915_active_is_idle(&remote->active)) {
			pr_err("remote context is not active; expected idle-barrier (%s pass %d)\n",
			       engine->name, pass);
			err = -EINVAL;
			break;
		}
	}

	intel_context_put(local);
err_remote:
	intel_context_put(remote);
	return err;
}
static int live_remote_context(void *arg)
{
	struct intel_gt *gt = arg;
	struct intel_engine_cs *engine;
	struct i915_gem_context *fixme;
	enum intel_engine_id id;
	struct drm_file *file;
	int err = 0;

	file = mock_file(gt->i915);
	if (IS_ERR(file))
		return PTR_ERR(file);

	mutex_lock(&gt->i915->drm.struct_mutex);

	fixme = live_context(gt->i915, file);
	if (!fixme) {
		err = -ENOMEM;
		goto unlock;
	}

	for_each_engine(engine, gt->i915, id) {
		err = __live_remote_context(engine, fixme);
		if (err)
			break;

		err = igt_flush_test(gt->i915, I915_WAIT_LOCKED);
		if (err)
			break;
	}

unlock:
	mutex_unlock(&gt->i915->drm.struct_mutex);
	mock_file_free(gt->i915, file);
	return err;
}
int intel_context_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(live_active_context),
		SUBTEST(live_remote_context),
	};
	struct intel_gt *gt = &i915->gt;

	if (intel_gt_is_wedged(gt))
		return 0;

	return intel_gt_live_subtests(tests, gt);
}
This diff is collapsed.
@@ -413,6 +413,6 @@ static inline void i915_active_fini(struct i915_active *ref) { }
 int i915_active_acquire_preallocate_barrier(struct i915_active *ref,
 					    struct intel_engine_cs *engine);
 void i915_active_acquire_barrier(struct i915_active *ref);
-void i915_request_add_barriers(struct i915_request *rq);
+void i915_request_add_active_barriers(struct i915_request *rq);
 
 #endif /* _I915_ACTIVE_H_ */
@@ -42,7 +42,7 @@ struct i915_active {
 	int (*active)(struct i915_active *ref);
 	void (*retire)(struct i915_active *ref);
 
-	struct llist_head barriers;
+	struct llist_head preallocated_barriers;
 };
 
 #endif /* _I915_ACTIVE_TYPES_H_ */
@@ -15,6 +15,7 @@ selftest(workarounds, intel_workarounds_live_selftests)
 selftest(timelines, intel_timeline_live_selftests)
 selftest(requests, i915_request_live_selftests)
 selftest(active, i915_active_live_selftests)
+selftest(gt_contexts, intel_context_live_selftests)
 selftest(objects, i915_gem_object_live_selftests)
 selftest(mman, i915_gem_mman_live_selftests)
 selftest(dmabuf, i915_gem_dmabuf_live_selftests)
@@ -24,7 +25,7 @@ selftest(gtt, i915_gem_gtt_live_selftests)
 selftest(gem, i915_gem_live_selftests)
 selftest(evict, i915_gem_evict_live_selftests)
 selftest(hugepages, i915_gem_huge_page_live_selftests)
-selftest(contexts, i915_gem_context_live_selftests)
+selftest(gem_contexts, i915_gem_context_live_selftests)
 selftest(blt, i915_gem_object_blt_live_selftests)
 selftest(client, i915_gem_client_blt_live_selftests)
 selftest(reset, intel_reset_live_selftests)
......
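
A note on the selftest registration file patched above: i915_live_selftests.h is an X-macro list, where each selftest(name, func) entry is expanded differently depending on how the including code defines selftest(). The driver does this by re-#including the header under different definitions; the stand-alone sketch below wraps the list in a macro instead, and every name in it (MY_SELFTEST_LIST, run_alpha, run_beta, my_tests) is hypothetical.

#include <stdio.h>

/* The list of tests: each entry is selftest(name, entry point). */
#define MY_SELFTEST_LIST(selftest) \
	selftest(alpha, run_alpha) \
	selftest(beta, run_beta)

static int run_alpha(void) { return 0; }
static int run_beta(void) { return 0; }

/* First expansion: an enum of test ids. */
#define selftest(name, func) id_##name,
enum my_selftest_id { MY_SELFTEST_LIST(selftest) id_count };
#undef selftest

/* Second expansion: a table of names and entry points. */
#define selftest(name, func) { #name, func },
static const struct { const char *name; int (*run)(void); } my_tests[] = {
	MY_SELFTEST_LIST(selftest)
};
#undef selftest

int main(void)
{
	/* Walk the generated table, just as the selftest runner walks its list. */
	for (unsigned int i = 0; i < id_count; i++)
		printf("%s -> %d\n", my_tests[i].name, my_tests[i].run());
	return 0;
}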