Commit 6d06779e authored by Chris Wilson

drm/i915: Load balancing across a virtual engine

Having allowed the user to define the set of engines that they want to
use, we go one step further and allow them to bind those engines
into a single virtual instance. Submitting a batch to the virtual engine
will then forward it to any one of the set in a manner that best
distributes the load.  The virtual engine has a single timeline across all
engines (it operates as a single queue), so it is not able to concurrently
run batches across multiple engines by itself; that is left up to the user
to submit multiple concurrent batches to multiple queues. Multiple users
will be load balanced across the system.
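
As a concrete illustration, here is a minimal userspace sketch of how the
new uABI might be exercised. It assumes the engine-map setparam
(I915_CONTEXT_PARAM_ENGINES, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM) from
earlier patches in this series, a uAPI header that already carries this
patch's additions, and placeholder fd/ctx_id values; error handling is
omitted and the snippet is not part of the patch itself:

  #include <stdint.h>
  #include <sys/ioctl.h>
  #include <drm/i915_drm.h>

  /* Bind vcs0 + vcs1 into one balanced virtual engine at index 0 of the
   * context's engine map, keeping the individual engines at indices 1-2.
   */
  static int setup_balanced_vcs(int fd, uint32_t ctx_id)
  {
  	I915_DEFINE_CONTEXT_ENGINES_LOAD_BALANCE(balance, 2) = {
  		.base.name = I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE,
  		.engine_index = 0,	/* fill the invalid slot below */
  		.num_siblings = 2,
  		.engines = {
  			{ I915_ENGINE_CLASS_VIDEO, 0 },
  			{ I915_ENGINE_CLASS_VIDEO, 1 },
  		},
  	};
  	struct {
  		__u64 extensions;
  		struct i915_engine_class_instance engines[3];
  	} __attribute__((packed)) map = {
  		.extensions = (uintptr_t)&balance,
  		.engines = {
  			/* slot 0 left invalid so the extension may claim it */
  			{ I915_ENGINE_CLASS_INVALID,
  			  I915_ENGINE_CLASS_INVALID_NONE },
  			{ I915_ENGINE_CLASS_VIDEO, 0 },
  			{ I915_ENGINE_CLASS_VIDEO, 1 },
  		},
  	};
  	struct drm_i915_gem_context_param p = {
  		.ctx_id = ctx_id,
  		.param = I915_CONTEXT_PARAM_ENGINES,
  		.size = sizeof(map),
  		.value = (uintptr_t)&map,
  	};

  	return ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p);
  }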

The mechanism used for load balancing in this patch is a late greedy
balancer. When a request is ready for execution, it is added to each
engine's queue, and when an engine is ready for its next request it
claims it from the virtual engine. The first engine to do so wins, i.e.
the request is executed at the earliest opportunity (idle moment) in the
system.
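
To make the "first engine to claim wins" behaviour concrete, here is a tiny
userspace analogy (a sketch only, not the driver's implementation, which
lives in the collapsed intel_lrc.c diff): worker threads stand in for the
physical engines and pop from one shared queue the moment they go idle, so
the earliest-idle worker wins each request:

  #include <pthread.h>
  #include <stdio.h>
  #include <unistd.h>

  #define NREQ 8
  #define NENG 2

  static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
  static int next_request;	/* head of the single virtual timeline */

  static int claim_request(void)
  {
  	int rq = -1;

  	pthread_mutex_lock(&lock);
  	if (next_request < NREQ)
  		rq = next_request++;	/* first idle "engine" claims it */
  	pthread_mutex_unlock(&lock);

  	return rq;
  }

  static void *engine_thread(void *arg)
  {
  	long id = (long)arg;
  	int rq;

  	while ((rq = claim_request()) >= 0) {
  		printf("engine %ld executes request %d\n", id, rq);
  		usleep(1000 * (id + 1));	/* pretend unequal HW speed */
  	}
  	return NULL;
  }

  int main(void)
  {
  	pthread_t threads[NENG];
  	long i;

  	for (i = 0; i < NENG; i++)
  		pthread_create(&threads[i], NULL, engine_thread, (void *)i);
  	for (i = 0; i < NENG; i++)
  		pthread_join(threads[i], NULL);
  	return 0;
  }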

As not all HW is created equal, the user is still able to skip the
virtual engine and execute the batch on a specific engine, all within the
same queue. It will then be executed in order on the correct engine,
with execution on other virtual engines being moved away due to the load
detection.
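
Continuing the hypothetical engine map sketched above (virtual engine at
index 0, vcs0/vcs1 at indices 1 and 2), the execbuffer ring selector
indexes that map as per the engine-map uAPI earlier in this series, so
bypassing the balancer is a one-line change. fd, ctx_id and exec_objects
are placeholders and buffer setup is omitted:

  struct drm_i915_gem_execbuffer2 eb = {
  	.buffers_ptr = (uintptr_t)exec_objects,
  	.buffer_count = 1,
  	.rsvd1 = ctx_id,
  	.flags = 0,	/* index 0: let the virtual engine pick a sibling */
  };

  ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &eb);

  eb.flags = 2;	/* index 2: force this batch onto vcs1 directly */
  ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &eb);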

A few areas for potential improvement are left!

- The virtual engine always takes priority over equal-priority tasks.
This is mostly broken up by applying FQ_CODEL rules for prioritising new clients,
and hopefully the virtual and real engines are not then congested (i.e.
all work is via virtual engines, or all work is to the real engine).

- We require the breadcrumb irq around every virtual engine request. For
normal engines, we eliminate the need for the slow round trip via
interrupt by using the submit fence and queueing in order. For virtual
engines, we have to allow any job to transfer to a new ring, and cannot
coalesce the submissions, so require the completion fence instead,
forcing the persistent use of interrupts.

- We only drip feed single requests through each virtual engine and onto
the physical engines, even if there was enough work to fill all ELSP,
leaving small stalls with an idle CS event at the end of every request.
Could we be greedy and fill both slots? Being lazy is virtuous for load
distribution on less-than-full workloads though.

Other areas of improvement are more general, such as reducing lock
contention, reducing dispatch overhead, looking at direct submission
rather than bouncing around tasklets etc.

sseu: Lift the restriction to allow sseu to be reconfigured on virtual
engines composed of RENDER_CLASS (rcs).

v2: macroize check_user_mbz()
v3: Cancel virtual engines on wedging
v4: Commence commenting
v5: Replace 64b sibling_mask with a list of class:instance
v6: Drop the one-element array in the uabi
v7: Assert it is a virtual engine in to_virtual_engine()
v8: Skip over holes in [class][inst] so we can selftest with (vcs0, vcs2)

Link: https://github.com/intel/media-driver/pull/283
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190521211134.16117-6-chris@chris-wilson.co.uk
parent b81dde71
...@@ -227,6 +227,7 @@ struct intel_engine_execlists {
* @queue: queue of requests, in priority lists
*/
struct rb_root_cached queue;
struct rb_root_cached virtual;
/**
* @csb_write: control register for Context Switch buffer
...@@ -445,6 +446,7 @@ struct intel_engine_cs {
#define I915_ENGINE_HAS_PREEMPTION BIT(2)
#define I915_ENGINE_HAS_SEMAPHORES BIT(3)
#define I915_ENGINE_NEEDS_BREADCRUMB_TASKLET BIT(4)
#define I915_ENGINE_IS_VIRTUAL BIT(5)
unsigned int flags;
/*
...@@ -534,6 +536,12 @@ intel_engine_needs_breadcrumb_tasklet(const struct intel_engine_cs *engine)
return engine->flags & I915_ENGINE_NEEDS_BREADCRUMB_TASKLET;
}
static inline bool
intel_engine_is_virtual(const struct intel_engine_cs *engine)
{
return engine->flags & I915_ENGINE_IS_VIRTUAL;
}
#define instdone_slice_mask(dev_priv__) \
(IS_GEN(dev_priv__, 7) ? \
1 : RUNTIME_INFO(dev_priv__)->sseu.slice_mask)
......
This diff is collapsed.
...@@ -114,4 +114,13 @@ void intel_execlists_show_requests(struct intel_engine_cs *engine,
const char *prefix),
unsigned int max);
struct intel_context *
intel_execlists_create_virtual(struct i915_gem_context *ctx,
struct intel_engine_cs **siblings,
unsigned int count);
struct intel_context *
intel_execlists_clone_virtual(struct i915_gem_context *ctx,
struct intel_engine_cs *src);
#endif /* _INTEL_LRC_H_ */
...@@ -1310,6 +1310,185 @@ static int live_preempt_smoke(void *arg)
return err;
}
static int nop_virtual_engine(struct drm_i915_private *i915,
struct intel_engine_cs **siblings,
unsigned int nsibling,
unsigned int nctx,
unsigned int flags)
#define CHAIN BIT(0)
{
IGT_TIMEOUT(end_time);
struct i915_request *request[16];
struct i915_gem_context *ctx[16];
struct intel_context *ve[16];
unsigned long n, prime, nc;
struct igt_live_test t;
ktime_t times[2] = {};
int err;
GEM_BUG_ON(!nctx || nctx > ARRAY_SIZE(ctx));
for (n = 0; n < nctx; n++) {
ctx[n] = kernel_context(i915);
if (!ctx[n]) {
err = -ENOMEM;
nctx = n;
goto out;
}
ve[n] = intel_execlists_create_virtual(ctx[n],
siblings, nsibling);
if (IS_ERR(ve[n])) {
kernel_context_close(ctx[n]);
err = PTR_ERR(ve[n]);
nctx = n;
goto out;
}
err = intel_context_pin(ve[n]);
if (err) {
intel_context_put(ve[n]);
kernel_context_close(ctx[n]);
nctx = n;
goto out;
}
}
err = igt_live_test_begin(&t, i915, __func__, ve[0]->engine->name);
if (err)
goto out;
for_each_prime_number_from(prime, 1, 8192) {
times[1] = ktime_get_raw();
if (flags & CHAIN) {
for (nc = 0; nc < nctx; nc++) {
for (n = 0; n < prime; n++) {
request[nc] =
i915_request_create(ve[nc]);
if (IS_ERR(request[nc])) {
err = PTR_ERR(request[nc]);
goto out;
}
i915_request_add(request[nc]);
}
}
} else {
for (n = 0; n < prime; n++) {
for (nc = 0; nc < nctx; nc++) {
request[nc] =
i915_request_create(ve[nc]);
if (IS_ERR(request[nc])) {
err = PTR_ERR(request[nc]);
goto out;
}
i915_request_add(request[nc]);
}
}
}
for (nc = 0; nc < nctx; nc++) {
if (i915_request_wait(request[nc],
I915_WAIT_LOCKED,
HZ / 10) < 0) {
pr_err("%s(%s): wait for %llx:%lld timed out\n",
__func__, ve[0]->engine->name,
request[nc]->fence.context,
request[nc]->fence.seqno);
GEM_TRACE("%s(%s) failed at request %llx:%lld\n",
__func__, ve[0]->engine->name,
request[nc]->fence.context,
request[nc]->fence.seqno);
GEM_TRACE_DUMP();
i915_gem_set_wedged(i915);
break;
}
}
times[1] = ktime_sub(ktime_get_raw(), times[1]);
if (prime == 1)
times[0] = times[1];
if (__igt_timeout(end_time, NULL))
break;
}
err = igt_live_test_end(&t);
if (err)
goto out;
pr_info("Requestx%d latencies on %s: 1 = %lluns, %lu = %lluns\n",
nctx, ve[0]->engine->name, ktime_to_ns(times[0]),
prime, div64_u64(ktime_to_ns(times[1]), prime));
out:
if (igt_flush_test(i915, I915_WAIT_LOCKED))
err = -EIO;
for (nc = 0; nc < nctx; nc++) {
intel_context_unpin(ve[nc]);
intel_context_put(ve[nc]);
kernel_context_close(ctx[nc]);
}
return err;
}
static int live_virtual_engine(void *arg)
{
struct drm_i915_private *i915 = arg;
struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
struct intel_engine_cs *engine;
enum intel_engine_id id;
unsigned int class, inst;
int err = -ENODEV;
if (USES_GUC_SUBMISSION(i915))
return 0;
mutex_lock(&i915->drm.struct_mutex);
for_each_engine(engine, i915, id) {
err = nop_virtual_engine(i915, &engine, 1, 1, 0);
if (err) {
pr_err("Failed to wrap engine %s: err=%d\n",
engine->name, err);
goto out_unlock;
}
}
for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
int nsibling, n;
nsibling = 0;
for (inst = 0; inst <= MAX_ENGINE_INSTANCE; inst++) {
if (!i915->engine_class[class][inst])
continue;
siblings[nsibling++] = i915->engine_class[class][inst];
}
if (nsibling < 2)
continue;
for (n = 1; n <= nsibling + 1; n++) {
err = nop_virtual_engine(i915, siblings, nsibling,
n, 0);
if (err)
goto out_unlock;
}
err = nop_virtual_engine(i915, siblings, nsibling, n, CHAIN);
if (err)
goto out_unlock;
}
out_unlock:
mutex_unlock(&i915->drm.struct_mutex);
return err;
}
int intel_execlists_live_selftests(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
...@@ -1322,6 +1501,7 @@ int intel_execlists_live_selftests(struct drm_i915_private *i915)
SUBTEST(live_chain_preempt),
SUBTEST(live_preempt_hang),
SUBTEST(live_preempt_smoke),
SUBTEST(live_virtual_engine),
};
if (!HAS_EXECLISTS(i915))
......
...@@ -91,4 +91,9 @@ static inline bool __tasklet_enable(struct tasklet_struct *t)
return atomic_dec_and_test(&t->count);
}
static inline bool __tasklet_is_scheduled(struct tasklet_struct *t)
{
return test_bit(TASKLET_STATE_SCHED, &t->state);
}
#endif /* __I915_GEM_H__ */
...@@ -86,6 +86,7 @@
*/
#include <linux/log2.h>
#include <linux/nospec.h>
#include <drm/i915_drm.h>
...@@ -1218,7 +1219,6 @@ __intel_context_reconfigure_sseu(struct intel_context *ce,
int ret;
GEM_BUG_ON(INTEL_GEN(ce->gem_context->i915) < 8);
GEM_BUG_ON(ce->engine->id != RCS0);
ret = intel_context_lock_pinned(ce);
if (ret)
...@@ -1412,7 +1412,100 @@ struct set_engines {
struct i915_gem_engines *engines;
};
static int
set_engines__load_balance(struct i915_user_extension __user *base, void *data)
{
struct i915_context_engines_load_balance __user *ext =
container_of_user(base, typeof(*ext), base);
const struct set_engines *set = data;
struct intel_engine_cs *stack[16];
struct intel_engine_cs **siblings;
struct intel_context *ce;
u16 num_siblings, idx;
unsigned int n;
int err;
if (!HAS_EXECLISTS(set->ctx->i915))
return -ENODEV;
if (USES_GUC_SUBMISSION(set->ctx->i915))
return -ENODEV; /* not implemented yet */
if (get_user(idx, &ext->engine_index))
return -EFAULT;
if (idx >= set->engines->num_engines) {
DRM_DEBUG("Invalid placement value, %d >= %d\n",
idx, set->engines->num_engines);
return -EINVAL;
}
idx = array_index_nospec(idx, set->engines->num_engines);
if (set->engines->engines[idx]) {
DRM_DEBUG("Invalid placement[%d], already occupied\n", idx);
return -EEXIST;
}
if (get_user(num_siblings, &ext->num_siblings))
return -EFAULT;
err = check_user_mbz(&ext->flags);
if (err)
return err;
err = check_user_mbz(&ext->mbz64);
if (err)
return err;
siblings = stack;
if (num_siblings > ARRAY_SIZE(stack)) {
siblings = kmalloc_array(num_siblings,
sizeof(*siblings),
GFP_KERNEL);
if (!siblings)
return -ENOMEM;
}
for (n = 0; n < num_siblings; n++) {
struct i915_engine_class_instance ci;
if (copy_from_user(&ci, &ext->engines[n], sizeof(ci))) {
err = -EFAULT;
goto out_siblings;
}
siblings[n] = intel_engine_lookup_user(set->ctx->i915,
ci.engine_class,
ci.engine_instance);
if (!siblings[n]) {
DRM_DEBUG("Invalid sibling[%d]: { class:%d, inst:%d }\n",
n, ci.engine_class, ci.engine_instance);
err = -EINVAL;
goto out_siblings;
}
}
ce = intel_execlists_create_virtual(set->ctx, siblings, n);
if (IS_ERR(ce)) {
err = PTR_ERR(ce);
goto out_siblings;
}
if (cmpxchg(&set->engines->engines[idx], NULL, ce)) {
intel_context_put(ce);
err = -EEXIST;
goto out_siblings;
}
out_siblings:
if (siblings != stack)
kfree(siblings);
return err;
}
static const i915_user_extension_fn set_engines__extensions[] = {
[I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE] = set_engines__load_balance,
};
static int
...@@ -1737,14 +1830,29 @@ static int clone_engines(struct i915_gem_context *dst,
clone->i915 = dst->i915;
for (n = 0; n < e->num_engines; n++) {
struct intel_engine_cs *engine;
if (!e->engines[n]) {
clone->engines[n] = NULL;
continue;
}
engine = e->engines[n]->engine;
/*
* Virtual engines are singletons; they can only exist
* inside a single context, because they embed their
* HW context... As each virtual context implies a single
* timeline (each engine can only dequeue a single request
* at any time), it would be surprising for two contexts
* to use the same engine. So let's create a copy of
* the virtual engine instead.
*/
if (intel_engine_is_virtual(engine))
clone->engines[n] =
intel_execlists_clone_virtual(dst, engine);
else
clone->engines[n] = intel_context_create(dst, engine);
if (IS_ERR_OR_NULL(clone->engines[n])) {
__free_engines(clone, n);
goto err_unlock;
}
......
...@@ -150,17 +150,26 @@ sched_lock_engine(const struct i915_sched_node *node,
struct intel_engine_cs *locked,
struct sched_cache *cache)
{
const struct i915_request *rq = node_to_request(node);
struct intel_engine_cs *engine;
GEM_BUG_ON(!locked);
/*
* Virtual engines complicate acquiring the engine timeline lock,
* as their rq->engine pointer is not stable until under that
* engine lock. The simple ploy we use is to take the lock then
* check that the rq still belongs to the newly locked engine.
*/
while (locked != (engine = READ_ONCE(rq->engine))) {
spin_unlock(&locked->timeline.lock);
memset(cache, 0, sizeof(*cache));
spin_lock(&engine->timeline.lock);
locked = engine;
}
GEM_BUG_ON(locked != engine);
return locked;
}
static inline int rq_prio(const struct i915_request *rq)
...@@ -272,6 +281,7 @@ static void __i915_schedule(struct i915_sched_node *node,
spin_lock(&engine->timeline.lock);
/* Fifo and depth-first replacement ensure our deps execute before us */
engine = sched_lock_engine(node, engine, &cache);
list_for_each_entry_safe_reverse(dep, p, &dfs, dfs_link) {
INIT_LIST_HEAD(&dep->dfs_link);
...@@ -283,8 +293,11 @@ static void __i915_schedule(struct i915_sched_node *node,
if (prio <= node->attr.priority || node_signaled(node))
continue;
GEM_BUG_ON(node_to_request(node)->engine != engine);
node->attr.priority = prio;
if (!list_empty(&node->link)) {
GEM_BUG_ON(intel_engine_is_virtual(engine));
if (!cache.priolist)
cache.priolist =
i915_sched_lookup_priolist(engine,
......
...@@ -26,6 +26,7 @@ struct i915_timeline {
spinlock_t lock;
#define TIMELINE_CLIENT 0 /* default subclass */
#define TIMELINE_ENGINE 1
#define TIMELINE_VIRTUAL 2
struct mutex mutex; /* protects the flow of requests */
unsigned int pin_count;
......
...@@ -137,6 +137,7 @@ struct i915_engine_class_instance {
__u16 engine_class; /* see enum drm_i915_gem_engine_class */
__u16 engine_instance;
#define I915_ENGINE_CLASS_INVALID_NONE -1
#define I915_ENGINE_CLASS_INVALID_VIRTUAL -2
};
/**
...@@ -1608,8 +1609,46 @@ struct drm_i915_gem_context_param_sseu {
__u32 rsvd;
};
/*
* i915_context_engines_load_balance:
*
* Enable load balancing across this set of engines.
*
* Into the I915_EXEC_DEFAULT slot [0], a virtual engine is created that when
* used will proxy the execbuffer request onto one of the set of engines
* in such a way as to distribute the load evenly across the set.
*
* The set of engines must be compatible (e.g. the same HW class) as they
* will share the same logical GPU context and ring.
*
* To intermix rendering with the virtual engine and direct rendering onto
* the backing engines (bypassing the load balancing proxy), the context must
* be defined to use a single timeline for all engines.
*/
struct i915_context_engines_load_balance {
struct i915_user_extension base;
__u16 engine_index;
__u16 num_siblings;
__u32 flags; /* all undefined flags must be zero */
__u64 mbz64; /* reserved for future use; must be zero */
struct i915_engine_class_instance engines[0];
} __attribute__((packed));
#define I915_DEFINE_CONTEXT_ENGINES_LOAD_BALANCE(name__, N__) struct { \
struct i915_user_extension base; \
__u16 engine_index; \
__u16 num_siblings; \
__u32 flags; \
__u64 mbz64; \
struct i915_engine_class_instance engines[N__]; \
} __attribute__((packed)) name__
struct i915_context_param_engines {
__u64 extensions; /* linked chain of extension blocks, 0 terminates */
#define I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE 0 /* see i915_context_engines_load_balance */
struct i915_engine_class_instance engines[0];
} __attribute__((packed));
......