Commit 299259a3 authored by Chris Wilson

drm/i915: Store the reset counter when constructing a request

As the request is only valid during the same global reset epoch, we can
record the current reset_counter when constructing the request and reuse
it when waiting upon that request in future. This removes a very hairy
atomic check serialised by the struct_mutex at the time of waiting and
allows us to transfer those waits to a central dispatcher for all
waiters and all requests.
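
The idiom is easy to see in isolation. Below is a standalone userspace sketch (illustration only, not driver code; the struct and function names are invented for this example): sample a global epoch counter when the request is constructed, then compare it at wait time. In the driver the epoch is read with i915_reset_counter(&dev_priv->gpu_error) and stored in req->reset_counter, as the diff below shows.

```c
/* Illustration only: the reset-epoch idiom this patch applies, reduced to
 * plain C11. All names here are hypothetical; see the diff for the real
 * driver code.
 */
#include <stdatomic.h>
#include <stdio.h>

static atomic_uint reset_epoch;         /* bumped on every (simulated) reset */

struct request {
        unsigned int reset_counter;     /* epoch sampled at construction */
};

static void request_init(struct request *req)
{
        /* Record the epoch once, instead of re-reading it under a lock
         * every time someone waits on the request.
         */
        req->reset_counter = atomic_load(&reset_epoch);
}

/* Returns 0 if the request is still in its epoch, -1 (think -EAGAIN)
 * if a reset happened since it was constructed.
 */
static int request_wait(const struct request *req)
{
        return req->reset_counter == atomic_load(&reset_epoch) ? 0 : -1;
}

int main(void)
{
        struct request req;

        request_init(&req);
        printf("before reset: %d\n", request_wait(&req));       /* 0 */

        atomic_fetch_add(&reset_epoch, 1);      /* simulate a GPU reset */
        printf("after reset:  %d\n", request_wait(&req));       /* -1 */
        return 0;
}
```

The win is that the waiter no longer needs struct_mutex to decide whether its epoch is stale; the comparison is a plain read against an immutable per-request value.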

PS: With per-engine resets, we obviously cannot assume a global reset
epoch for the requests - a per-engine epoch makes the most sense. The
challenge then is how to handle checking in the waiter for when to break
the wait, as the fine-grained reset may also want to requeue the
request (i.e. the assumption that just because the epoch changes the
request is completed may be broken - or we just avoid breaking that
assumption with the fine-grained resets).
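
As a purely hypothetical sketch of that per-engine direction (nothing below exists in this patch; every name is invented): each engine would carry its own epoch, and an epoch change alone could no longer be taken as completion once a fine-grained reset may requeue the request.

```c
/* Hypothetical per-engine variant, illustrating the PS above; not part of
 * this patch. An epoch mismatch must now distinguish "completed" from
 * "requeued by a fine-grained reset".
 */
enum wait_break { WAIT_KEEP_WAITING, WAIT_COMPLETED, WAIT_REQUEUED };

struct engine {
        unsigned int reset_epoch;       /* per-engine, not global */
};

struct request {
        struct engine *engine;
        unsigned int reset_counter;     /* engine epoch at construction */
        int requeued;                   /* set if a reset requeued us */
};

static enum wait_break check_epoch(const struct request *req)
{
        if (req->reset_counter == req->engine->reset_epoch)
                return WAIT_KEEP_WAITING;
        /* Epoch moved: completion can no longer be assumed. */
        return req->requeued ? WAIT_REQUEUED : WAIT_COMPLETED;
}
```
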
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: http://patchwork.freedesktop.org/patch/msgid/1460565315-7748-7-git-send-email-chris@chris-wilson.co.uk
parent d98c52cf
@@ -2250,6 +2250,7 @@ struct drm_i915_gem_request {
 	/** On Which ring this request was generated */
 	struct drm_i915_private *i915;
 	struct intel_engine_cs *engine;
+	unsigned reset_counter;
 
 	 /** GEM sequence number associated with the previous request,
 	  * when the HWS breadcrumb is equal to this the GPU is processing
@@ -3160,7 +3161,6 @@ void __i915_add_request(struct drm_i915_gem_request *req,
 #define i915_add_request_no_flush(req) \
 	__i915_add_request(req, NULL, false)
 int __i915_wait_request(struct drm_i915_gem_request *req,
-			unsigned reset_counter,
 			bool interruptible,
 			s64 *timeout,
 			struct intel_rps_client *rps);
...
@@ -1213,7 +1213,6 @@ static int __i915_spin_request(struct drm_i915_gem_request *req, int state)
 /**
  * __i915_wait_request - wait until execution of request has finished
  * @req: duh!
- * @reset_counter: reset sequence associated with the given request
  * @interruptible: do an interruptible wait (normally yes)
  * @timeout: in - how long to wait (NULL forever); out - how much time remaining
  *
@@ -1228,7 +1227,6 @@ static int __i915_spin_request(struct drm_i915_gem_request *req, int state)
  * errno with remaining time filled in timeout argument.
  */
 int __i915_wait_request(struct drm_i915_gem_request *req,
-			unsigned reset_counter,
 			bool interruptible,
 			s64 *timeout,
 			struct intel_rps_client *rps)
@@ -1290,7 +1288,7 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
 	/* We need to check whether any gpu reset happened in between
 	 * the caller grabbing the seqno and now ... */
-	if (reset_counter != i915_reset_counter(&dev_priv->gpu_error)) {
+	if (req->reset_counter != i915_reset_counter(&dev_priv->gpu_error)) {
 		/* ... but upgrade the -EAGAIN to an -EIO if the gpu
 		 * is truely gone. */
 		ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
@@ -1460,13 +1458,7 @@ i915_wait_request(struct drm_i915_gem_request *req)
 	BUG_ON(!mutex_is_locked(&dev->struct_mutex));
 
-	ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
-	if (ret)
-		return ret;
-
-	ret = __i915_wait_request(req,
-				  i915_reset_counter(&dev_priv->gpu_error),
-				  interruptible, NULL, NULL);
+	ret = __i915_wait_request(req, interruptible, NULL, NULL);
 	if (ret)
 		return ret;
@@ -1541,7 +1533,6 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
 	struct drm_device *dev = obj->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_gem_request *requests[I915_NUM_ENGINES];
-	unsigned reset_counter;
 	int ret, i, n = 0;
 
 	BUG_ON(!mutex_is_locked(&dev->struct_mutex));
@@ -1550,12 +1541,6 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
 	if (!obj->active)
 		return 0;
 
-	ret = i915_gem_check_wedge(&dev_priv->gpu_error, true);
-	if (ret)
-		return ret;
-
-	reset_counter = i915_reset_counter(&dev_priv->gpu_error);
-
 	if (readonly) {
 		struct drm_i915_gem_request *req;
@@ -1577,9 +1562,9 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
 	}
 
 	mutex_unlock(&dev->struct_mutex);
+	ret = 0;
 	for (i = 0; ret == 0 && i < n; i++)
-		ret = __i915_wait_request(requests[i], reset_counter, true,
-					  NULL, rps);
+		ret = __i915_wait_request(requests[i], true, NULL, rps);
 	mutex_lock(&dev->struct_mutex);
 
 	for (i = 0; i < n; i++) {
@@ -2735,6 +2720,7 @@ __i915_gem_request_alloc(struct intel_engine_cs *engine,
 			 struct drm_i915_gem_request **req_out)
 {
 	struct drm_i915_private *dev_priv = to_i915(engine->dev);
+	unsigned reset_counter = i915_reset_counter(&dev_priv->gpu_error);
 	struct drm_i915_gem_request *req;
 	int ret;
@@ -2743,6 +2729,11 @@ __i915_gem_request_alloc(struct intel_engine_cs *engine,
 	*req_out = NULL;
 
+	ret = i915_gem_check_wedge(&dev_priv->gpu_error,
+				   dev_priv->mm.interruptible);
+	if (ret)
+		return ret;
+
 	req = kmem_cache_zalloc(dev_priv->requests, GFP_KERNEL);
 	if (req == NULL)
 		return -ENOMEM;
@@ -2754,6 +2745,7 @@ __i915_gem_request_alloc(struct intel_engine_cs *engine,
 	kref_init(&req->ref);
 	req->i915 = dev_priv;
 	req->engine = engine;
+	req->reset_counter = reset_counter;
 	req->ctx = ctx;
 	i915_gem_context_reference(req->ctx);
@@ -3132,11 +3124,9 @@ i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
 int
 i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_gem_wait *args = data;
 	struct drm_i915_gem_object *obj;
 	struct drm_i915_gem_request *req[I915_NUM_ENGINES];
-	unsigned reset_counter;
 	int i, n = 0;
 	int ret;
@@ -3170,7 +3160,6 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 	}
 	drm_gem_object_unreference(&obj->base);
 
-	reset_counter = i915_reset_counter(&dev_priv->gpu_error);
 	for (i = 0; i < I915_NUM_ENGINES; i++) {
 		if (obj->last_read_req[i] == NULL)
@@ -3183,7 +3172,7 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 	for (i = 0; i < n; i++) {
 		if (ret == 0)
-			ret = __i915_wait_request(req[i], reset_counter, true,
+			ret = __i915_wait_request(req[i], true,
 						  args->timeout_ns > 0 ? &args->timeout_ns : NULL,
 						  to_rps_client(file));
 		i915_gem_request_unreference__unlocked(req[i]);
@@ -3215,7 +3204,6 @@ __i915_gem_object_sync(struct drm_i915_gem_object *obj,
 	if (!i915_semaphore_is_enabled(obj->base.dev)) {
 		struct drm_i915_private *i915 = to_i915(obj->base.dev);
 		ret = __i915_wait_request(from_req,
-					  i915_reset_counter(&i915->gpu_error),
 					  i915->mm.interruptible,
 					  NULL,
 					  &i915->rps.semaphores);
@@ -4171,7 +4159,6 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
 	struct drm_i915_file_private *file_priv = file->driver_priv;
 	unsigned long recent_enough = jiffies - DRM_I915_THROTTLE_JIFFIES;
 	struct drm_i915_gem_request *request, *target = NULL;
-	unsigned reset_counter;
 	int ret;
 
 	ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
@@ -4196,7 +4183,6 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
 		target = request;
 	}
-	reset_counter = i915_reset_counter(&dev_priv->gpu_error);
 	if (target)
 		i915_gem_request_reference(target);
 	spin_unlock(&file_priv->mm.lock);
@@ -4204,7 +4190,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
 	if (target == NULL)
 		return 0;
 
-	ret = __i915_wait_request(target, reset_counter, true, NULL, NULL);
+	ret = __i915_wait_request(target, true, NULL, NULL);
 	if (ret == 0)
 		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
...
@@ -65,7 +65,6 @@ static void wait_rendering(struct drm_i915_gem_object *obj)
 {
 	struct drm_device *dev = obj->base.dev;
 	struct drm_i915_gem_request *requests[I915_NUM_ENGINES];
-	unsigned reset_counter;
 	int i, n;
 
 	if (!obj->active)
@@ -82,12 +81,10 @@ static void wait_rendering(struct drm_i915_gem_object *obj)
 		requests[n++] = i915_gem_request_reference(req);
 	}
 
-	reset_counter = atomic_read(&to_i915(dev)->gpu_error.reset_counter);
 	mutex_unlock(&dev->struct_mutex);
 
 	for (i = 0; i < n; i++)
-		__i915_wait_request(requests[i], reset_counter, false,
-				    NULL, NULL);
+		__i915_wait_request(requests[i], false, NULL, NULL);
 
 	mutex_lock(&dev->struct_mutex);
...
@@ -11365,7 +11365,6 @@ static void intel_mmio_flip_work_func(struct work_struct *work)
 	if (mmio_flip->req) {
 		WARN_ON(__i915_wait_request(mmio_flip->req,
-					    mmio_flip->crtc->reset_counter,
 					    false, NULL,
 					    &mmio_flip->i915->rps.mmioflips));
 		i915_gem_request_unreference__unlocked(mmio_flip->req);
@@ -13426,9 +13425,6 @@ static int intel_atomic_prepare_commit(struct drm_device *dev,
 	ret = drm_atomic_helper_prepare_planes(dev, state);
 
 	if (!ret && !async && !i915_reset_in_progress_or_wedged(&dev_priv->gpu_error)) {
-		u32 reset_counter;
-
-		reset_counter = i915_reset_counter(&dev_priv->gpu_error);
 		mutex_unlock(&dev->struct_mutex);
 
 		for_each_plane_in_state(state, plane, plane_state, i) {
@@ -13439,8 +13435,7 @@ static int intel_atomic_prepare_commit(struct drm_device *dev,
 				continue;
 
 			ret = __i915_wait_request(intel_plane_state->wait_req,
-						  reset_counter, true,
-						  NULL, NULL);
+						  true, NULL, NULL);
 
 			/* Swallow -EIO errors to allow updates during hw lockup. */
 			if (ret == -EIO)
...
@@ -890,16 +890,9 @@ static int logical_ring_prepare(struct drm_i915_gem_request *req, int bytes)
  */
 int intel_logical_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
 {
-	struct drm_i915_private *dev_priv;
 	int ret;
 
 	WARN_ON(req == NULL);
-	dev_priv = req->i915;
-
-	ret = i915_gem_check_wedge(&dev_priv->gpu_error,
-				   dev_priv->mm.interruptible);
-	if (ret)
-		return ret;
 
 	ret = logical_ring_prepare(req, num_dwords * sizeof(uint32_t));
 	if (ret)
...
@@ -2364,7 +2364,6 @@ int intel_engine_idle(struct intel_engine_cs *engine)
 	/* Make sure we do not trigger any retires */
 	return __i915_wait_request(req,
-				   i915_reset_counter(&req->i915->gpu_error),
 				   req->i915->mm.interruptible,
 				   NULL, NULL);
 }
@@ -2495,11 +2494,6 @@ int intel_ring_begin(struct drm_i915_gem_request *req,
 	engine = req->engine;
 	dev_priv = req->i915;
 
-	ret = i915_gem_check_wedge(&dev_priv->gpu_error,
-				   dev_priv->mm.interruptible);
-	if (ret)
-		return ret;
-
 	ret = __intel_ring_prepare(engine, num_dwords * sizeof(uint32_t));
 	if (ret)
 		return ret;