Commit b361237b authored by Chris Wilson, committed by Daniel Vetter

drm/i915: Juggle code order to ease flow of the next patch

Move the wait-for-rendering logic around in the file so that we can
group it together with the subsequent variations. The general goal is to
have the lower level routines clustered together and then the higher
level logic building upon those low level routines that came before.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
parent 504c7267
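
Editor's note: to make the intended layering easier to see, here is a minimal standalone C sketch of the call structure this reorder sets up — the wedge and lazy-request checks at the bottom, __wait_seqno as the core wait loop, and i915_wait_seqno / i915_gem_object_wait_rendering built on top. All names below are illustrative stand-ins, not driver code.

#include <stdio.h>

/* Low-level checks, clustered first in the file after this patch. */
static int check_wedge(void) { return 0; }	/* stand-in for i915_gem_check_wedge() */
static int check_olr(void)   { return 0; }	/* stand-in for i915_gem_check_olr() */

/* The core wait loop; stand-in for __wait_seqno(). */
static int wait_seqno_core(void)
{
	puts("core wait loop runs here");
	return 0;
}

/* Mid level: validate state, then defer to the core loop;
 * stand-in for i915_wait_seqno(). */
static int wait_seqno(void)
{
	int ret;

	ret = check_wedge();
	if (ret)
		return ret;
	ret = check_olr();
	if (ret)
		return ret;
	return wait_seqno_core();
}

/* High level: per-object wait built on the mid level;
 * stand-in for i915_gem_object_wait_rendering(). */
static int wait_rendering(void)
{
	return wait_seqno();
}

int main(void)
{
	return wait_rendering();
}
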
@@ -945,6 +945,194 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
	return ret;
}

int
i915_gem_check_wedge(struct drm_i915_private *dev_priv,
		     bool interruptible)
{
	if (atomic_read(&dev_priv->mm.wedged)) {
		struct completion *x = &dev_priv->error_completion;
		bool recovery_complete;
		unsigned long flags;

		/* Give the error handler a chance to run. */
		spin_lock_irqsave(&x->wait.lock, flags);
		recovery_complete = x->done > 0;
		spin_unlock_irqrestore(&x->wait.lock, flags);

		/* Non-interruptible callers can't handle -EAGAIN, hence return
		 * -EIO unconditionally for these. */
		if (!interruptible)
			return -EIO;

		/* Recovery complete, but still wedged means reset failure. */
		if (recovery_complete)
			return -EIO;

		return -EAGAIN;
	}

	return 0;
}
/*
 * Compare seqno against outstanding lazy request. Emit a request if they are
 * equal.
 */
static int
i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno)
{
	int ret;

	BUG_ON(!mutex_is_locked(&ring->dev->struct_mutex));

	ret = 0;
	if (seqno == ring->outstanding_lazy_request)
		ret = i915_add_request(ring, NULL, NULL);

	return ret;
}
/**
 * __wait_seqno - wait until execution of seqno has finished
 * @ring: the ring expected to report seqno
 * @seqno: the sequence number to wait for
 * @interruptible: do an interruptible wait (normally yes)
 * @timeout: in - how long to wait (NULL forever); out - how much time remaining
 *
 * Returns 0 if the seqno was found within the allotted time. Else returns the
 * errno with remaining time filled in the timeout argument.
 */
static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
			bool interruptible, struct timespec *timeout)
{
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
	struct timespec before, now, wait_time = {1, 0};
	unsigned long timeout_jiffies;
	long end;
	bool wait_forever = true;
	int ret;

	if (i915_seqno_passed(ring->get_seqno(ring, true), seqno))
		return 0;

	trace_i915_gem_request_wait_begin(ring, seqno);

	if (timeout != NULL) {
		wait_time = *timeout;
		wait_forever = false;
	}

	timeout_jiffies = timespec_to_jiffies(&wait_time);

	if (WARN_ON(!ring->irq_get(ring)))
		return -ENODEV;

	/* Record current time in case interrupted by signal, or wedged */
	getrawmonotonic(&before);

#define EXIT_COND \
	(i915_seqno_passed(ring->get_seqno(ring, false), seqno) || \
	atomic_read(&dev_priv->mm.wedged))
	do {
		if (interruptible)
			end = wait_event_interruptible_timeout(ring->irq_queue,
							       EXIT_COND,
							       timeout_jiffies);
		else
			end = wait_event_timeout(ring->irq_queue, EXIT_COND,
						 timeout_jiffies);

		ret = i915_gem_check_wedge(dev_priv, interruptible);
		if (ret)
			end = ret;
	} while (end == 0 && wait_forever);

	getrawmonotonic(&now);

	ring->irq_put(ring);
	trace_i915_gem_request_wait_end(ring, seqno);
#undef EXIT_COND

	if (timeout) {
		struct timespec sleep_time = timespec_sub(now, before);
		*timeout = timespec_sub(*timeout, sleep_time);
	}

	switch (end) {
	case -EIO:
	case -EAGAIN: /* Wedged */
	case -ERESTARTSYS: /* Signal */
		return (int)end;
	case 0: /* Timeout */
		if (timeout)
			set_normalized_timespec(timeout, 0, 0);
		return -ETIME;
	default: /* Completed */
		WARN_ON(end < 0); /* We're not aware of other errors */
		return 0;
	}
}
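
Editor's note: __wait_seqno's @timeout is both an input budget and an output of remaining time, as the kernel-doc above describes. A minimal userspace-style sketch of that in/out convention, using a hypothetical wait_with_timeout() stand-in (not the driver function):

#include <stdio.h>
#include <time.h>

/* Stand-in wait: pretend the wait consumed half a second, then signalled. */
static int wait_with_timeout(struct timespec *timeout)
{
	if (timeout) {
		timeout->tv_nsec -= 500000000L;
		if (timeout->tv_nsec < 0) {
			timeout->tv_sec -= 1;
			timeout->tv_nsec += 1000000000L;
		}
	}
	return 0;	/* 0: signalled; -ETIME would mean the budget expired */
}

int main(void)
{
	struct timespec budget = { .tv_sec = 1, .tv_nsec = 0 };

	/* NULL would mean "wait forever"; a non-NULL timespec is the budget
	 * going in and the remaining time coming back out. */
	if (wait_with_timeout(&budget) == 0)
		printf("done, %ld.%09lds of the budget left\n",
		       (long)budget.tv_sec, budget.tv_nsec);
	return 0;
}
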
/**
 * Waits for a sequence number to be signaled, and cleans up the
 * request and object lists appropriately for that event.
 */
int
i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	bool interruptible = dev_priv->mm.interruptible;
	int ret;

	BUG_ON(!mutex_is_locked(&dev->struct_mutex));
	BUG_ON(seqno == 0);

	ret = i915_gem_check_wedge(dev_priv, interruptible);
	if (ret)
		return ret;

	ret = i915_gem_check_olr(ring, seqno);
	if (ret)
		return ret;

	return __wait_seqno(ring, seqno, interruptible, NULL);
}
/**
 * Ensures that all rendering to the object has completed and the object is
 * safe to unbind from the GTT or access from the CPU.
 */
static __must_check int
i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
			       bool readonly)
{
	struct intel_ring_buffer *ring = obj->ring;
	u32 seqno;
	int ret;

	seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
	if (seqno == 0)
		return 0;

	ret = i915_wait_seqno(ring, seqno);
	if (ret)
		return ret;

	i915_gem_retire_requests_ring(ring);

	/* Manually manage the write flush as we may not yet have
	 * retired the buffer.
	 */
	if (obj->last_write_seqno &&
	    i915_seqno_passed(seqno, obj->last_write_seqno)) {
		obj->last_write_seqno = 0;
		obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
	}

	return 0;
}
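
Editor's note: the readonly parameter above selects which seqno to wait on — a read-only user only needs outstanding GPU writes to finish (last_write_seqno), while a writer must also wait out readers (last_read_seqno, which in i915 covers writes as well and so is at least as recent). A small standalone sketch of that selection, with stand-in types:

#include <stdint.h>
#include <stdio.h>

struct fake_obj {
	uint32_t last_read_seqno;	/* covers reads and writes */
	uint32_t last_write_seqno;	/* covers writes only */
};

static uint32_t seqno_to_wait_on(const struct fake_obj *obj, int readonly)
{
	return readonly ? obj->last_write_seqno : obj->last_read_seqno;
}

int main(void)
{
	struct fake_obj obj = { .last_read_seqno = 42, .last_write_seqno = 40 };

	printf("readonly wait: %u, full wait: %u\n",
	       seqno_to_wait_on(&obj, 1), seqno_to_wait_on(&obj, 0));
	return 0;
}
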
/**
 * Called when user space prepares to use an object with the CPU, either
 * through the mmap ioctl's mapping or a GTT mapping.
@@ -1952,197 +2140,6 @@ i915_gem_retire_work_handler(struct work_struct *work)
	mutex_unlock(&dev->struct_mutex);
}
int
i915_gem_check_wedge(struct drm_i915_private *dev_priv,
		     bool interruptible)
{
	if (atomic_read(&dev_priv->mm.wedged)) {
		struct completion *x = &dev_priv->error_completion;
		bool recovery_complete;
		unsigned long flags;

		/* Give the error handler a chance to run. */
		spin_lock_irqsave(&x->wait.lock, flags);
		recovery_complete = x->done > 0;
		spin_unlock_irqrestore(&x->wait.lock, flags);

		/* Non-interruptible callers can't handle -EAGAIN, hence return
		 * -EIO unconditionally for these. */
		if (!interruptible)
			return -EIO;

		/* Recovery complete, but still wedged means reset failure. */
		if (recovery_complete)
			return -EIO;

		return -EAGAIN;
	}

	return 0;
}
/*
 * Compare seqno against outstanding lazy request. Emit a request if they are
 * equal.
 */
static int
i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno)
{
	int ret;

	BUG_ON(!mutex_is_locked(&ring->dev->struct_mutex));

	ret = 0;
	if (seqno == ring->outstanding_lazy_request)
		ret = i915_add_request(ring, NULL, NULL);

	return ret;
}
/**
 * __wait_seqno - wait until execution of seqno has finished
 * @ring: the ring expected to report seqno
 * @seqno: the sequence number to wait for
 * @interruptible: do an interruptible wait (normally yes)
 * @timeout: in - how long to wait (NULL forever); out - how much time remaining
 *
 * Returns 0 if the seqno was found within the allotted time. Else returns the
 * errno with remaining time filled in the timeout argument.
 */
static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
			bool interruptible, struct timespec *timeout)
{
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
	struct timespec before, now, wait_time = {1, 0};
	unsigned long timeout_jiffies;
	long end;
	bool wait_forever = true;
	int ret;

	if (i915_seqno_passed(ring->get_seqno(ring, true), seqno))
		return 0;

	trace_i915_gem_request_wait_begin(ring, seqno);

	if (timeout != NULL) {
		wait_time = *timeout;
		wait_forever = false;
	}

	timeout_jiffies = timespec_to_jiffies(&wait_time);

	if (WARN_ON(!ring->irq_get(ring)))
		return -ENODEV;

	/* Record current time in case interrupted by signal, or wedged */
	getrawmonotonic(&before);

#define EXIT_COND \
	(i915_seqno_passed(ring->get_seqno(ring, false), seqno) || \
	atomic_read(&dev_priv->mm.wedged))
	do {
		if (interruptible)
			end = wait_event_interruptible_timeout(ring->irq_queue,
							       EXIT_COND,
							       timeout_jiffies);
		else
			end = wait_event_timeout(ring->irq_queue, EXIT_COND,
						 timeout_jiffies);

		ret = i915_gem_check_wedge(dev_priv, interruptible);
		if (ret)
			end = ret;
	} while (end == 0 && wait_forever);

	getrawmonotonic(&now);

	ring->irq_put(ring);
	trace_i915_gem_request_wait_end(ring, seqno);
#undef EXIT_COND

	if (timeout) {
		struct timespec sleep_time = timespec_sub(now, before);
		*timeout = timespec_sub(*timeout, sleep_time);
	}

	switch (end) {
	case -EIO:
	case -EAGAIN: /* Wedged */
	case -ERESTARTSYS: /* Signal */
		return (int)end;
	case 0: /* Timeout */
		if (timeout)
			set_normalized_timespec(timeout, 0, 0);
		return -ETIME;
	default: /* Completed */
		WARN_ON(end < 0); /* We're not aware of other errors */
		return 0;
	}
}
/**
 * Waits for a sequence number to be signaled, and cleans up the
 * request and object lists appropriately for that event.
 */
int
i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno)
{
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
	int ret = 0;

	BUG_ON(seqno == 0);

	ret = i915_gem_check_wedge(dev_priv, dev_priv->mm.interruptible);
	if (ret)
		return ret;

	ret = i915_gem_check_olr(ring, seqno);
	if (ret)
		return ret;

	ret = __wait_seqno(ring, seqno, dev_priv->mm.interruptible, NULL);

	return ret;
}
/**
 * Ensures that all rendering to the object has completed and the object is
 * safe to unbind from the GTT or access from the CPU.
 */
static __must_check int
i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
			       bool readonly)
{
	u32 seqno;
	int ret;

	/* If there is rendering queued on the buffer being evicted, wait for
	 * it.
	 */
	if (readonly)
		seqno = obj->last_write_seqno;
	else
		seqno = obj->last_read_seqno;
	if (seqno == 0)
		return 0;

	ret = i915_wait_seqno(obj->ring, seqno);
	if (ret)
		return ret;

	/* Manually manage the write flush as we may not yet have retired
	 * the buffer.
	 */
	if (obj->last_write_seqno &&
	    i915_seqno_passed(seqno, obj->last_write_seqno)) {
		obj->last_write_seqno = 0;
		obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
	}

	i915_gem_retire_requests_ring(obj->ring);

	return 0;
}
/**
 * Ensures that an object will eventually get non-busy by flushing any required
 * write domains, emitting any outstanding lazy request and retiring and