Commit d26e3af8 authored by Chris Wilson, committed by Daniel Vetter

drm/i915: Refactor the wait_rendering completion into a common routine

Harmonise the completion logic between the non-blocking and normal
wait_rendering paths, and move that logic into a common function.

In the process, we note that the last_write_seqno is by definition the
earlier of the two read/write seqnos and so all successful waits will
have passed the last_write_seqno. Therefore we can unconditionally clear
the write seqno and its domains in the completion logic.
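
As an illustration, here is a minimal standalone C sketch of that invariant (not part of the patch; the seqno values are hypothetical and seqno_passed() is a local stand-in mirroring the driver's wrap-safe i915_seqno_passed() comparison):

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    /* Stand-in for the driver's i915_seqno_passed(): "has seq1
     * reached seq2?", using the same wrap-safe signed comparison. */
    static bool seqno_passed(uint32_t seq1, uint32_t seq2)
    {
            return (int32_t)(seq1 - seq2) >= 0;
    }

    int main(void)
    {
            /* Hypothetical seqnos: an object's last write is submitted
             * no later than its last read, so the write seqno can
             * never be the later of the two. */
            uint32_t last_write_seqno = 100;
            uint32_t last_read_seqno = 105;

            /* A successful wait_rendering has passed the seqno it
             * waited on, which is at least last_read_seqno ... */
            uint32_t completed = last_read_seqno;

            /* ... and therefore has always passed last_write_seqno
             * too, so the common tail may clear it unconditionally. */
            assert(seqno_passed(completed, last_write_seqno));
            return 0;
    }

In other words, the conditional i915_seqno_passed() check that the patch removes below was always true after a successful wait.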

v2: Add the missing ring parameter, because sometimes it is good to have
things compile.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
parent daa13e1c
@@ -1087,6 +1087,25 @@ i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno)
 			    interruptible, NULL);
 }
 
+static int
+i915_gem_object_wait_rendering__tail(struct drm_i915_gem_object *obj,
+				     struct intel_ring_buffer *ring)
+{
+	i915_gem_retire_requests_ring(ring);
+
+	/* Manually manage the write flush as we may have not yet
+	 * retired the buffer.
+	 *
+	 * Note that the last_write_seqno is always the earlier of
+	 * the two (read/write) seqno, so if we have successfully waited,
+	 * we know we have passed the last write.
+	 */
+	obj->last_write_seqno = 0;
+	obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
+
+	return 0;
+}
+
 /**
  * Ensures that all rendering to the object has completed and the object is
  * safe to unbind from the GTT or access from the CPU.
@@ -1107,18 +1126,7 @@ i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
 	if (ret)
 		return ret;
 
-	i915_gem_retire_requests_ring(ring);
-
-	/* Manually manage the write flush as we may have not yet
-	 * retired the buffer.
-	 */
-	if (obj->last_write_seqno &&
-	    i915_seqno_passed(seqno, obj->last_write_seqno)) {
-		obj->last_write_seqno = 0;
-		obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
-	}
-
-	return 0;
+	return i915_gem_object_wait_rendering__tail(obj, ring);
 }
 
 /* A nonblocking variant of the above wait. This is a highly dangerous routine
@@ -1154,20 +1162,10 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
 	mutex_unlock(&dev->struct_mutex);
 	ret = __wait_seqno(ring, seqno, reset_counter, true, NULL);
 	mutex_lock(&dev->struct_mutex);
+	if (ret)
+		return ret;
 
-	i915_gem_retire_requests_ring(ring);
-
-	/* Manually manage the write flush as we may have not yet
-	 * retired the buffer.
-	 */
-	if (ret == 0 &&
-	    obj->last_write_seqno &&
-	    i915_seqno_passed(seqno, obj->last_write_seqno)) {
-		obj->last_write_seqno = 0;
-		obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
-	}
-
-	return ret;
+	return i915_gem_object_wait_rendering__tail(obj, ring);
 }
 
 /**