Commit 40850a73 authored by Chris Wilson, committed by Kamal Mostafa

drm/i915: Only spin whilst waiting on the current request

commit 0f0cd472 upstream.

Limit busywaiting only to the request currently being processed by the
GPU. If the request is not currently being processed by the GPU, there
is a very low likelihood of it being completed within the 2 microsecond
spin timeout and so we will just be wasting CPU cycles.

v2: Check for logical inversion when rebasing - we were incorrectly
checking for this request being active, and instead busywaiting for
when the GPU was not yet processing the request of interest.

v3: Try another colour for the seqno names.
v4: Another colour for the function names.

v5: Remove the forced coherency when checking for the active request. On
reflection and plenty of recent experimentation, the issue is not a
cache coherency problem - but an irq/seqno ordering problem (timing issue).
Here, we do not need the w/a to force ordering of the read with an
interrupt.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com>
Cc: "Rogozhkin, Dmitry V" <dmitry.v.rogozhkin@intel.com>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com>
Cc: Eero Tamminen <eero.t.tamminen@intel.com>
Cc: "Rantala, Valtteri" <valtteri.rantala@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: http://patchwork.freedesktop.org/patch/msgid/1449833608-22125-4-git-send-email-chris@chris-wilson.co.uk
(cherry picked from commit 821485dc)
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
Signed-off-by: Kamal Mostafa <kamal@canonical.com>
parent 37243592
...@@ -2119,8 +2119,17 @@ struct drm_i915_gem_request { ...@@ -2119,8 +2119,17 @@ struct drm_i915_gem_request {
struct drm_i915_private *i915; struct drm_i915_private *i915;
struct intel_engine_cs *ring; struct intel_engine_cs *ring;
/** GEM sequence number associated with this request. */ /** GEM sequence number associated with the previous request,
uint32_t seqno; * when the HWS breadcrumb is equal to this the GPU is processing
* this request.
*/
u32 previous_seqno;
/** GEM sequence number associated with this request,
* when the HWS breadcrumb is equal or greater than this the GPU
* has finished processing this request.
*/
u32 seqno;
/** Position in the ringbuffer of the start of the request */ /** Position in the ringbuffer of the start of the request */
u32 head; u32 head;
...@@ -2797,15 +2806,17 @@ i915_seqno_passed(uint32_t seq1, uint32_t seq2) ...@@ -2797,15 +2806,17 @@ i915_seqno_passed(uint32_t seq1, uint32_t seq2)
return (int32_t)(seq1 - seq2) >= 0; return (int32_t)(seq1 - seq2) >= 0;
} }
static inline bool i915_gem_request_completed(struct drm_i915_gem_request *req, static inline bool i915_gem_request_started(struct drm_i915_gem_request *req,
bool lazy_coherency) bool lazy_coherency)
{ {
u32 seqno; u32 seqno = req->ring->get_seqno(req->ring, lazy_coherency);
return i915_seqno_passed(seqno, req->previous_seqno);
BUG_ON(req == NULL); }
seqno = req->ring->get_seqno(req->ring, lazy_coherency);
static inline bool i915_gem_request_completed(struct drm_i915_gem_request *req,
bool lazy_coherency)
{
u32 seqno = req->ring->get_seqno(req->ring, lazy_coherency);
return i915_seqno_passed(seqno, req->seqno); return i915_seqno_passed(seqno, req->seqno);
} }
......
...@@ -1221,9 +1221,13 @@ static int __i915_spin_request(struct drm_i915_gem_request *req, int state) ...@@ -1221,9 +1221,13 @@ static int __i915_spin_request(struct drm_i915_gem_request *req, int state)
* takes to sleep on a request, on the order of a microsecond. * takes to sleep on a request, on the order of a microsecond.
*/ */
if (i915_gem_request_get_ring(req)->irq_refcount) if (req->ring->irq_refcount)
return -EBUSY; return -EBUSY;
/* Only spin if we know the GPU is processing this request */
if (!i915_gem_request_started(req, true))
return -EAGAIN;
timeout = local_clock_us(&cpu) + 5; timeout = local_clock_us(&cpu) + 5;
while (!need_resched()) { while (!need_resched()) {
if (i915_gem_request_completed(req, true)) if (i915_gem_request_completed(req, true))
...@@ -1237,6 +1241,7 @@ static int __i915_spin_request(struct drm_i915_gem_request *req, int state) ...@@ -1237,6 +1241,7 @@ static int __i915_spin_request(struct drm_i915_gem_request *req, int state)
cpu_relax_lowlatency(); cpu_relax_lowlatency();
} }
if (i915_gem_request_completed(req, false)) if (i915_gem_request_completed(req, false))
return 0; return 0;
...@@ -2594,6 +2599,7 @@ int __i915_add_request(struct intel_engine_cs *ring, ...@@ -2594,6 +2599,7 @@ int __i915_add_request(struct intel_engine_cs *ring,
} }
request->emitted_jiffies = jiffies; request->emitted_jiffies = jiffies;
request->previous_seqno = ring->last_submitted_seqno;
ring->last_submitted_seqno = request->seqno; ring->last_submitted_seqno = request->seqno;
list_add_tail(&request->list, &ring->request_list); list_add_tail(&request->list, &ring->request_list);
request->file_priv = NULL; request->file_priv = NULL;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment