Commit ee044a88 authored by John Harrison, committed by Daniel Vetter

drm/i915: Update ring->add_request() to take a request structure

Updated the various ring->add_request() implementations to take a request
instead of a ring. This removes their reliance on the OLR to obtain the seqno
value that the request should be tagged with.

For: VIZ-5115
Signed-off-by: John Harrison <John.C.Harrison@Intel.com>
Reviewed-by: Tomas Elf <tomas.elf@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
parent 7deb4d39
...@@ -2524,7 +2524,7 @@ void __i915_add_request(struct drm_i915_gem_request *request, ...@@ -2524,7 +2524,7 @@ void __i915_add_request(struct drm_i915_gem_request *request,
if (i915.enable_execlists) if (i915.enable_execlists)
ret = ring->emit_request(ringbuf, request); ret = ring->emit_request(ringbuf, request);
else { else {
ret = ring->add_request(ring); ret = ring->add_request(request);
request->tail = intel_ring_get_tail(ringbuf); request->tail = intel_ring_get_tail(ringbuf);
} }
......
...@@ -1288,16 +1288,16 @@ static int gen6_signal(struct intel_engine_cs *signaller, ...@@ -1288,16 +1288,16 @@ static int gen6_signal(struct intel_engine_cs *signaller,
/** /**
* gen6_add_request - Update the semaphore mailbox registers * gen6_add_request - Update the semaphore mailbox registers
* *
* @ring - ring that is adding a request * @request - request to write to the ring
* @seqno - return seqno stuck into the ring
* *
* Update the mailbox registers in the *other* rings with the current seqno. * Update the mailbox registers in the *other* rings with the current seqno.
* This acts like a signal in the canonical semaphore. * This acts like a signal in the canonical semaphore.
*/ */
static int static int
gen6_add_request(struct intel_engine_cs *ring) gen6_add_request(struct drm_i915_gem_request *req)
{ {
struct intel_engine_cs *ring = req->ring;
int ret; int ret;
if (ring->semaphore.signal) if (ring->semaphore.signal)
...@@ -1310,8 +1310,7 @@ gen6_add_request(struct intel_engine_cs *ring) ...@@ -1310,8 +1310,7 @@ gen6_add_request(struct intel_engine_cs *ring)
intel_ring_emit(ring, MI_STORE_DWORD_INDEX); intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
intel_ring_emit(ring, intel_ring_emit(ring, i915_gem_request_get_seqno(req));
i915_gem_request_get_seqno(ring->outstanding_lazy_request));
intel_ring_emit(ring, MI_USER_INTERRUPT); intel_ring_emit(ring, MI_USER_INTERRUPT);
__intel_ring_advance(ring); __intel_ring_advance(ring);
...@@ -1408,8 +1407,9 @@ do { \ ...@@ -1408,8 +1407,9 @@ do { \
} while (0) } while (0)
static int static int
pc_render_add_request(struct intel_engine_cs *ring) pc_render_add_request(struct drm_i915_gem_request *req)
{ {
struct intel_engine_cs *ring = req->ring;
u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES; u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
int ret; int ret;
...@@ -1429,8 +1429,7 @@ pc_render_add_request(struct intel_engine_cs *ring) ...@@ -1429,8 +1429,7 @@ pc_render_add_request(struct intel_engine_cs *ring)
PIPE_CONTROL_WRITE_FLUSH | PIPE_CONTROL_WRITE_FLUSH |
PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE); PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT); intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
intel_ring_emit(ring, intel_ring_emit(ring, i915_gem_request_get_seqno(req));
i915_gem_request_get_seqno(ring->outstanding_lazy_request));
intel_ring_emit(ring, 0); intel_ring_emit(ring, 0);
PIPE_CONTROL_FLUSH(ring, scratch_addr); PIPE_CONTROL_FLUSH(ring, scratch_addr);
scratch_addr += 2 * CACHELINE_BYTES; /* write to separate cachelines */ scratch_addr += 2 * CACHELINE_BYTES; /* write to separate cachelines */
...@@ -1449,8 +1448,7 @@ pc_render_add_request(struct intel_engine_cs *ring) ...@@ -1449,8 +1448,7 @@ pc_render_add_request(struct intel_engine_cs *ring)
PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE | PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
PIPE_CONTROL_NOTIFY); PIPE_CONTROL_NOTIFY);
intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT); intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
intel_ring_emit(ring, intel_ring_emit(ring, i915_gem_request_get_seqno(req));
i915_gem_request_get_seqno(ring->outstanding_lazy_request));
intel_ring_emit(ring, 0); intel_ring_emit(ring, 0);
__intel_ring_advance(ring); __intel_ring_advance(ring);
...@@ -1619,8 +1617,9 @@ bsd_ring_flush(struct drm_i915_gem_request *req, ...@@ -1619,8 +1617,9 @@ bsd_ring_flush(struct drm_i915_gem_request *req,
} }
static int static int
i9xx_add_request(struct intel_engine_cs *ring) i9xx_add_request(struct drm_i915_gem_request *req)
{ {
struct intel_engine_cs *ring = req->ring;
int ret; int ret;
ret = intel_ring_begin(ring, 4); ret = intel_ring_begin(ring, 4);
...@@ -1629,8 +1628,7 @@ i9xx_add_request(struct intel_engine_cs *ring) ...@@ -1629,8 +1628,7 @@ i9xx_add_request(struct intel_engine_cs *ring)
intel_ring_emit(ring, MI_STORE_DWORD_INDEX); intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
intel_ring_emit(ring, intel_ring_emit(ring, i915_gem_request_get_seqno(req));
i915_gem_request_get_seqno(ring->outstanding_lazy_request));
intel_ring_emit(ring, MI_USER_INTERRUPT); intel_ring_emit(ring, MI_USER_INTERRUPT);
__intel_ring_advance(ring); __intel_ring_advance(ring);
......
...@@ -183,7 +183,7 @@ struct intel_engine_cs { ...@@ -183,7 +183,7 @@ struct intel_engine_cs {
int __must_check (*flush)(struct drm_i915_gem_request *req, int __must_check (*flush)(struct drm_i915_gem_request *req,
u32 invalidate_domains, u32 invalidate_domains,
u32 flush_domains); u32 flush_domains);
int (*add_request)(struct intel_engine_cs *ring); int (*add_request)(struct drm_i915_gem_request *req);
/* Some chipsets are not quite as coherent as advertised and need /* Some chipsets are not quite as coherent as advertised and need
* an expensive kick to force a true read of the up-to-date seqno. * an expensive kick to force a true read of the up-to-date seqno.
* However, the up-to-date seqno is not always required and the last * However, the up-to-date seqno is not always required and the last
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment