Commit 535fbe82 authored by John Harrison, committed by Daniel Vetter

drm/i915: Update move_to_gpu() to take a request structure

The plan is to pass requests around as the basic submission tracking structure
rather than rings and contexts. This patch updates the move_to_gpu() code paths.

For: VIZ-5115
Signed-off-by: John Harrison <John.C.Harrison@Intel.com>
Reviewed-by: Tomas Elf <tomas.elf@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
parent 95c24161
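For illustration only, and not part of the commit itself: the minimal C sketch below shows the shape of the change this series is making. Callers hand over a single request structure, and the engine, ringbuffer and context are derived from it instead of being passed as separate parameters. The struct definitions here are heavily simplified stand-ins, not the real driver types.

/* Simplified stand-in types, for illustration only. */
struct intel_engine_cs { int id; };
struct intel_context { int hw_id; };
struct intel_ringbuffer { struct intel_engine_cs *ring; };

struct drm_i915_gem_request {
	struct intel_engine_cs *ring;     /* engine this request executes on */
	struct intel_ringbuffer *ringbuf;
	struct intel_context *ctx;
};

/* Old shape: the engine (and, for execlists, the context) is passed explicitly. */
static int move_to_gpu_old(struct intel_engine_cs *ring, struct intel_context *ctx)
{
	/* ... sync, flush and cache-invalidate work elided ... */
	(void)ring;
	(void)ctx;
	return 0;
}

/* New shape: everything needed is reachable from the single request. */
static int move_to_gpu_new(struct drm_i915_gem_request *req)
{
	struct intel_engine_cs *ring = req->ring;  /* derived, not passed */
	struct intel_context *ctx = req->ctx;

	/* ... same work as before, now driven off req ... */
	(void)ring;
	(void)ctx;
	return 0;
}

The diffs below apply this transformation to i915_gem_execbuffer_move_to_gpu() (legacy submission) and execlists_move_to_gpu() (execlists submission).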
drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -891,10 +891,10 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
 }
 
 static int
-i915_gem_execbuffer_move_to_gpu(struct intel_engine_cs *ring,
+i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req,
 				struct list_head *vmas)
 {
-	const unsigned other_rings = ~intel_ring_flag(ring);
+	const unsigned other_rings = ~intel_ring_flag(req->ring);
 	struct i915_vma *vma;
 	uint32_t flush_domains = 0;
 	bool flush_chipset = false;
@@ -904,7 +904,7 @@ i915_gem_execbuffer_move_to_gpu(struct intel_engine_cs *ring,
 		struct drm_i915_gem_object *obj = vma->obj;
 
 		if (obj->active & other_rings) {
-			ret = i915_gem_object_sync(obj, ring);
+			ret = i915_gem_object_sync(obj, req->ring);
 			if (ret)
 				return ret;
 		}
@@ -916,7 +916,7 @@ i915_gem_execbuffer_move_to_gpu(struct intel_engine_cs *ring,
 	}
 
 	if (flush_chipset)
-		i915_gem_chipset_flush(ring->dev);
+		i915_gem_chipset_flush(req->ring->dev);
 
 	if (flush_domains & I915_GEM_DOMAIN_GTT)
 		wmb();
@@ -924,7 +924,7 @@ i915_gem_execbuffer_move_to_gpu(struct intel_engine_cs *ring,
 	/* Unconditionally invalidate gpu caches and ensure that we do flush
 	 * any residual writes from the previous batch.
 	 */
-	return intel_ring_invalidate_all_caches(ring);
+	return intel_ring_invalidate_all_caches(req->ring);
 }
 
 static bool
@@ -1246,7 +1246,7 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
 		}
 	}
 
-	ret = i915_gem_execbuffer_move_to_gpu(ring, vmas);
+	ret = i915_gem_execbuffer_move_to_gpu(params->request, vmas);
 	if (ret)
 		goto error;
 
drivers/gpu/drm/i915/intel_lrc.c
@@ -624,12 +624,10 @@ static int logical_ring_invalidate_all_caches(struct intel_ringbuffer *ringbuf,
 	return 0;
 }
 
-static int execlists_move_to_gpu(struct intel_ringbuffer *ringbuf,
-				 struct intel_context *ctx,
+static int execlists_move_to_gpu(struct drm_i915_gem_request *req,
 				 struct list_head *vmas)
 {
-	struct intel_engine_cs *ring = ringbuf->ring;
-	const unsigned other_rings = ~intel_ring_flag(ring);
+	const unsigned other_rings = ~intel_ring_flag(req->ring);
 	struct i915_vma *vma;
 	uint32_t flush_domains = 0;
 	bool flush_chipset = false;
@@ -639,7 +637,7 @@ static int execlists_move_to_gpu(struct intel_ringbuffer *ringbuf,
 		struct drm_i915_gem_object *obj = vma->obj;
 
 		if (obj->active & other_rings) {
-			ret = i915_gem_object_sync(obj, ring);
+			ret = i915_gem_object_sync(obj, req->ring);
 			if (ret)
 				return ret;
 		}
@@ -656,7 +654,7 @@ static int execlists_move_to_gpu(struct intel_ringbuffer *ringbuf,
 	/* Unconditionally invalidate gpu caches and ensure that we do flush
 	 * any residual writes from the previous batch.
 	 */
-	return logical_ring_invalidate_all_caches(ringbuf, ctx);
+	return logical_ring_invalidate_all_caches(req->ringbuf, req->ctx);
 }
 
 int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request)
@@ -918,7 +916,7 @@ int intel_execlists_submission(struct i915_execbuffer_params *params,
 		return -EINVAL;
 	}
 
-	ret = execlists_move_to_gpu(ringbuf, params->ctx, vmas);
+	ret = execlists_move_to_gpu(params->request, vmas);
 	if (ret)
 		return ret;