Commit a5236978 authored by Chris Wilson

drm/i915: Start returning an error from i915_vma_move_to_active()

Handling such a late error in request construction is tricky, but to
accommodate future patches which may allocate here, we potentially could
err. To handle the error after already adjusting global state to track
the new request, we must finish and submit the request. But we don't
want to use the request as not everything is being tracked by it, so we
opt to cancel the commands inside the request.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180706103947.15919-3-chris@chris-wilson.co.uk
parent 6dd7526f
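
All the converted call sites follow the same shape: check the new return value from i915_vma_move_to_active(), and on failure cancel the commands already emitted into the request with i915_request_skip() while still submitting it through i915_request_add(), so the global state adjusted during request construction stays consistent. A minimal sketch of that shape (the function emit_work() and its arguments are illustrative only, not taken from the patch):

static int emit_work(struct i915_request *rq, struct i915_vma *vma)
{
	int err;

	/* Tracking the vma in the request can now fail. */
	err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
	if (err)
		goto skip_request;

	/* ... emit commands that reference vma ... */

	i915_request_add(rq);
	return 0;

skip_request:
	/* Cancel the commands already written into the request ... */
	i915_request_skip(rq, err);
	/* ... but still finish and submit it, as the commit message explains. */
	i915_request_add(rq);
	return err;
}
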
@@ -476,7 +476,11 @@ static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
 			i915_gem_obj_finish_shmem_access(bb->obj);
 			bb->accessing = false;
 
-			i915_vma_move_to_active(bb->vma, workload->req, 0);
+			ret = i915_vma_move_to_active(bb->vma,
+						      workload->req,
+						      0);
+			if (ret)
+				goto err;
 		}
 	}
 	return 0;
...
@@ -3090,7 +3090,7 @@ i915_gem_obj_finish_shmem_access(struct drm_i915_gem_object *obj)
 }
 
 int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
-void i915_vma_move_to_active(struct i915_vma *vma,
-			     struct i915_request *rq,
-			     unsigned int flags);
+int __must_check i915_vma_move_to_active(struct i915_vma *vma,
+					 struct i915_request *rq,
+					 unsigned int flags);
 int i915_gem_dumb_create(struct drm_file *file_priv,
...
@@ -1165,12 +1165,16 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
 		goto err_request;
 
 	GEM_BUG_ON(!reservation_object_test_signaled_rcu(batch->resv, true));
-	i915_vma_move_to_active(batch, rq, 0);
-	i915_vma_unpin(batch);
+	err = i915_vma_move_to_active(batch, rq, 0);
+	if (err)
+		goto skip_request;
 
-	i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+	err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+	if (err)
+		goto skip_request;
 
 	rq->batch = batch;
+	i915_vma_unpin(batch);
 
 	cache->rq = rq;
 	cache->rq_cmd = cmd;
@@ -1179,6 +1183,8 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
 	/* Return with batch mapping (cmd) still pinned */
 	return 0;
 
+skip_request:
+	i915_request_skip(rq, err);
 err_request:
 	i915_request_add(rq);
 err_unpin:
@@ -1818,7 +1824,11 @@ static int eb_move_to_gpu(struct i915_execbuffer *eb)
 		unsigned int flags = eb->flags[i];
 		struct i915_vma *vma = eb->vma[i];
 
-		i915_vma_move_to_active(vma, eb->request, flags);
+		err = i915_vma_move_to_active(vma, eb->request, flags);
+		if (unlikely(err)) {
+			i915_request_skip(eb->request, err);
+			return err;
+		}
 
 		__eb_unreserve_vma(vma, flags);
 		vma->exec_flags = NULL;
@@ -1877,7 +1887,7 @@ static void export_fence(struct i915_vma *vma,
 	reservation_object_unlock(resv);
 }
 
-void i915_vma_move_to_active(struct i915_vma *vma,
-			     struct i915_request *rq,
-			     unsigned int flags)
+int i915_vma_move_to_active(struct i915_vma *vma,
+			    struct i915_request *rq,
+			    unsigned int flags)
 {
@@ -1916,6 +1926,7 @@ void i915_vma_move_to_active(struct i915_vma *vma,
 		i915_gem_active_set(&vma->last_fence, rq);
 
 	export_fence(vma, rq, flags);
+	return 0;
 }
 
 static int i915_reset_gen7_sol_offsets(struct i915_request *rq)
...
@@ -222,7 +222,7 @@ int i915_gem_render_state_emit(struct i915_request *rq)
 			goto err_unpin;
 	}
 
-	i915_vma_move_to_active(so.vma, rq, 0);
+	err = i915_vma_move_to_active(so.vma, rq, 0);
 err_unpin:
 	i915_vma_unpin(so.vma);
 err_vma:
...
@@ -985,7 +985,10 @@ static int gpu_write(struct i915_vma *vma,
 		goto err_request;
 	}
 
-	i915_vma_move_to_active(batch, rq, 0);
+	err = i915_vma_move_to_active(batch, rq, 0);
+	if (err)
+		goto err_request;
+
 	i915_gem_object_set_active_reference(batch->obj);
 	i915_vma_unpin(batch);
 	i915_vma_close(batch);
@@ -996,7 +999,9 @@ static int gpu_write(struct i915_vma *vma,
 	if (err)
 		goto err_request;
 
-	i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+	err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+	if (err)
+		i915_request_skip(rq, err);
 
 err_request:
 	i915_request_add(rq);
...
@@ -222,12 +222,12 @@ static int gpu_set(struct drm_i915_gem_object *obj,
 	}
 	intel_ring_advance(rq, cs);
 
-	i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+	err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
 	i915_vma_unpin(vma);
 
 	i915_request_add(rq);
 
-	return 0;
+	return err;
 }
 
 static bool always_valid(struct drm_i915_private *i915)
...
@@ -170,18 +170,26 @@ static int gpu_fill(struct drm_i915_gem_object *obj,
 	if (err)
 		goto err_request;
 
-	i915_vma_move_to_active(batch, rq, 0);
+	err = i915_vma_move_to_active(batch, rq, 0);
+	if (err)
+		goto skip_request;
+
+	err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+	if (err)
+		goto skip_request;
+
 	i915_gem_object_set_active_reference(batch->obj);
 	i915_vma_unpin(batch);
 	i915_vma_close(batch);
 
-	i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
 	i915_vma_unpin(vma);
 
 	i915_request_add(rq);
 
 	return 0;
 
+skip_request:
+	i915_request_skip(rq, err);
 err_request:
 	i915_request_add(rq);
 err_batch:
...
@@ -464,13 +464,14 @@ static int make_obj_busy(struct drm_i915_gem_object *obj)
 		return PTR_ERR(rq);
 	}
 
-	i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+	err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
 
 	i915_request_add(rq);
 
-	i915_gem_object_set_active_reference(obj);
+	__i915_gem_object_release_unless_active(obj);
 	i915_vma_unpin(vma);
-	return 0;
+
+	return err;
 }
 
 static bool assert_mmap_offset(struct drm_i915_private *i915,
...
@@ -675,7 +675,9 @@ static int live_all_engines(void *arg)
 			i915_gem_object_set_active_reference(batch->obj);
 		}
 
-		i915_vma_move_to_active(batch, request[id], 0);
+		err = i915_vma_move_to_active(batch, request[id], 0);
+		GEM_BUG_ON(err);
+
 		i915_request_get(request[id]);
 		i915_request_add(request[id]);
 	}
@@ -785,7 +787,9 @@ static int live_sequential_engines(void *arg)
 		GEM_BUG_ON(err);
 		request[id]->batch = batch;
 
-		i915_vma_move_to_active(batch, request[id], 0);
+		err = i915_vma_move_to_active(batch, request[id], 0);
+		GEM_BUG_ON(err);
+
 		i915_gem_object_set_active_reference(batch->obj);
 		i915_vma_get(batch);
...
@@ -130,13 +130,19 @@ static int emit_recurse_batch(struct hang *h,
 	if (err)
 		goto unpin_vma;
 
-	i915_vma_move_to_active(vma, rq, 0);
+	err = i915_vma_move_to_active(vma, rq, 0);
+	if (err)
+		goto unpin_hws;
+
 	if (!i915_gem_object_has_active_reference(vma->obj)) {
 		i915_gem_object_get(vma->obj);
 		i915_gem_object_set_active_reference(vma->obj);
 	}
 
-	i915_vma_move_to_active(hws, rq, 0);
+	err = i915_vma_move_to_active(hws, rq, 0);
+	if (err)
+		goto unpin_hws;
+
 	if (!i915_gem_object_has_active_reference(hws->obj)) {
 		i915_gem_object_get(hws->obj);
 		i915_gem_object_set_active_reference(hws->obj);
@@ -205,6 +211,7 @@ static int emit_recurse_batch(struct hang *h,
 	err = rq->engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, flags);
 
+unpin_hws:
 	i915_vma_unpin(hws);
 unpin_vma:
 	i915_vma_unpin(vma);
...
@@ -104,13 +104,19 @@ static int emit_recurse_batch(struct spinner *spin,
 	if (err)
 		goto unpin_vma;
 
-	i915_vma_move_to_active(vma, rq, 0);
+	err = i915_vma_move_to_active(vma, rq, 0);
+	if (err)
+		goto unpin_hws;
+
 	if (!i915_gem_object_has_active_reference(vma->obj)) {
 		i915_gem_object_get(vma->obj);
 		i915_gem_object_set_active_reference(vma->obj);
 	}
 
-	i915_vma_move_to_active(hws, rq, 0);
+	err = i915_vma_move_to_active(hws, rq, 0);
+	if (err)
+		goto unpin_hws;
+
 	if (!i915_gem_object_has_active_reference(hws->obj)) {
 		i915_gem_object_get(hws->obj);
 		i915_gem_object_set_active_reference(hws->obj);
@@ -134,6 +140,7 @@ static int emit_recurse_batch(struct spinner *spin,
 	err = rq->engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, 0);
 
+unpin_hws:
 	i915_vma_unpin(hws);
 unpin_vma:
 	i915_vma_unpin(vma);
...
@@ -49,6 +49,10 @@ read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
 		goto err_pin;
 	}
 
+	err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+	if (err)
+		goto err_req;
+
 	srm = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
 	if (INTEL_GEN(ctx->i915) >= 8)
 		srm++;
@@ -67,8 +71,6 @@ read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
 	}
 	intel_ring_advance(rq, cs);
 
-	i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
-
 	i915_gem_object_get(result);
 	i915_gem_object_set_active_reference(result);
...