Commit ba4134a4 authored by Tvrtko Ursulin

drm/i915: Save trip via top-level i915 in a few more places

For gt-related operations it makes more sense to stay within the realm
of the gt instead of dereferencing via the top-level driver i915.

This patch handles a few of the easy cases; work requiring more
refactoring is still outstanding.
Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20190621070811.7006-30-tvrtko.ursulin@linux.intel.com
parent db45fb5b
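
The pattern is the same throughout: reach the GGTT through the gt that an
engine or address space belongs to, rather than detouring through the
top-level drm_i915_private. Below is a minimal illustrative sketch, not
part of the patch; the helper name is hypothetical, and the field layout
is only what the hunks themselves rely on (drm_i915_private embeds the
GGTT as i915->ggtt, while intel_gt holds a pointer to it, gt->ggtt):

        /* Illustrative only: both routes resolve to the same GGTT. */
        static struct i915_address_space *
        status_page_vm(struct intel_engine_cs *engine)
        {
                /* Old route, via the top-level driver struct: */
                /* return &engine->i915->ggtt.vm; */

                /* New route, staying within the engine's gt: */
                return &engine->gt->ggtt->vm;
        }

Keeping the dereference local to the gt means gt code no longer needs the
global driver struct at hand just to find its own GGTT.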
@@ -1039,8 +1039,7 @@ static int __igt_write_huge(struct i915_gem_context *ctx,
                             u64 size, u64 offset,
                             u32 dword, u32 val)
 {
-        struct drm_i915_private *i915 = to_i915(obj->base.dev);
-        struct i915_address_space *vm = ctx->vm ?: &i915->ggtt.vm;
+        struct i915_address_space *vm = ctx->vm ?: &engine->gt->ggtt->vm;
         unsigned int flags = PIN_USER | PIN_OFFSET_FIXED;
         struct i915_vma *vma;
         int err;
@@ -234,8 +234,7 @@ static int gpu_fill(struct drm_i915_gem_object *obj,
                     struct intel_engine_cs *engine,
                     unsigned int dw)
 {
-        struct drm_i915_private *i915 = to_i915(obj->base.dev);
-        struct i915_address_space *vm = ctx->vm ?: &i915->ggtt.vm;
+        struct i915_address_space *vm = ctx->vm ?: &engine->gt->ggtt->vm;
         struct i915_request *rq;
         struct i915_vma *vma;
         struct i915_vma *batch;
@@ -584,7 +584,7 @@ static int init_status_page(struct intel_engine_cs *engine)
         i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);
 
-        vma = i915_vma_instance(obj, &engine->i915->ggtt.vm, NULL);
+        vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL);
         if (IS_ERR(vma)) {
                 ret = PTR_ERR(vma);
                 goto err;
@@ -1954,7 +1954,7 @@ static int lrc_setup_wa_ctx(struct intel_engine_cs *engine)
         if (IS_ERR(obj))
                 return PTR_ERR(obj);
 
-        vma = i915_vma_instance(obj, &engine->i915->ggtt.vm, NULL);
+        vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL);
         if (IS_ERR(vma)) {
                 err = PTR_ERR(vma);
                 goto err;
@@ -3038,7 +3038,7 @@ static int execlists_context_deferred_alloc(struct intel_context *ce,
         if (IS_ERR(ctx_obj))
                 return PTR_ERR(ctx_obj);
 
-        vma = i915_vma_instance(ctx_obj, &engine->i915->ggtt.vm, NULL);
+        vma = i915_vma_instance(ctx_obj, &engine->gt->ggtt->vm, NULL);
         if (IS_ERR(vma)) {
                 ret = PTR_ERR(vma);
                 goto error_deref_obj;
@@ -1414,7 +1414,7 @@ alloc_context_vma(struct intel_engine_cs *engine)
                 i915_gem_object_unpin_map(obj);
         }
 
-        vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
+        vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL);
         if (IS_ERR(vma)) {
                 err = PTR_ERR(vma);
                 goto err_obj;
@@ -1420,7 +1420,7 @@ static int engine_wa_list_verify(struct intel_context *ce,
         if (!wal->count)
                 return 0;
 
-        vma = create_scratch(&ce->engine->i915->ggtt.vm, wal->count);
+        vma = create_scratch(&ce->engine->gt->ggtt->vm, wal->count);
         if (IS_ERR(vma))
                 return PTR_ERR(vma);
@@ -130,7 +130,7 @@ static struct i915_request *
 hang_create_request(struct hang *h, struct intel_engine_cs *engine)
 {
         struct drm_i915_private *i915 = h->i915;
-        struct i915_address_space *vm = h->ctx->vm ?: &i915->ggtt.vm;
+        struct i915_address_space *vm = h->ctx->vm ?: &engine->gt->ggtt->vm;
         struct i915_request *rq = NULL;
         struct i915_vma *hws, *vma;
         unsigned int flags;
@@ -143,12 +143,12 @@ hang_create_request(struct hang *h, struct intel_engine_cs *engine)
                 struct drm_i915_gem_object *obj;
                 void *vaddr;
 
-                obj = i915_gem_object_create_internal(h->i915, PAGE_SIZE);
+                obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
                 if (IS_ERR(obj))
                         return ERR_CAST(obj);
 
                 vaddr = i915_gem_object_pin_map(obj,
-                                                i915_coherent_map_type(h->i915));
+                                                i915_coherent_map_type(i915));
                 if (IS_ERR(vaddr)) {
                         i915_gem_object_put(obj);
                         return ERR_CAST(vaddr);
@@ -255,7 +255,7 @@ hang_create_request(struct hang *h, struct intel_engine_cs *engine)
         }
 
         flags = 0;
-        if (INTEL_GEN(vm->i915) <= 5)
+        if (INTEL_GEN(i915) <= 5)
                 flags |= I915_DISPATCH_SECURE;
 
         err = rq->engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, flags);
@@ -103,7 +103,7 @@ read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
         i915_gem_object_flush_map(result);
         i915_gem_object_unpin_map(result);
 
-        vma = i915_vma_instance(result, &engine->i915->ggtt.vm, NULL);
+        vma = i915_vma_instance(result, &engine->gt->ggtt->vm, NULL);
         if (IS_ERR(vma)) {
                 err = PTR_ERR(vma);
                 goto err_obj;
@@ -1901,7 +1901,7 @@ static int gen6_alloc_va_range(struct i915_address_space *vm,
         if (flush) {
                 mark_tlbs_dirty(&ppgtt->base);
-                gen6_ggtt_invalidate(&vm->i915->ggtt);
+                gen6_ggtt_invalidate(vm->gt->ggtt);
         }
 
         goto out;
@@ -2045,7 +2045,7 @@ static const struct i915_vma_ops pd_vma_ops = {
 static struct i915_vma *pd_vma_create(struct gen6_ppgtt *ppgtt, int size)
 {
         struct drm_i915_private *i915 = ppgtt->base.vm.i915;
-        struct i915_ggtt *ggtt = &i915->ggtt;
+        struct i915_ggtt *ggtt = ppgtt->base.vm.gt->ggtt;
         struct i915_vma *vma;
 
         GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
@@ -194,7 +194,7 @@ int i915_gem_render_state_emit(struct i915_request *rq)
         if (IS_ERR(so.obj))
                 return PTR_ERR(so.obj);
 
-        so.vma = i915_vma_instance(so.obj, &engine->i915->ggtt.vm, NULL);
+        so.vma = i915_vma_instance(so.obj, &engine->gt->ggtt->vm, NULL);
         if (IS_ERR(so.vma)) {
                 err = PTR_ERR(so.vma);
                 goto err_obj;
@@ -1405,12 +1405,12 @@ capture_object(struct drm_i915_private *dev_priv,
 static void gem_record_rings(struct i915_gpu_state *error)
 {
         struct drm_i915_private *i915 = error->i915;
-        struct i915_ggtt *ggtt = &i915->ggtt;
         int i;
 
         for (i = 0; i < I915_NUM_ENGINES; i++) {
                 struct intel_engine_cs *engine = i915->engine[i];
                 struct drm_i915_error_engine *ee = &error->engine[i];
+                struct i915_ggtt *ggtt = engine->gt->ggtt;
                 struct i915_request *request;
 
                 ee->engine_id = -1;