Commit e568ac38 authored by Chris Wilson

drm/i915: Pull kref into i915_address_space

Make the kref common to both derived structs (i915_ggtt and i915_ppgtt)
so that we can safely reference count an abstract ctx->vm address space.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190611091238.15808-1-chris@chris-wilson.co.uk
parent a8cff4c8
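
In short, the refcount moves out of the ppGTT wrapper and into the abstract i915_address_space base, so the GGTT and every ppGTT share one get/put interface. A minimal sketch of the resulting pattern, condensed from the headers touched below (not the complete driver structs):

	#include <linux/kref.h>
	#include <linux/kernel.h>	/* container_of() */

	struct i915_address_space {
		struct kref ref;	/* refcount now lives in the common base */
		/* ... */
	};

	struct i915_hw_ppgtt {
		struct i915_address_space vm;	/* base embedded as the first member */
		/* ... */
	};

	/* One get/put pair serves any address space, GGTT or ppGTT alike. */
	static inline struct i915_address_space *
	i915_vm_get(struct i915_address_space *vm)
	{
		kref_get(&vm->ref);
		return vm;
	}

	void i915_vm_release(struct kref *kref);	/* frees the derived object */

	static inline void i915_vm_put(struct i915_address_space *vm)
	{
		kref_put(&vm->ref, i915_vm_release);
	}

Callers that previously held a struct i915_hw_ppgtt reference now hold the vm, e.g. i915_ppgtt_put(ppgtt) becomes i915_vm_put(&ppgtt->vm).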
@@ -250,13 +250,11 @@ int i915_gem_schedule_fill_pages_blt(struct drm_i915_gem_object *obj,
 {
 	struct drm_i915_private *i915 = to_i915(obj->base.dev);
 	struct i915_gem_context *ctx = ce->gem_context;
-	struct i915_address_space *vm;
+	struct i915_address_space *vm = ctx->vm ?: &i915->ggtt.vm;
 	struct clear_pages_work *work;
 	struct i915_sleeve *sleeve;
 	int err;
 
-	vm = ctx->ppgtt ? &ctx->ppgtt->vm : &i915->ggtt.vm;
-
 	sleeve = create_sleeve(vm, obj, pages, page_sizes);
 	if (IS_ERR(sleeve))
 		return PTR_ERR(sleeve);
This diff is collapsed.
@@ -25,7 +25,7 @@ struct pid;
 struct drm_i915_private;
 struct drm_i915_file_private;
-struct i915_hw_ppgtt;
+struct i915_address_space;
 struct i915_timeline;
 struct intel_ring;
@@ -80,7 +80,7 @@ struct i915_gem_context {
 	struct i915_timeline *timeline;
 
 	/**
-	 * @ppgtt: unique address space (GTT)
+	 * @vm: unique address space (GTT)
 	 *
 	 * In full-ppgtt mode, each context has its own address space ensuring
 	 * complete seperation of one client from all others.

@@ -88,7 +88,7 @@ struct i915_gem_context {
 	 * In other modes, this is a NULL pointer with the expectation that
 	 * the caller uses the shared global GTT.
 	 */
-	struct i915_hw_ppgtt *ppgtt;
+	struct i915_address_space *vm;
 
 	/**
	 * @pid: process id of creator
@@ -723,8 +723,8 @@ static int eb_select_context(struct i915_execbuffer *eb)
 		return -ENOENT;
 
 	eb->gem_context = ctx;
-	if (ctx->ppgtt) {
-		eb->vm = &ctx->ppgtt->vm;
+	if (ctx->vm) {
+		eb->vm = ctx->vm;
 		eb->invalid_flags |= EXEC_OBJECT_NEEDS_GTT;
 	} else {
 		eb->vm = &eb->i915->ggtt.vm;
@@ -49,14 +49,12 @@ int i915_gem_object_fill_blt(struct drm_i915_gem_object *obj,
 {
 	struct drm_i915_private *i915 = to_i915(obj->base.dev);
 	struct i915_gem_context *ctx = ce->gem_context;
-	struct i915_address_space *vm;
+	struct i915_address_space *vm = ctx->vm ?: &i915->ggtt.vm;
 	struct i915_request *rq;
 	struct i915_vma *vma;
 	int err;
 
 	/* XXX: ce->vm please */
-	vm = ctx->ppgtt ? &ctx->ppgtt->vm : &i915->ggtt.vm;
 	vma = i915_vma_instance(obj, vm, NULL);
 	if (IS_ERR(vma))
 		return PTR_ERR(vma);
@@ -768,14 +768,14 @@ i915_gem_userptr_ioctl(struct drm_device *dev,
 		return -EFAULT;
 
 	if (args->flags & I915_USERPTR_READ_ONLY) {
-		struct i915_hw_ppgtt *ppgtt;
+		struct i915_address_space *vm;
 
 		/*
 		 * On almost all of the older hw, we cannot tell the GPU that
 		 * a page is readonly.
 		 */
-		ppgtt = dev_priv->kernel_context->ppgtt;
-		if (!ppgtt || !ppgtt->vm.has_read_only)
+		vm = dev_priv->kernel_context->vm;
+		if (!vm || !vm->has_read_only)
 			return -ENODEV;
 	}
@@ -1038,8 +1038,7 @@ static int __igt_write_huge(struct i915_gem_context *ctx,
 			    u32 dword, u32 val)
 {
 	struct drm_i915_private *i915 = to_i915(obj->base.dev);
-	struct i915_address_space *vm =
-		ctx->ppgtt ? &ctx->ppgtt->vm : &i915->ggtt.vm;
+	struct i915_address_space *vm = ctx->vm ?: &i915->ggtt.vm;
 	unsigned int flags = PIN_USER | PIN_OFFSET_FIXED;
 	struct i915_vma *vma;
 	int err;

@@ -1092,8 +1091,7 @@ static int igt_write_huge(struct i915_gem_context *ctx,
 			  struct drm_i915_gem_object *obj)
 {
 	struct drm_i915_private *i915 = to_i915(obj->base.dev);
-	struct i915_address_space *vm =
-		ctx->ppgtt ? &ctx->ppgtt->vm : &i915->ggtt.vm;
+	struct i915_address_space *vm = ctx->vm ?: &i915->ggtt.vm;
 	static struct intel_engine_cs *engines[I915_NUM_ENGINES];
 	struct intel_engine_cs *engine;
 	I915_RND_STATE(prng);

@@ -1419,7 +1417,7 @@ static int igt_ppgtt_pin_update(void *arg)
 	struct i915_gem_context *ctx = arg;
 	struct drm_i915_private *dev_priv = ctx->i915;
 	unsigned long supported = INTEL_INFO(dev_priv)->page_sizes;
-	struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
+	struct i915_address_space *vm = ctx->vm;
 	struct drm_i915_gem_object *obj;
 	struct i915_vma *vma;
 	unsigned int flags = PIN_USER | PIN_OFFSET_FIXED;

@@ -1434,7 +1432,7 @@ static int igt_ppgtt_pin_update(void *arg)
 	 * huge-gtt-pages.
 	 */
 
-	if (!ppgtt || !i915_vm_is_4lvl(&ppgtt->vm)) {
+	if (!vm || !i915_vm_is_4lvl(vm)) {
 		pr_info("48b PPGTT not supported, skipping\n");
 		return 0;
 	}

@@ -1449,7 +1447,7 @@ static int igt_ppgtt_pin_update(void *arg)
 	if (IS_ERR(obj))
 		return PTR_ERR(obj);
 
-	vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
+	vma = i915_vma_instance(obj, vm, NULL);
 	if (IS_ERR(vma)) {
 		err = PTR_ERR(vma);
 		goto out_put;

@@ -1503,7 +1501,7 @@ static int igt_ppgtt_pin_update(void *arg)
 	if (IS_ERR(obj))
 		return PTR_ERR(obj);
 
-	vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
+	vma = i915_vma_instance(obj, vm, NULL);
 	if (IS_ERR(vma)) {
 		err = PTR_ERR(vma);
 		goto out_put;

@@ -1541,8 +1539,7 @@ static int igt_tmpfs_fallback(void *arg)
 	struct i915_gem_context *ctx = arg;
 	struct drm_i915_private *i915 = ctx->i915;
 	struct vfsmount *gemfs = i915->mm.gemfs;
-	struct i915_address_space *vm =
-		ctx->ppgtt ? &ctx->ppgtt->vm : &i915->ggtt.vm;
+	struct i915_address_space *vm = ctx->vm ?: &i915->ggtt.vm;
 	struct drm_i915_gem_object *obj;
 	struct i915_vma *vma;
 	u32 *vaddr;

@@ -1599,8 +1596,7 @@ static int igt_shrink_thp(void *arg)
 {
 	struct i915_gem_context *ctx = arg;
 	struct drm_i915_private *i915 = ctx->i915;
-	struct i915_address_space *vm =
-		ctx->ppgtt ? &ctx->ppgtt->vm : &i915->ggtt.vm;
+	struct i915_address_space *vm = ctx->vm ?: &i915->ggtt.vm;
 	struct drm_i915_gem_object *obj;
 	struct i915_vma *vma;
 	unsigned int flags = PIN_USER;

@@ -1721,7 +1717,7 @@ int i915_gem_huge_page_mock_selftests(void)
 	err = i915_subtests(tests, ppgtt);
 
 out_close:
-	i915_ppgtt_put(ppgtt);
+	i915_vm_put(&ppgtt->vm);
 
 out_unlock:
 	mutex_unlock(&dev_priv->drm.struct_mutex);

@@ -1766,8 +1762,8 @@ int i915_gem_huge_page_live_selftests(struct drm_i915_private *dev_priv)
 		goto out_unlock;
 	}
 
-	if (ctx->ppgtt)
-		ctx->ppgtt->vm.scrub_64K = true;
+	if (ctx->vm)
+		ctx->vm->scrub_64K = true;
 
 	err = i915_subtests(tests, ctx);
@@ -248,8 +248,7 @@ static int gpu_fill(struct drm_i915_gem_object *obj,
 		    unsigned int dw)
 {
 	struct drm_i915_private *i915 = to_i915(obj->base.dev);
-	struct i915_address_space *vm =
-		ctx->ppgtt ? &ctx->ppgtt->vm : &i915->ggtt.vm;
+	struct i915_address_space *vm = ctx->vm ?: &i915->ggtt.vm;
 	struct i915_request *rq;
 	struct i915_vma *vma;
 	struct i915_vma *batch;

@@ -438,8 +437,7 @@ create_test_object(struct i915_gem_context *ctx,
 		   struct list_head *objects)
 {
 	struct drm_i915_gem_object *obj;
-	struct i915_address_space *vm =
-		ctx->ppgtt ? &ctx->ppgtt->vm : &ctx->i915->ggtt.vm;
+	struct i915_address_space *vm = ctx->vm ?: &ctx->i915->ggtt.vm;
 	u64 size;
 	int err;

@@ -541,7 +539,7 @@ static int igt_ctx_exec(void *arg)
 				pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %s], err=%d\n",
 				       ndwords, dw, max_dwords(obj),
 				       engine->name, ctx->hw_id,
-				       yesno(!!ctx->ppgtt), err);
+				       yesno(!!ctx->vm), err);
 				goto out_unlock;
 			}

@@ -612,7 +610,7 @@ static int igt_shared_ctx_exec(void *arg)
 		goto out_unlock;
 	}
 
-	if (!parent->ppgtt) { /* not full-ppgtt; nothing to share */
+	if (!parent->vm) { /* not full-ppgtt; nothing to share */
 		err = 0;
 		goto out_unlock;
 	}

@@ -643,7 +641,7 @@ static int igt_shared_ctx_exec(void *arg)
 				goto out_test;
 			}
 
-			__assign_ppgtt(ctx, parent->ppgtt);
+			__assign_ppgtt(ctx, parent->vm);
 
 			if (!obj) {
 				obj = create_test_object(parent, file, &objects);

@@ -661,7 +659,7 @@ static int igt_shared_ctx_exec(void *arg)
 				pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %s], err=%d\n",
 				       ndwords, dw, max_dwords(obj),
 				       engine->name, ctx->hw_id,
-				       yesno(!!ctx->ppgtt), err);
+				       yesno(!!ctx->vm), err);
 				kernel_context_close(ctx);
 				goto out_test;
 			}
@@ -758,7 +756,7 @@ emit_rpcs_query(struct drm_i915_gem_object *obj,
 	GEM_BUG_ON(!intel_engine_can_store_dword(ce->engine));
 
-	vma = i915_vma_instance(obj, &ce->gem_context->ppgtt->vm, NULL);
+	vma = i915_vma_instance(obj, ce->gem_context->vm, NULL);
 	if (IS_ERR(vma))
 		return PTR_ERR(vma);

@@ -1176,8 +1174,8 @@ static int igt_ctx_readonly(void *arg)
 {
 	struct drm_i915_private *i915 = arg;
 	struct drm_i915_gem_object *obj = NULL;
+	struct i915_address_space *vm;
 	struct i915_gem_context *ctx;
-	struct i915_hw_ppgtt *ppgtt;
 	unsigned long idx, ndwords, dw;
 	struct igt_live_test t;
 	struct drm_file *file;

@@ -1208,8 +1206,8 @@ static int igt_ctx_readonly(void *arg)
 		goto out_unlock;
 	}
 
-	ppgtt = ctx->ppgtt ?: i915->mm.aliasing_ppgtt;
-	if (!ppgtt || !ppgtt->vm.has_read_only) {
+	vm = ctx->vm ?: &i915->mm.aliasing_ppgtt->vm;
+	if (!vm || !vm->has_read_only) {
 		err = 0;
 		goto out_unlock;
 	}

@@ -1244,7 +1242,7 @@ static int igt_ctx_readonly(void *arg)
 				pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %s], err=%d\n",
 				       ndwords, dw, max_dwords(obj),
 				       engine->name, ctx->hw_id,
-				       yesno(!!ctx->ppgtt), err);
+				       yesno(!!ctx->vm), err);
 				goto out_unlock;
 			}

@@ -1288,7 +1286,7 @@ static int igt_ctx_readonly(void *arg)
 static int check_scratch(struct i915_gem_context *ctx, u64 offset)
 {
 	struct drm_mm_node *node =
-		__drm_mm_interval_first(&ctx->ppgtt->vm.mm,
+		__drm_mm_interval_first(&ctx->vm->mm,
 					offset, offset + sizeof(u32) - 1);
 	if (!node || node->start > offset)
 		return 0;

@@ -1336,7 +1334,7 @@ static int write_to_scratch(struct i915_gem_context *ctx,
 	__i915_gem_object_flush_map(obj, 0, 64);
 	i915_gem_object_unpin_map(obj);
 
-	vma = i915_vma_instance(obj, &ctx->ppgtt->vm, NULL);
+	vma = i915_vma_instance(obj, ctx->vm, NULL);
 	if (IS_ERR(vma)) {
 		err = PTR_ERR(vma);
 		goto err;

@@ -1433,7 +1431,7 @@ static int read_from_scratch(struct i915_gem_context *ctx,
 	i915_gem_object_flush_map(obj);
 	i915_gem_object_unpin_map(obj);
 
-	vma = i915_vma_instance(obj, &ctx->ppgtt->vm, NULL);
+	vma = i915_vma_instance(obj, ctx->vm, NULL);
 	if (IS_ERR(vma)) {
 		err = PTR_ERR(vma);
 		goto err;

@@ -1542,11 +1540,11 @@ static int igt_vm_isolation(void *arg)
 	}
 
 	/* We can only test vm isolation, if the vm are distinct */
-	if (ctx_a->ppgtt == ctx_b->ppgtt)
+	if (ctx_a->vm == ctx_b->vm)
 		goto out_unlock;
 
-	vm_total = ctx_a->ppgtt->vm.total;
-	GEM_BUG_ON(ctx_b->ppgtt->vm.total != vm_total);
+	vm_total = ctx_a->vm->total;
+	GEM_BUG_ON(ctx_b->vm->total != vm_total);
 	vm_total -= I915_GTT_PAGE_SIZE;
 
 	wakeref = intel_runtime_pm_get(i915);
@@ -48,7 +48,8 @@ mock_context(struct drm_i915_private *i915,
 		if (!ppgtt)
 			goto err_put;
 
-		__set_ppgtt(ctx, ppgtt);
+		__set_ppgtt(ctx, &ppgtt->vm);
+		i915_vm_put(&ppgtt->vm);
 	}
 
 	return ctx;
@@ -1505,7 +1505,7 @@ __execlists_context_pin(struct intel_context *ce,
 	void *vaddr;
 	int ret;
 
-	GEM_BUG_ON(!ce->gem_context->ppgtt);
+	GEM_BUG_ON(!ce->gem_context->vm);
 
 	ret = execlists_context_deferred_alloc(ce, engine);
 	if (ret)

@@ -1621,7 +1621,8 @@ static int gen8_emit_init_breadcrumb(struct i915_request *rq)
 static int emit_pdps(struct i915_request *rq)
 {
 	const struct intel_engine_cs * const engine = rq->engine;
-	struct i915_hw_ppgtt * const ppgtt = rq->gem_context->ppgtt;
+	struct i915_hw_ppgtt * const ppgtt =
+		i915_vm_to_ppgtt(rq->gem_context->vm);
 	int err, i;
 	u32 *cs;

@@ -1694,7 +1695,7 @@ static int execlists_request_alloc(struct i915_request *request)
 	 */
 
 	/* Unconditionally invalidate GPU caches and TLBs. */
-	if (i915_vm_is_4lvl(&request->gem_context->ppgtt->vm))
+	if (i915_vm_is_4lvl(request->gem_context->vm))
 		ret = request->engine->emit_flush(request, EMIT_INVALIDATE);
 	else
 		ret = emit_pdps(request);

@@ -2824,7 +2825,7 @@ static void execlists_init_reg_state(u32 *regs,
 				     struct intel_engine_cs *engine,
 				     struct intel_ring *ring)
 {
-	struct i915_hw_ppgtt *ppgtt = ce->gem_context->ppgtt;
+	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(ce->gem_context->vm);
 	bool rcs = engine->class == RENDER_CLASS;
 	u32 base = engine->mmio_base;
@@ -1330,23 +1330,23 @@ static void ring_context_destroy(struct kref *ref)
 
 static int __context_pin_ppgtt(struct i915_gem_context *ctx)
 {
-	struct i915_hw_ppgtt *ppgtt;
+	struct i915_address_space *vm;
 	int err = 0;
 
-	ppgtt = ctx->ppgtt ?: ctx->i915->mm.aliasing_ppgtt;
-	if (ppgtt)
-		err = gen6_ppgtt_pin(ppgtt);
+	vm = ctx->vm ?: &ctx->i915->mm.aliasing_ppgtt->vm;
+	if (vm)
+		err = gen6_ppgtt_pin(i915_vm_to_ppgtt((vm)));
 
 	return err;
 }
 
 static void __context_unpin_ppgtt(struct i915_gem_context *ctx)
 {
-	struct i915_hw_ppgtt *ppgtt;
+	struct i915_address_space *vm;
 
-	ppgtt = ctx->ppgtt ?: ctx->i915->mm.aliasing_ppgtt;
-	if (ppgtt)
-		gen6_ppgtt_unpin(ppgtt);
+	vm = ctx->vm ?: &ctx->i915->mm.aliasing_ppgtt->vm;
+	if (vm)
+		gen6_ppgtt_unpin(i915_vm_to_ppgtt(vm));
 }
 
 static int __context_pin(struct intel_context *ce)

@@ -1704,14 +1704,16 @@ static int switch_context(struct i915_request *rq)
 {
 	struct intel_engine_cs *engine = rq->engine;
 	struct i915_gem_context *ctx = rq->gem_context;
-	struct i915_hw_ppgtt *ppgtt = ctx->ppgtt ?: rq->i915->mm.aliasing_ppgtt;
+	struct i915_address_space *vm =
+		ctx->vm ?: &rq->i915->mm.aliasing_ppgtt->vm;
 	unsigned int unwind_mm = 0;
 	u32 hw_flags = 0;
 	int ret, i;
 
 	GEM_BUG_ON(HAS_EXECLISTS(rq->i915));
 
-	if (ppgtt) {
+	if (vm) {
+		struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
 		int loops;
 
 		/*

@@ -1758,7 +1760,7 @@ static int switch_context(struct i915_request *rq)
 			goto err_mm;
 	}
 
-	if (ppgtt) {
+	if (vm) {
 		ret = engine->emit_flush(rq, EMIT_INVALIDATE);
 		if (ret)
 			goto err_mm;

@@ -1801,7 +1803,7 @@ static int switch_context(struct i915_request *rq)
 
 err_mm:
 	if (unwind_mm)
-		ppgtt->pd_dirty_engines |= unwind_mm;
+		i915_vm_to_ppgtt(vm)->pd_dirty_engines |= unwind_mm;
 err:
 	return ret;
 }
@@ -128,8 +128,7 @@ static struct i915_request *
 hang_create_request(struct hang *h, struct intel_engine_cs *engine)
 {
 	struct drm_i915_private *i915 = h->i915;
-	struct i915_address_space *vm =
-		h->ctx->ppgtt ? &h->ctx->ppgtt->vm : &i915->ggtt.vm;
+	struct i915_address_space *vm = h->ctx->vm ?: &i915->ggtt.vm;
 	struct i915_request *rq = NULL;
 	struct i915_vma *hws, *vma;
 	unsigned int flags;

@@ -1354,8 +1353,8 @@ static int igt_reset_evict_ppgtt(void *arg)
 	}
 
 	err = 0;
-	if (ctx->ppgtt) /* aliasing == global gtt locking, covered above */
-		err = __igt_reset_evict_vma(i915, &ctx->ppgtt->vm,
+	if (ctx->vm) /* aliasing == global gtt locking, covered above */
+		err = __igt_reset_evict_vma(i915, ctx->vm,
 					    evict_vma, EXEC_OBJECT_WRITE);
 
 out:
@@ -1090,7 +1090,7 @@ static int smoke_submit(struct preempt_smoke *smoke,
 	int err = 0;
 
 	if (batch) {
-		vma = i915_vma_instance(batch, &ctx->ppgtt->vm, NULL);
+		vma = i915_vma_instance(batch, ctx->vm, NULL);
 		if (IS_ERR(vma))
 			return PTR_ERR(vma);
@@ -358,7 +358,7 @@ static struct i915_vma *create_batch(struct i915_gem_context *ctx)
 	if (IS_ERR(obj))
 		return ERR_CAST(obj);
 
-	vma = i915_vma_instance(obj, &ctx->ppgtt->vm, NULL);
+	vma = i915_vma_instance(obj, ctx->vm, NULL);
 	if (IS_ERR(vma)) {
 		err = PTR_ERR(vma);
 		goto err_obj;

@@ -442,7 +442,7 @@ static int check_dirty_whitelist(struct i915_gem_context *ctx,
 	int err = 0, i, v;
 	u32 *cs, *results;
 
-	scratch = create_scratch(&ctx->ppgtt->vm, 2 * ARRAY_SIZE(values) + 1);
+	scratch = create_scratch(ctx->vm, 2 * ARRAY_SIZE(values) + 1);
 	if (IS_ERR(scratch))
 		return PTR_ERR(scratch);

@@ -925,7 +925,7 @@ static int live_isolated_whitelist(void *arg)
 	if (!intel_engines_has_context_isolation(i915))
 		return 0;
 
-	if (!i915->kernel_context->ppgtt)
+	if (!i915->kernel_context->vm)
 		return 0;
 
 	for (i = 0; i < ARRAY_SIZE(client); i++) {

@@ -937,14 +937,14 @@ static int live_isolated_whitelist(void *arg)
 			goto err;
 		}
 
-		client[i].scratch[0] = create_scratch(&c->ppgtt->vm, 1024);
+		client[i].scratch[0] = create_scratch(c->vm, 1024);
 		if (IS_ERR(client[i].scratch[0])) {
 			err = PTR_ERR(client[i].scratch[0]);
 			kernel_context_close(c);
 			goto err;
 		}
 
-		client[i].scratch[1] = create_scratch(&c->ppgtt->vm, 1024);
+		client[i].scratch[1] = create_scratch(c->vm, 1024);
 		if (IS_ERR(client[i].scratch[1])) {
 			err = PTR_ERR(client[i].scratch[1]);
 			i915_vma_unpin_and_release(&client[i].scratch[0], 0);
@@ -368,7 +368,7 @@ static int set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload,
 					 struct i915_gem_context *ctx)
 {
 	struct intel_vgpu_mm *mm = workload->shadow_mm;
-	struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
+	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(ctx->vm);
 	int i = 0;
 
 	if (mm->type != INTEL_GVT_MM_PPGTT || !mm->ppgtt_mm.shadowed)

@@ -1130,7 +1130,7 @@ void intel_vgpu_clean_submission(struct intel_vgpu *vgpu)
 
 	intel_vgpu_select_submission_ops(vgpu, ALL_ENGINES, 0);
 
-	i915_context_ppgtt_root_restore(s, s->shadow[0]->gem_context->ppgtt);
+	i915_context_ppgtt_root_restore(s, i915_vm_to_ppgtt(s->shadow[0]->gem_context->vm));
 	for_each_engine(engine, vgpu->gvt->dev_priv, id)
 		intel_context_unpin(s->shadow[id]);

@@ -1195,7 +1195,7 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
 	if (IS_ERR(ctx))
 		return PTR_ERR(ctx);
 
-	i915_context_ppgtt_root_save(s, ctx->ppgtt);
+	i915_context_ppgtt_root_save(s, i915_vm_to_ppgtt(ctx->vm));
 
 	for_each_engine(engine, vgpu->gvt->dev_priv, i) {
 		struct intel_context *ce;

@@ -1238,7 +1238,7 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
 	return 0;
 
 out_shadow_ctx:
-	i915_context_ppgtt_root_restore(s, ctx->ppgtt);
+	i915_context_ppgtt_root_restore(s, i915_vm_to_ppgtt(ctx->vm));
 	for_each_engine(engine, vgpu->gvt->dev_priv, i) {
 		if (IS_ERR(s->shadow[i]))
 			break;
@@ -427,7 +427,7 @@ static void print_context_stats(struct seq_file *m,
 		i915_gem_context_unlock_engines(ctx);
 
 		if (!IS_ERR_OR_NULL(ctx->file_priv)) {
-			struct file_stats stats = { .vm = &ctx->ppgtt->vm, };
+			struct file_stats stats = { .vm = ctx->vm, };
 			struct drm_file *file = ctx->file_priv->file;
 			struct task_struct *task;
 			char name[80];
@@ -2658,12 +2658,6 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
 struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
 				      struct drm_gem_object *gem_obj, int flags);
 
-static inline struct i915_hw_ppgtt *
-i915_vm_to_ppgtt(struct i915_address_space *vm)
-{
-	return container_of(vm, struct i915_hw_ppgtt, vm);
-}
-
 /* i915_gem_fence_reg.c */
 struct drm_i915_fence_reg *
 i915_reserve_fence(struct drm_i915_private *dev_priv);
@@ -483,6 +483,8 @@ static void vm_free_page(struct i915_address_space *vm, struct page *page)
 
 static void i915_address_space_init(struct i915_address_space *vm, int subclass)
 {
+	kref_init(&vm->ref);
+
 	/*
 	 * The vm->mutex must be reclaim safe (for use in the shrinker).
 	 * Do a dummy acquire now under fs_reclaim so that any allocation

@@ -1230,9 +1232,8 @@ static int gen8_init_scratch(struct i915_address_space *vm)
 	 */
 	if (vm->has_read_only &&
 	    vm->i915->kernel_context &&
-	    vm->i915->kernel_context->ppgtt) {
-		struct i915_address_space *clone =
-			&vm->i915->kernel_context->ppgtt->vm;
+	    vm->i915->kernel_context->vm) {
+		struct i915_address_space *clone = vm->i915->kernel_context->vm;
 
 		GEM_BUG_ON(!clone->has_read_only);

@@ -1591,8 +1592,6 @@ static int gen8_preallocate_top_level_pdp(struct i915_hw_ppgtt *ppgtt)
 static void ppgtt_init(struct drm_i915_private *i915,
 		       struct i915_hw_ppgtt *ppgtt)
 {
-	kref_init(&ppgtt->ref);
-
 	ppgtt->vm.i915 = i915;
 	ppgtt->vm.dma = &i915->drm.pdev->dev;
 	ppgtt->vm.total = BIT_ULL(INTEL_INFO(i915)->ppgtt_size);

@@ -2272,21 +2271,23 @@ static void ppgtt_destroy_vma(struct i915_address_space *vm)
 	}
 }
 
-void i915_ppgtt_release(struct kref *kref)
+void i915_vm_release(struct kref *kref)
 {
-	struct i915_hw_ppgtt *ppgtt =
-		container_of(kref, struct i915_hw_ppgtt, ref);
+	struct i915_address_space *vm =
+		container_of(kref, struct i915_address_space, ref);
 
-	trace_i915_ppgtt_release(&ppgtt->vm);
+	GEM_BUG_ON(i915_is_ggtt(vm));
+	trace_i915_ppgtt_release(vm);
 
-	ppgtt_destroy_vma(&ppgtt->vm);
+	ppgtt_destroy_vma(vm);
 
-	GEM_BUG_ON(!list_empty(&ppgtt->vm.bound_list));
-	GEM_BUG_ON(!list_empty(&ppgtt->vm.unbound_list));
+	GEM_BUG_ON(!list_empty(&vm->bound_list));
+	GEM_BUG_ON(!list_empty(&vm->unbound_list));
 
-	ppgtt->vm.cleanup(&ppgtt->vm);
-	i915_address_space_fini(&ppgtt->vm);
-	kfree(ppgtt);
+	vm->cleanup(vm);
+	i915_address_space_fini(vm);
+
+	kfree(vm);
 }
 
 /* Certain Gen5 chipsets require require idling the GPU before

@@ -2788,7 +2789,7 @@ static int init_aliasing_ppgtt(struct drm_i915_private *i915)
 	return 0;
 
 err_ppgtt:
-	i915_ppgtt_put(ppgtt);
+	i915_vm_put(&ppgtt->vm);
 	return err;
 }

@@ -2801,7 +2802,7 @@ static void fini_aliasing_ppgtt(struct drm_i915_private *i915)
 	if (!ppgtt)
 		return;
 
-	i915_ppgtt_put(ppgtt);
+	i915_vm_put(&ppgtt->vm);
 
 	ggtt->vm.vma_ops.bind_vma   = ggtt_bind_vma;
 	ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma;
@@ -293,6 +293,8 @@ struct pagestash {
 };
 
 struct i915_address_space {
+	struct kref ref;
+
 	struct drm_mm mm;
 	struct drm_i915_private *i915;
 	struct device *dma;

@@ -412,7 +414,6 @@ struct i915_ggtt {
 
 struct i915_hw_ppgtt {
 	struct i915_address_space vm;
-	struct kref ref;
 
 	intel_engine_mask_t pd_dirty_engines;
 	union {

@@ -582,10 +583,19 @@ i915_page_dir_dma_addr(const struct i915_hw_ppgtt *ppgtt, const unsigned n)
 static inline struct i915_ggtt *
 i915_vm_to_ggtt(struct i915_address_space *vm)
 {
+	BUILD_BUG_ON(offsetof(struct i915_ggtt, vm));
 	GEM_BUG_ON(!i915_is_ggtt(vm));
 	return container_of(vm, struct i915_ggtt, vm);
 }
 
+static inline struct i915_hw_ppgtt *
+i915_vm_to_ppgtt(struct i915_address_space *vm)
+{
+	BUILD_BUG_ON(offsetof(struct i915_hw_ppgtt, vm));
+	GEM_BUG_ON(i915_is_ggtt(vm));
+	return container_of(vm, struct i915_hw_ppgtt, vm);
+}
+
 #define INTEL_MAX_PPAT_ENTRIES 8
 #define INTEL_PPAT_PERFECT_MATCH (~0U)

@@ -628,18 +638,19 @@ void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv);
 int i915_ppgtt_init_hw(struct drm_i915_private *dev_priv);
 
 struct i915_hw_ppgtt *i915_ppgtt_create(struct drm_i915_private *dev_priv);
-void i915_ppgtt_release(struct kref *kref);
 
-static inline struct i915_hw_ppgtt *i915_ppgtt_get(struct i915_hw_ppgtt *ppgtt)
+static inline struct i915_address_space *
+i915_vm_get(struct i915_address_space *vm)
 {
-	kref_get(&ppgtt->ref);
-	return ppgtt;
+	kref_get(&vm->ref);
+	return vm;
 }
 
-static inline void i915_ppgtt_put(struct i915_hw_ppgtt *ppgtt)
+void i915_vm_release(struct kref *kref);
+
+static inline void i915_vm_put(struct i915_address_space *vm)
 {
-	if (ppgtt)
-		kref_put(&ppgtt->ref, i915_ppgtt_release);
+	kref_put(&vm->ref, i915_vm_release);
 }
 
 int gen6_ppgtt_pin(struct i915_hw_ppgtt *base);
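A note on the helpers above: the BUILD_BUG_ON(offsetof(..., vm)) lines pin vm as the first member of both derived structs, so each container_of() downcast compiles down to a plain pointer cast, while GEM_BUG_ON(i915_is_ggtt(vm)) catches an attempt to treat the global GTT as a ppGTT at runtime. An illustrative sketch of how a caller combines them (assembled from the functions in this patch, not a verbatim excerpt):

	struct i915_address_space *vm = i915_vm_get(ctx->vm);	/* hold a reference */

	if (!i915_is_ggtt(vm)) {
		/* ppGTT-only state is reached through the checked downcast */
		struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);

		ppgtt->pd_dirty_engines = 0;
	}

	i915_vm_put(vm);	/* may free the ppGTT via i915_vm_release() */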
@@ -1431,7 +1431,7 @@ static void gem_record_rings(struct i915_gpu_state *error)
 			struct i915_gem_context *ctx = request->gem_context;
 			struct intel_ring *ring;
 
-			ee->vm = ctx->ppgtt ? &ctx->ppgtt->vm : &ggtt->vm;
+			ee->vm = ctx->vm ?: &ggtt->vm;
 
 			record_context(&ee->context, ctx);
@@ -977,7 +977,7 @@ DECLARE_EVENT_CLASS(i915_context,
 			__entry->dev = ctx->i915->drm.primary->index;
 			__entry->ctx = ctx;
 			__entry->hw_id = ctx->hw_id;
-			__entry->vm = ctx->ppgtt ? &ctx->ppgtt->vm : NULL;
+			__entry->vm = ctx->vm;
 	),
 
 	TP_printk("dev=%u, ctx=%p, ctx_vm=%p, hw_id=%u",
@@ -209,7 +209,7 @@ static int igt_ppgtt_alloc(void *arg)
 
 err_ppgtt_cleanup:
 	mutex_lock(&dev_priv->drm.struct_mutex);
-	i915_ppgtt_put(ppgtt);
+	i915_vm_put(&ppgtt->vm);
 	mutex_unlock(&dev_priv->drm.struct_mutex);
 	return err;
 }

@@ -1021,7 +1021,7 @@ static int exercise_ppgtt(struct drm_i915_private *dev_priv,
 
 	err = func(dev_priv, &ppgtt->vm, 0, ppgtt->vm.total, end_time);
 
-	i915_ppgtt_put(ppgtt);
+	i915_vm_put(&ppgtt->vm);
 out_unlock:
 	mutex_unlock(&dev_priv->drm.struct_mutex);

@@ -1251,7 +1251,6 @@ static int exercise_mock(struct drm_i915_private *i915,
 {
 	const u64 limit = totalram_pages() << PAGE_SHIFT;
 	struct i915_gem_context *ctx;
-	struct i915_hw_ppgtt *ppgtt;
 	IGT_TIMEOUT(end_time);
 	int err;

@@ -1259,10 +1258,7 @@ static int exercise_mock(struct drm_i915_private *i915,
 	if (!ctx)
 		return -ENOMEM;
 
-	ppgtt = ctx->ppgtt;
-	GEM_BUG_ON(!ppgtt);
-
-	err = func(i915, &ppgtt->vm, 0, min(ppgtt->vm.total, limit), end_time);
+	err = func(i915, ctx->vm, 0, min(ctx->vm->total, limit), end_time);
 
 	mock_context_close(ctx);
 	return err;
@@ -754,8 +754,7 @@ static int live_empty_request(void *arg)
 static struct i915_vma *recursive_batch(struct drm_i915_private *i915)
 {
 	struct i915_gem_context *ctx = i915->kernel_context;
-	struct i915_address_space *vm =
-		ctx->ppgtt ? &ctx->ppgtt->vm : &i915->ggtt.vm;
+	struct i915_address_space *vm = ctx->vm ?: &i915->ggtt.vm;
 	struct drm_i915_gem_object *obj;
 	const int gen = INTEL_GEN(i915);
 	struct i915_vma *vma;
@@ -38,7 +38,7 @@ static bool assert_vma(struct i915_vma *vma,
 {
 	bool ok = true;
 
-	if (vma->vm != &ctx->ppgtt->vm) {
+	if (vma->vm != ctx->vm) {
 		pr_err("VMA created with wrong VM\n");
 		ok = false;
 	}

@@ -113,7 +113,7 @@ static int create_vmas(struct drm_i915_private *i915,
 	list_for_each_entry(obj, objects, st_link) {
 		for (pinned = 0; pinned <= 1; pinned++) {
 			list_for_each_entry(ctx, contexts, link) {
-				struct i915_address_space *vm = &ctx->ppgtt->vm;
+				struct i915_address_space *vm = ctx->vm;
 				struct i915_vma *vma;
 				int err;
@@ -89,17 +89,16 @@ igt_spinner_create_request(struct igt_spinner *spin,
 			   struct intel_engine_cs *engine,
 			   u32 arbitration_command)
 {
-	struct i915_address_space *vm = &ctx->ppgtt->vm;
 	struct i915_request *rq = NULL;
 	struct i915_vma *hws, *vma;
 	u32 *batch;
 	int err;
 
-	vma = i915_vma_instance(spin->obj, vm, NULL);
+	vma = i915_vma_instance(spin->obj, ctx->vm, NULL);
 	if (IS_ERR(vma))
 		return ERR_CAST(vma);
 
-	hws = i915_vma_instance(spin->hws, vm, NULL);
+	hws = i915_vma_instance(spin->hws, ctx->vm, NULL);
 	if (IS_ERR(hws))
 		return ERR_CAST(hws);
@@ -65,7 +65,6 @@ mock_ppgtt(struct drm_i915_private *i915,
 	if (!ppgtt)
 		return NULL;
 
-	kref_init(&ppgtt->ref);
 	ppgtt->vm.i915 = i915;
 	ppgtt->vm.total = round_down(U64_MAX, PAGE_SIZE);
 	ppgtt->vm.file = ERR_PTR(-ENODEV);