Commit 82d71e31 authored by Chris Wilson

drm/i915/gt: Poison GTT scratch pages

Using a clear page for scratch means that we have relatively benign
errors in case it is accidentally used, but that can be rather too
benign for debugging. If we poison the scratch, ideally it quickly
results in an obvious error.

v2: Set each page individually just in case we are using highmem for our
scratch page.
v3: Pick a new scratch register as MI_STORE_REGISTER_MEM does not work
with GPR0 on gen7, unbelievably.
v4: Haswell still considers 3DPRIM a privileged register!
Suggested-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Cc: Matthew Auld <matthew.william.auld@gmail.com>
Reviewed-by: Matthew Auld <matthew.william.auld@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200124115133.53360-1-chris@chris-wilson.co.uk
parent 0ea60c1d
...@@ -1492,6 +1492,10 @@ static int write_to_scratch(struct i915_gem_context *ctx, ...@@ -1492,6 +1492,10 @@ static int write_to_scratch(struct i915_gem_context *ctx,
GEM_BUG_ON(offset < I915_GTT_PAGE_SIZE); GEM_BUG_ON(offset < I915_GTT_PAGE_SIZE);
err = check_scratch(ctx_vm(ctx), offset);
if (err)
return err;
obj = i915_gem_object_create_internal(i915, PAGE_SIZE); obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
if (IS_ERR(obj)) if (IS_ERR(obj))
return PTR_ERR(obj); return PTR_ERR(obj);
...@@ -1528,10 +1532,6 @@ static int write_to_scratch(struct i915_gem_context *ctx, ...@@ -1528,10 +1532,6 @@ static int write_to_scratch(struct i915_gem_context *ctx,
if (err) if (err)
goto out_vm; goto out_vm;
err = check_scratch(vm, offset);
if (err)
goto err_unpin;
rq = igt_request_alloc(ctx, engine); rq = igt_request_alloc(ctx, engine);
if (IS_ERR(rq)) { if (IS_ERR(rq)) {
err = PTR_ERR(rq); err = PTR_ERR(rq);
...@@ -1575,64 +1575,95 @@ static int read_from_scratch(struct i915_gem_context *ctx, ...@@ -1575,64 +1575,95 @@ static int read_from_scratch(struct i915_gem_context *ctx,
struct drm_i915_private *i915 = ctx->i915; struct drm_i915_private *i915 = ctx->i915;
struct drm_i915_gem_object *obj; struct drm_i915_gem_object *obj;
struct i915_address_space *vm; struct i915_address_space *vm;
const u32 RCS_GPR0 = 0x2600; /* not all engines have their own GPR! */
const u32 result = 0x100; const u32 result = 0x100;
struct i915_request *rq; struct i915_request *rq;
struct i915_vma *vma; struct i915_vma *vma;
unsigned int flags;
u32 *cmd; u32 *cmd;
int err; int err;
GEM_BUG_ON(offset < I915_GTT_PAGE_SIZE); GEM_BUG_ON(offset < I915_GTT_PAGE_SIZE);
err = check_scratch(ctx_vm(ctx), offset);
if (err)
return err;
obj = i915_gem_object_create_internal(i915, PAGE_SIZE); obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
if (IS_ERR(obj)) if (IS_ERR(obj))
return PTR_ERR(obj); return PTR_ERR(obj);
cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
if (IS_ERR(cmd)) {
err = PTR_ERR(cmd);
goto out;
}
memset(cmd, POISON_INUSE, PAGE_SIZE);
if (INTEL_GEN(i915) >= 8) { if (INTEL_GEN(i915) >= 8) {
const u32 GPR0 = engine->mmio_base + 0x600;
vm = i915_gem_context_get_vm_rcu(ctx);
vma = i915_vma_instance(obj, vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto out_vm;
}
err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_OFFSET_FIXED);
if (err)
goto out_vm;
cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
if (IS_ERR(cmd)) {
err = PTR_ERR(cmd);
goto out;
}
memset(cmd, POISON_INUSE, PAGE_SIZE);
*cmd++ = MI_LOAD_REGISTER_MEM_GEN8; *cmd++ = MI_LOAD_REGISTER_MEM_GEN8;
*cmd++ = RCS_GPR0; *cmd++ = GPR0;
*cmd++ = lower_32_bits(offset); *cmd++ = lower_32_bits(offset);
*cmd++ = upper_32_bits(offset); *cmd++ = upper_32_bits(offset);
*cmd++ = MI_STORE_REGISTER_MEM_GEN8; *cmd++ = MI_STORE_REGISTER_MEM_GEN8;
*cmd++ = RCS_GPR0; *cmd++ = GPR0;
*cmd++ = result; *cmd++ = result;
*cmd++ = 0; *cmd++ = 0;
*cmd = MI_BATCH_BUFFER_END;
i915_gem_object_flush_map(obj);
i915_gem_object_unpin_map(obj);
flags = 0;
} else { } else {
const u32 reg = engine->mmio_base + 0x420;
/* hsw: register access even to 3DPRIM! is protected */
vm = i915_vm_get(&engine->gt->ggtt->vm);
vma = i915_vma_instance(obj, vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto out_vm;
}
err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
if (err)
goto out_vm;
cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
if (IS_ERR(cmd)) {
err = PTR_ERR(cmd);
goto out;
}
memset(cmd, POISON_INUSE, PAGE_SIZE);
*cmd++ = MI_LOAD_REGISTER_MEM; *cmd++ = MI_LOAD_REGISTER_MEM;
*cmd++ = RCS_GPR0; *cmd++ = reg;
*cmd++ = offset; *cmd++ = offset;
*cmd++ = MI_STORE_REGISTER_MEM; *cmd++ = MI_STORE_REGISTER_MEM | MI_USE_GGTT;
*cmd++ = RCS_GPR0; *cmd++ = reg;
*cmd++ = result; *cmd++ = vma->node.start + result;
} *cmd = MI_BATCH_BUFFER_END;
*cmd = MI_BATCH_BUFFER_END;
i915_gem_object_flush_map(obj);
i915_gem_object_unpin_map(obj);
intel_gt_chipset_flush(engine->gt); i915_gem_object_flush_map(obj);
i915_gem_object_unpin_map(obj);
vm = i915_gem_context_get_vm_rcu(ctx); flags = I915_DISPATCH_SECURE;
vma = i915_vma_instance(obj, vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto out_vm;
} }
err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_OFFSET_FIXED); intel_gt_chipset_flush(engine->gt);
if (err)
goto out_vm;
err = check_scratch(vm, offset);
if (err)
goto err_unpin;
rq = igt_request_alloc(ctx, engine); rq = igt_request_alloc(ctx, engine);
if (IS_ERR(rq)) { if (IS_ERR(rq)) {
...@@ -1640,7 +1671,7 @@ static int read_from_scratch(struct i915_gem_context *ctx, ...@@ -1640,7 +1671,7 @@ static int read_from_scratch(struct i915_gem_context *ctx,
goto err_unpin; goto err_unpin;
} }
err = engine->emit_bb_start(rq, vma->node.start, vma->node.size, 0); err = engine->emit_bb_start(rq, vma->node.start, vma->node.size, flags);
if (err) if (err)
goto err_request; goto err_request;
...@@ -1686,6 +1717,39 @@ static int read_from_scratch(struct i915_gem_context *ctx, ...@@ -1686,6 +1717,39 @@ static int read_from_scratch(struct i915_gem_context *ctx,
return err; return err;
} }
/*
 * Read the initial fill value of the context VM's scratch page into *out
 * and verify the entire page is uniformly filled with it.
 *
 * @ctx: context whose address space's scratch page is inspected
 * @out: receives the first dword of the scratch page (the expected value
 *       later reads through scratch should return)
 *
 * Returns 0 on success, -ENODEV if the context has no VM, or -EINVAL if
 * the scratch page is absent, unmappable, or not uniformly filled.
 */
static int check_scratch_page(struct i915_gem_context *ctx, u32 *out)
{
	struct i915_address_space *vm;
	struct page *page;
	u32 *vaddr;
	int err = 0;

	vm = ctx_vm(ctx);
	if (!vm)
		return -ENODEV;

	/* Level-0 scratch: the page backing all otherwise-unused PTEs. */
	page = vm->scratch[0].base.page;
	if (!page) {
		pr_err("No scratch page!\n");
		return -EINVAL;
	}

	vaddr = kmap(page);
	if (!vaddr) {
		pr_err("No (mappable) scratch page!\n");
		return -EINVAL;
	}

	/* Sample the first dword as the expected scratch value... */
	memcpy(out, vaddr, sizeof(*out));

	/*
	 * ...and confirm the page is uniform (memchr_inv compares each byte
	 * against the low byte of *out; the poison fill is a repeated byte,
	 * so a uniform page implies a uniform dword pattern).
	 */
	if (memchr_inv(vaddr, *out, PAGE_SIZE)) {
		pr_err("Inconsistent initial state of scratch page!\n");
		err = -EINVAL;
	}
	kunmap(page);

	return err;
}
static int igt_vm_isolation(void *arg) static int igt_vm_isolation(void *arg)
{ {
struct drm_i915_private *i915 = arg; struct drm_i915_private *i915 = arg;
...@@ -1696,6 +1760,7 @@ static int igt_vm_isolation(void *arg) ...@@ -1696,6 +1760,7 @@ static int igt_vm_isolation(void *arg)
I915_RND_STATE(prng); I915_RND_STATE(prng);
struct file *file; struct file *file;
u64 vm_total; u64 vm_total;
u32 expected;
int err; int err;
if (INTEL_GEN(i915) < 7) if (INTEL_GEN(i915) < 7)
...@@ -1730,6 +1795,15 @@ static int igt_vm_isolation(void *arg) ...@@ -1730,6 +1795,15 @@ static int igt_vm_isolation(void *arg)
if (ctx_vm(ctx_a) == ctx_vm(ctx_b)) if (ctx_vm(ctx_a) == ctx_vm(ctx_b))
goto out_file; goto out_file;
/* Read the initial state of the scratch page */
err = check_scratch_page(ctx_a, &expected);
if (err)
goto out_file;
err = check_scratch_page(ctx_b, &expected);
if (err)
goto out_file;
vm_total = ctx_vm(ctx_a)->total; vm_total = ctx_vm(ctx_a)->total;
GEM_BUG_ON(ctx_vm(ctx_b)->total != vm_total); GEM_BUG_ON(ctx_vm(ctx_b)->total != vm_total);
vm_total -= I915_GTT_PAGE_SIZE; vm_total -= I915_GTT_PAGE_SIZE;
...@@ -1743,6 +1817,10 @@ static int igt_vm_isolation(void *arg) ...@@ -1743,6 +1817,10 @@ static int igt_vm_isolation(void *arg)
if (!intel_engine_can_store_dword(engine)) if (!intel_engine_can_store_dword(engine))
continue; continue;
/* Not all engines have their own GPR! */
if (INTEL_GEN(i915) < 8 && engine->class != RENDER_CLASS)
continue;
while (!__igt_timeout(end_time, NULL)) { while (!__igt_timeout(end_time, NULL)) {
u32 value = 0xc5c5c5c5; u32 value = 0xc5c5c5c5;
u64 offset; u64 offset;
...@@ -1760,7 +1838,7 @@ static int igt_vm_isolation(void *arg) ...@@ -1760,7 +1838,7 @@ static int igt_vm_isolation(void *arg)
if (err) if (err)
goto out_file; goto out_file;
if (value) { if (value != expected) {
pr_err("%s: Read %08x from scratch (offset 0x%08x_%08x), after %lu reads!\n", pr_err("%s: Read %08x from scratch (offset 0x%08x_%08x), after %lu reads!\n",
engine->name, value, engine->name, value,
upper_32_bits(offset), upper_32_bits(offset),
......
...@@ -299,6 +299,25 @@ fill_page_dma(const struct i915_page_dma *p, const u64 val, unsigned int count) ...@@ -299,6 +299,25 @@ fill_page_dma(const struct i915_page_dma *p, const u64 val, unsigned int count)
kunmap_atomic(memset64(kmap_atomic(p->page), val, count)); kunmap_atomic(memset64(kmap_atomic(p->page), val, count));
} }
/*
 * Fill every page of the scratch allocation with POISON_FREE so that any
 * accidental read through scratch produces an obvious pattern in the error
 * state (and, if fetched as commands, a GPU hang) instead of benign zeroes.
 *
 * Compiled out on non-CONFIG_DRM_I915_DEBUG_GEM builds, where a clear
 * scratch page is preferred for its benign failure mode.
 *
 * @page: first page of a physically contiguous scratch allocation
 * @size: total allocation size; must be a whole number of pages
 */
static void poison_scratch_page(struct page *page, unsigned long size)
{
	unsigned long remain = size;

	if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
		return;

	GEM_BUG_ON(!IS_ALIGNED(remain, PAGE_SIZE));

	/*
	 * Map and poison one page at a time in case the scratch allocation
	 * lives in highmem and cannot be addressed as a single mapping.
	 */
	do {
		void *ptr = kmap(page);

		memset(ptr, POISON_FREE, PAGE_SIZE);
		kunmap(page);

		page = pfn_to_page(page_to_pfn(page) + 1);
		remain -= PAGE_SIZE;
	} while (remain);
}
int setup_scratch_page(struct i915_address_space *vm, gfp_t gfp) int setup_scratch_page(struct i915_address_space *vm, gfp_t gfp)
{ {
unsigned long size; unsigned long size;
...@@ -331,6 +350,17 @@ int setup_scratch_page(struct i915_address_space *vm, gfp_t gfp) ...@@ -331,6 +350,17 @@ int setup_scratch_page(struct i915_address_space *vm, gfp_t gfp)
if (unlikely(!page)) if (unlikely(!page))
goto skip; goto skip;
/*
* Use a non-zero scratch page for debugging.
*
* We want a value that should be reasonably obvious
* to spot in the error state, while also causing a GPU hang
* if executed. We prefer using a clear page in production, so
* should it ever be accidentally used, the effect should be
* fairly benign.
*/
poison_scratch_page(page, size);
addr = dma_map_page_attrs(vm->dma, addr = dma_map_page_attrs(vm->dma,
page, 0, size, page, 0, size,
PCI_DMA_BIDIRECTIONAL, PCI_DMA_BIDIRECTIONAL,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment