Commit 2267f684 authored by Chris Wilson

drm/i915/gt: Flush gen3 relocs harder, again

gen3 does not fully flush MI stores to memory on MI_FLUSH, such that a
subsequent read from e.g. the sampler can bypass the store and read the
stale value from memory. This is a serious issue when we are using MI
stores to rewrite the batches for relocation, as it means that the batch
is reading from random user/kernel memory. While it is particularly
sensitive [and detectable] for relocations, reading stale data at any
time is a worry.

Starting with a small number of delaying stores and doubling until no more
incoherency was seen over a few hours (with and without background memory
pressure), 32 was found to be the magic number.

Note that this definitely does not fix the issue; it merely adds a delay
between requests long enough to mostly hide the problem, raising the mtbf
to several hours. This is a stopgap only.

v2: Follow the gen5 w/a more closely and include some post-invalidate
flushes as well.
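As a rough userspace model of the workaround's shape (a sketch only, not the
driver code: the opcode values, the HWS_SCRATCH_DW slot index and the buffer
handling are illustrative stand-ins, while the loop structure and the count
of 12 delaying stores mirror the patched gen2_emit_flush() in the diff below):

#include <stdint.h>
#include <stdio.h>

/* Illustrative opcode values only; the real definitions live in the
 * driver's command headers and these stand-ins must not be relied on.
 */
#define MI_FLUSH		(0x04 << 23)
#define MI_READ_FLUSH		(1 << 0)
#define MI_NO_WRITE_FLUSH	(1 << 2)
#define MI_STORE_DWORD_INDEX	((0x21 << 23) | 1)
#define HWS_SCRATCH_DW		4	/* hypothetical status-page slot */

#define NUM_STORE_DW		12	/* delaying stores, as in the patch */

/* Fill a command buffer the way the patched gen2_emit_flush() does:
 * one flush, then a burst of dummy stores into the status page (each
 * chased by a read-only flush), then the flush command again.
 */
static uint32_t *emit_flush(uint32_t *cs, int invalidate)
{
	unsigned int num_store_dw = NUM_STORE_DW;
	uint32_t cmd = MI_FLUSH;

	if (invalidate)
		cmd |= MI_READ_FLUSH;

	*cs++ = cmd;
	while (num_store_dw--) {
		*cs++ = MI_STORE_DWORD_INDEX;
		*cs++ = (uint32_t)(HWS_SCRATCH_DW * sizeof(uint32_t));
		*cs++ = 0;			/* dummy payload */
		*cs++ = MI_FLUSH | MI_NO_WRITE_FLUSH;
	}
	*cs++ = cmd;	/* v2: repeat the (possibly invalidating) flush */

	return cs;
}

int main(void)
{
	uint32_t ring[2 + 4 * NUM_STORE_DW];
	uint32_t *end = emit_flush(ring, 1);

	/* matches the intel_ring_begin(rq, 2 + 4 * num_store_dw) reservation */
	printf("emitted %u dwords\n", (unsigned int)(end - ring));
	return 0;
}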

Closes: https://gitlab.freedesktop.org/drm/intel/-/issues/2018
References: a889580c ("drm/i915: Flush GPU relocs harder for gen3")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200612123949.7093-1-chris@chris-wilson.co.uk
parent d4b02a4c
@@ -13,28 +13,25 @@
 int gen2_emit_flush(struct i915_request *rq, u32 mode)
 {
-	unsigned int num_store_dw;
+	unsigned int num_store_dw = 12;
 	u32 cmd, *cs;
 
 	cmd = MI_FLUSH;
-	num_store_dw = 0;
 	if (mode & EMIT_INVALIDATE)
 		cmd |= MI_READ_FLUSH;
-	if (mode & EMIT_FLUSH)
-		num_store_dw = 4;
 
-	cs = intel_ring_begin(rq, 2 + 3 * num_store_dw);
+	cs = intel_ring_begin(rq, 2 + 4 * num_store_dw);
 	if (IS_ERR(cs))
 		return PTR_ERR(cs);
 
 	*cs++ = cmd;
 	while (num_store_dw--) {
-		*cs++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
-		*cs++ = intel_gt_scratch_offset(rq->engine->gt,
-						INTEL_GT_SCRATCH_FIELD_DEFAULT);
+		*cs++ = MI_STORE_DWORD_INDEX;
+		*cs++ = I915_GEM_HWS_SCRATCH * sizeof(u32);
 		*cs++ = 0;
+		*cs++ = MI_FLUSH | MI_NO_WRITE_FLUSH;
 	}
-	*cs++ = MI_FLUSH | MI_NO_WRITE_FLUSH;
+	*cs++ = cmd;
 
 	intel_ring_advance(rq, cs);
@@ -142,38 +139,21 @@ int gen4_emit_flush_vcs(struct i915_request *rq, u32 mode)
 	return 0;
 }
 
-u32 *gen3_emit_breadcrumb(struct i915_request *rq, u32 *cs)
+static u32 *__gen2_emit_breadcrumb(struct i915_request *rq, u32 *cs,
+				   int flush, int post)
 {
 	GEM_BUG_ON(i915_request_active_timeline(rq)->hwsp_ggtt != rq->engine->status_page.vma);
 	GEM_BUG_ON(offset_in_page(i915_request_active_timeline(rq)->hwsp_offset) != I915_GEM_HWS_SEQNO_ADDR);
 
 	*cs++ = MI_FLUSH;
 
-	*cs++ = MI_STORE_DWORD_INDEX;
-	*cs++ = I915_GEM_HWS_SEQNO_ADDR;
-	*cs++ = rq->fence.seqno;
-
-	*cs++ = MI_USER_INTERRUPT;
-	*cs++ = MI_NOOP;
-
-	rq->tail = intel_ring_offset(rq, cs);
-	assert_ring_tail_valid(rq->ring, rq->tail);
-
-	return cs;
-}
-
-#define GEN5_WA_STORES 8 /* must be at least 1! */
-u32 *gen5_emit_breadcrumb(struct i915_request *rq, u32 *cs)
-{
-	int i;
-
-	GEM_BUG_ON(i915_request_active_timeline(rq)->hwsp_ggtt != rq->engine->status_page.vma);
-	GEM_BUG_ON(offset_in_page(i915_request_active_timeline(rq)->hwsp_offset) != I915_GEM_HWS_SEQNO_ADDR);
-
-	*cs++ = MI_FLUSH;
-
-	BUILD_BUG_ON(GEN5_WA_STORES < 1);
-	for (i = 0; i < GEN5_WA_STORES; i++) {
+	while (flush--) {
+		*cs++ = MI_STORE_DWORD_INDEX;
+		*cs++ = I915_GEM_HWS_SCRATCH * sizeof(u32);
+		*cs++ = rq->fence.seqno;
+	}
+
+	while (post--) {
 		*cs++ = MI_STORE_DWORD_INDEX;
 		*cs++ = I915_GEM_HWS_SEQNO_ADDR;
 		*cs++ = rq->fence.seqno;
@@ -186,7 +166,16 @@ u32 *gen5_emit_breadcrumb(struct i915_request *rq, u32 *cs)
 
 	return cs;
 }
-#undef GEN5_WA_STORES
+
+u32 *gen3_emit_breadcrumb(struct i915_request *rq, u32 *cs)
+{
+	return __gen2_emit_breadcrumb(rq, cs, 16, 8);
+}
+
+u32 *gen5_emit_breadcrumb(struct i915_request *rq, u32 *cs)
+{
+	return __gen2_emit_breadcrumb(rq, cs, 8, 8);
+}
 
 /* Just userspace ABI convention to limit the wa batch bo to a resonable size */
 #define I830_BATCH_LIMIT SZ_256K
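A note on the arithmetic above: each pass of the gen2_emit_flush() store loop
now emits four dwords (the MI_STORE_DWORD_INDEX header, the status-page
offset, the dummy value, and a read-only MI_FLUSH) where it previously
emitted three, which is why intel_ring_begin() reserves
2 + 4 * num_store_dw = 2 + 4 * 12 = 50 dwords instead of
2 + 3 * num_store_dw. The shared breadcrumb helper likewise makes the
per-generation tuning explicit: gen3 emits 16 delaying stores to the scratch
slot before its 8 seqno stores (__gen2_emit_breadcrumb(rq, cs, 16, 8)), while
gen5 keeps its previous GEN5_WA_STORES count of 8 seqno stores and gains 8
scratch stores ahead of them (8, 8).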