Commit c52b3b48 authored by Matthew Auld

drm/i915/clflush: disallow on discrete

We seem to have an unfortunate issue where we arrive from:

    i915_gem_object_flush_if_display+0x86/0xd0 [i915]
    intel_user_framebuffer_dirty+0x1a/0x50 [i915]
    drm_mode_dirtyfb_ioctl+0xfb/0x1b0

which can be before the pages are populated (and pinned for display), and
so i915_gem_object_has_struct_page() might still return true, as per the
ttm backend. We could re-order the later get_pages() call here, but
since on discrete everything should already be coherent, with the
exception of the display engine, and even there display surfaces must be
allocated in device local-memory anyway, so there should in theory be no
conceivable reason to ever call i915_gem_clflush_object() on discrete.

References: https://gitlab.freedesktop.org/drm/intel/-/issues/4320
Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Reviewed-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20211027161813.3094681-2-matthew.auld@intel.com
parent 3ea355b2
...@@ -69,6 +69,7 @@ static struct clflush *clflush_work_create(struct drm_i915_gem_object *obj) ...@@ -69,6 +69,7 @@ static struct clflush *clflush_work_create(struct drm_i915_gem_object *obj)
bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool i915_gem_clflush_object(struct drm_i915_gem_object *obj,
unsigned int flags) unsigned int flags)
{ {
struct drm_i915_private *i915 = to_i915(obj->base.dev);
struct clflush *clflush; struct clflush *clflush;
assert_object_held(obj); assert_object_held(obj);
...@@ -80,7 +81,7 @@ bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, ...@@ -80,7 +81,7 @@ bool i915_gem_clflush_object(struct drm_i915_gem_object *obj,
* anything not backed by physical memory we consider to be always * anything not backed by physical memory we consider to be always
* coherent and not need clflushing. * coherent and not need clflushing.
*/ */
if (!i915_gem_object_has_struct_page(obj)) { if (!i915_gem_object_has_struct_page(obj) || IS_DGFX(i915)) {
obj->cache_dirty = false; obj->cache_dirty = false;
return false; return false;
} }
...@@ -105,7 +106,7 @@ bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, ...@@ -105,7 +106,7 @@ bool i915_gem_clflush_object(struct drm_i915_gem_object *obj,
if (clflush) { if (clflush) {
i915_sw_fence_await_reservation(&clflush->base.chain, i915_sw_fence_await_reservation(&clflush->base.chain,
obj->base.resv, NULL, true, obj->base.resv, NULL, true,
i915_fence_timeout(to_i915(obj->base.dev)), i915_fence_timeout(i915),
I915_FENCE_GFP); I915_FENCE_GFP);
dma_resv_add_excl_fence(obj->base.resv, &clflush->base.dma); dma_resv_add_excl_fence(obj->base.resv, &clflush->base.dma);
dma_fence_work_commit(&clflush->base); dma_fence_work_commit(&clflush->base);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment