Commit cc337560 authored by Chris Wilson

drm/i915: Use 0 for the unordered context

Since commit 078dec33 ("dma-buf: add dma_fence_get_stub"), fence
context 0 has been an impossible match, as it is reserved for the
always-signaled stub fence. We can therefore simplify our timeline
tracking by knowing that a context of 0 always means no match.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190819184404.24200-1-chris@chris-wilson.co.uk
Link: https://patchwork.freedesktop.org/patch/msgid/20190819175109.5241-1-chris@chris-wilson.co.uk
parent e1d7b66b
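
For background (not part of this patch; a paraphrased sketch of
drivers/dma-buf/dma-fence.c after commit 078dec33): the global stub
fence occupies context 0 and is signaled at creation, while real
timelines are handed out by dma_fence_context_alloc() starting from 1,
so no real fence can ever carry context 0.

        /* Contexts start at 1; 0 is reserved for the stub fence. */
        static atomic64_t dma_fence_context_counter = ATOMIC64_INIT(1);

        u64 dma_fence_context_alloc(unsigned num)
        {
                WARN_ON(!num);
                return atomic64_add_return(num, &dma_fence_context_counter) - num;
        }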
@@ -134,8 +134,7 @@ bool i915_gem_clflush_object(struct drm_i915_gem_object *obj,
 		dma_fence_init(&clflush->dma,
 			       &i915_clflush_ops,
 			       &clflush_lock,
-			       to_i915(obj->base.dev)->mm.unordered_timeline,
-			       0);
+			       0, 0);
 		i915_sw_fence_init(&clflush->wait, i915_clflush_notify);
 
 		clflush->obj = i915_gem_object_get(obj);
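
For reference, the fence context and initial seqno are the last two
arguments to dma_fence_init(), so passing 0 for both marks the fence as
unordered; the signature, from include/linux/dma-fence.h:

        void dma_fence_init(struct dma_fence *fence,
                            const struct dma_fence_ops *ops,
                            spinlock_t *lock,
                            u64 context, u64 seqno);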
@@ -267,7 +267,6 @@ int i915_gem_schedule_fill_pages_blt(struct drm_i915_gem_object *obj,
 				     struct i915_page_sizes *page_sizes,
 				     u32 value)
 {
-	struct drm_i915_private *i915 = to_i915(obj->base.dev);
 	struct clear_pages_work *work;
 	struct i915_sleeve *sleeve;
 	int err;
@@ -290,11 +289,7 @@ int i915_gem_schedule_fill_pages_blt(struct drm_i915_gem_object *obj,
 
 	init_irq_work(&work->irq_work, clear_pages_signal_irq_worker);
 
-	dma_fence_init(&work->dma,
-		       &clear_pages_work_ops,
-		       &fence_lock,
-		       i915->mm.unordered_timeline,
-		       0);
+	dma_fence_init(&work->dma, &clear_pages_work_ops, &fence_lock, 0, 0);
 	i915_sw_fence_init(&work->wait, clear_pages_work_notify);
 
 	i915_gem_object_lock(obj);
@@ -69,8 +69,7 @@ i915_gem_object_lock_fence(struct drm_i915_gem_object *obj)
 
 	i915_sw_fence_init(&stub->chain, stub_notify);
 	dma_fence_init(&stub->dma, &stub_fence_ops, &stub->chain.wait.lock,
-		       to_i915(obj->base.dev)->mm.unordered_timeline,
-		       0);
+		       0, 0);
 
 	if (i915_sw_fence_await_reservation(&stub->chain,
 					    obj->base.resv, NULL,
@@ -748,8 +748,6 @@ struct i915_gem_mm {
 	 */
 	struct workqueue_struct *userptr_wq;
 
-	u64 unordered_timeline;
-
 	/** Bit 6 swizzling required for X tiling */
 	u32 bit_6_swizzle_x;
 	/** Bit 6 swizzling required for Y tiling */
@@ -1411,8 +1411,6 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
 		mkwrite_device_info(dev_priv)->page_sizes =
 			I915_GTT_PAGE_SIZE_4K;
 
-	dev_priv->mm.unordered_timeline = dma_fence_context_alloc(1);
-
 	intel_timelines_init(dev_priv);
 
 	ret = i915_gem_init_userptr(dev_priv);
@@ -915,7 +915,7 @@ i915_request_await_dma_fence(struct i915_request *rq, struct dma_fence *fence)
 			continue;
 
 		/* Squash repeated waits to the same timelines */
-		if (fence->context != rq->i915->mm.unordered_timeline &&
+		if (fence->context &&
 		    intel_timeline_sync_is_later(rq->timeline, fence))
 			continue;
 
@@ -929,7 +929,7 @@ i915_request_await_dma_fence(struct i915_request *rq, struct dma_fence *fence)
 			return ret;
 
 		/* Record the latest fence used against each timeline */
-		if (fence->context != rq->i915->mm.unordered_timeline)
+		if (fence->context)
 			intel_timeline_sync_set(rq->timeline, fence);
 	} while (--nchild);
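
A hedged illustration of the consequence (using the kernel's dma-fence
API; not part of this patch): the only fence carrying context 0 is the
shared, always-signaled stub, so the await path above never squashes
against it nor records it in the timeline sync map.

        struct dma_fence *stub = dma_fence_get_stub();

        WARN_ON(stub->context != 0);            /* reserved, never allocated */
        WARN_ON(!dma_fence_is_signaled(stub));  /* signaled at creation */
        dma_fence_put(stub);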