Commit 59c94b9d authored by Chris Wilson

drm/i915/gt: Replace opencoded i915_gem_object_pin_map()

As we have a pin_map interface that knows how to flush the data to the
device, use it. The only downside is that we keep the kmap around, as
once acquired we keep the mapping cached until the object's backing
store is released.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200708173748.32734-2-chris@chris-wilson.co.uk
parent 09137e94
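For reference, the pattern the patch adopts is: map the whole object once, emit into the mapping, flush only the bytes actually written, then drop the pin (the mapping itself stays cached by the object until its backing store is released). A minimal sketch of that flow is below; the helper name emit_into_object() and its arguments are invented for illustration, while the i915_gem_object_* calls are the ones used in the diff that follows.

/*
 * Hypothetical helper illustrating the pin_map pattern this patch
 * switches to. Only the i915_gem_object_* calls are taken from the
 * diff below; everything else is made up for the example.
 */
static int emit_into_object(struct drm_i915_gem_object *obj,
			    const u32 *cmds, unsigned long len)
{
	void *vaddr;

	/* Map the entire object write-back; unlike kmap_atomic() this
	 * is not limited to a single page and may sleep. */
	vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	memcpy(vaddr, cmds, len);

	/* Flush just the bytes we wrote so the device sees them,
	 * then release the pin; the kmap itself remains cached. */
	__i915_gem_object_flush_map(obj, 0, len);
	i915_gem_object_unpin_map(obj);

	return 0;
}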
@@ -3880,7 +3880,6 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine)
 	struct i915_wa_ctx_bb *wa_bb[2] = { &wa_ctx->indirect_ctx,
 					    &wa_ctx->per_ctx };
 	wa_bb_func_t wa_bb_fn[2];
-	struct page *page;
 	void *batch, *batch_ptr;
 	unsigned int i;
 	int ret;
@@ -3916,14 +3915,14 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine)
 		return ret;
 	}
 
-	page = i915_gem_object_get_dirty_page(wa_ctx->vma->obj, 0);
-	batch = batch_ptr = kmap_atomic(page);
+	batch = i915_gem_object_pin_map(wa_ctx->vma->obj, I915_MAP_WB);
 
 	/*
 	 * Emit the two workaround batch buffers, recording the offset from the
 	 * start of the workaround batch buffer object for each and their
 	 * respective sizes.
 	 */
+	batch_ptr = batch;
 	for (i = 0; i < ARRAY_SIZE(wa_bb_fn); i++) {
 		wa_bb[i]->offset = batch_ptr - batch;
 		if (GEM_DEBUG_WARN_ON(!IS_ALIGNED(wa_bb[i]->offset,
@@ -3935,10 +3934,10 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine)
 		batch_ptr = wa_bb_fn[i](engine, batch_ptr);
 		wa_bb[i]->size = batch_ptr - (batch + wa_bb[i]->offset);
 	}
+	GEM_BUG_ON(batch_ptr - batch > CTX_WA_BB_OBJ_SIZE);
 
-	BUG_ON(batch_ptr - batch > CTX_WA_BB_OBJ_SIZE);
-
-	kunmap_atomic(batch);
+	__i915_gem_object_flush_map(wa_ctx->vma->obj, 0, batch_ptr - batch);
+	i915_gem_object_unpin_map(wa_ctx->vma->obj);
 
 	if (ret)
 		lrc_destroy_wa_ctx(engine);