Commit a0950768 authored by Chris Wilson

drm/i915: Discard some redundant cache domain flushes

Since commit a679f58d ("drm/i915: Flush pages on acquisition"), we
flush an object's pages as we acquire them, and as such, when we create
an object for the purpose of writing into it, we do not need to
manually flush.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Matthew Auld <matthew.william.auld@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190614111053.25615-1-chris@chris-wilson.co.uk
parent 5e3fb2a5
...@@ -209,12 +209,6 @@ gpu_fill_dw(struct i915_vma *vma, u64 offset, unsigned long count, u32 value) ...@@ -209,12 +209,6 @@ gpu_fill_dw(struct i915_vma *vma, u64 offset, unsigned long count, u32 value)
i915_gem_object_flush_map(obj); i915_gem_object_flush_map(obj);
i915_gem_object_unpin_map(obj); i915_gem_object_unpin_map(obj);
i915_gem_object_lock(obj);
err = i915_gem_object_set_to_gtt_domain(obj, false);
i915_gem_object_unlock(obj);
if (err)
goto err;
vma = i915_vma_instance(obj, vma->vm, NULL); vma = i915_vma_instance(obj, vma->vm, NULL);
if (IS_ERR(vma)) { if (IS_ERR(vma)) {
err = PTR_ERR(vma); err = PTR_ERR(vma);
......
...@@ -368,12 +368,6 @@ static struct i915_vma *create_batch(struct i915_gem_context *ctx) ...@@ -368,12 +368,6 @@ static struct i915_vma *create_batch(struct i915_gem_context *ctx)
if (err) if (err)
goto err_obj; goto err_obj;
i915_gem_object_lock(obj);
err = i915_gem_object_set_to_wc_domain(obj, true);
i915_gem_object_unlock(obj);
if (err)
goto err_obj;
return vma; return vma;
err_obj: err_obj:
......
...@@ -344,29 +344,20 @@ static void capture_logs_work(struct work_struct *work) ...@@ -344,29 +344,20 @@ static void capture_logs_work(struct work_struct *work)
static int guc_log_map(struct intel_guc_log *log) static int guc_log_map(struct intel_guc_log *log)
{ {
void *vaddr; void *vaddr;
int ret;
lockdep_assert_held(&log->relay.lock); lockdep_assert_held(&log->relay.lock);
if (!log->vma) if (!log->vma)
return -ENODEV; return -ENODEV;
i915_gem_object_lock(log->vma->obj);
ret = i915_gem_object_set_to_wc_domain(log->vma->obj, true);
i915_gem_object_unlock(log->vma->obj);
if (ret)
return ret;
/* /*
* Create a WC (Uncached for read) vmalloc mapping of log * Create a WC (Uncached for read) vmalloc mapping of log
* buffer pages, so that we can directly get the data * buffer pages, so that we can directly get the data
* (up-to-date) from memory. * (up-to-date) from memory.
*/ */
vaddr = i915_gem_object_pin_map(log->vma->obj, I915_MAP_WC); vaddr = i915_gem_object_pin_map(log->vma->obj, I915_MAP_WC);
if (IS_ERR(vaddr)) { if (IS_ERR(vaddr))
DRM_ERROR("Couldn't map log buffer pages %d\n", ret);
return PTR_ERR(vaddr); return PTR_ERR(vaddr);
}
log->relay.buf_addr = vaddr; log->relay.buf_addr = vaddr;
......
...@@ -1377,12 +1377,6 @@ void intel_overlay_setup(struct drm_i915_private *dev_priv) ...@@ -1377,12 +1377,6 @@ void intel_overlay_setup(struct drm_i915_private *dev_priv)
if (ret) if (ret)
goto out_free; goto out_free;
i915_gem_object_lock(overlay->reg_bo);
ret = i915_gem_object_set_to_gtt_domain(overlay->reg_bo, true);
i915_gem_object_unlock(overlay->reg_bo);
if (ret)
goto out_reg_bo;
memset_io(overlay->regs, 0, sizeof(struct overlay_registers)); memset_io(overlay->regs, 0, sizeof(struct overlay_registers));
update_polyphase_filter(overlay->regs); update_polyphase_filter(overlay->regs);
update_reg_attrs(overlay, overlay->regs); update_reg_attrs(overlay, overlay->regs);
...@@ -1391,8 +1385,6 @@ void intel_overlay_setup(struct drm_i915_private *dev_priv) ...@@ -1391,8 +1385,6 @@ void intel_overlay_setup(struct drm_i915_private *dev_priv)
DRM_INFO("Initialized overlay support.\n"); DRM_INFO("Initialized overlay support.\n");
return; return;
out_reg_bo:
i915_gem_object_put(overlay->reg_bo);
out_free: out_free:
kfree(overlay); kfree(overlay);
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment