Commit a679f58d authored by Chris Wilson

drm/i915: Flush pages on acquisition

When we return pages to the system, we ensure that they are marked as
being in the CPU domain since any external access is uncontrolled and we
must assume the worst. This means that we always need to flush the pages
on acquisition if we intend to use them on the GPU, and from the
beginning we have used set-domain for that. Set-domain is overkill for
the purpose as it is a general synchronisation barrier, whereas our
intent is only to flush the pages being swapped in. If we move that
flush into the page-acquisition phase, we then know that whenever we
have obj->mm.pages, they are coherent with the GPU, and we need only
maintain that status without resorting to heavy-handed use of
set-domain.
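
In outline, the acquisition-time flush amounts to the following (a
condensed sketch of the hunk this patch adds to
__i915_gem_object_set_pages(); it is not a separate function in the
patch):

    /* On acquiring obj->mm.pages: if the object was returned to the
     * system (and hence marked cache_dirty), flush once so the pages
     * start out coherent with the GPU. */
    if (obj->cache_dirty) {
            obj->write_domain = 0;
            if (i915_gem_object_has_struct_page(obj))
                    drm_clflush_sg(pages);
            obj->cache_dirty = false;
    }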

The principal knock-on effect for userspace is through mmap-gtt
pagefaulting. Our uAPI has always implied that the GTT mmap was async
(especially as when any pagefault occurs is unpredictable to userspace)
and so userspace had to apply explicit domain control itself
(set-domain). However, swapping is transparent to the kernel, and so on
first fault we need to acquire the pages and make them coherent for
access through the GTT. Our use of set-domain here leaks into the uABI
the fact that the first pagefault was synchronous. This is unintentional
and, barring a few igts, should go unnoticed; nevertheless we bump the
uABI version for mmap-gtt to reflect the change in behaviour.
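
As a point of reference, userspace can detect the new behaviour by
querying the bumped mmap-gtt version. A minimal sketch (the device node
path and the uapi include path are assumptions that vary by system):

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <drm/i915_drm.h>   /* may be installed as libdrm/i915_drm.h */

    int main(void)
    {
            int value = 0;
            struct drm_i915_getparam gp = {
                    .param = I915_PARAM_MMAP_GTT_VERSION,
                    .value = &value,
            };
            int fd = open("/dev/dri/renderD128", O_RDWR); /* assumed node */

            if (fd < 0 || ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
                    return 1;

            /* value >= 3: first GTT pagefault no longer implies set-domain(GTT) */
            printf("mmap-gtt version: %d\n", value);
            return 0;
    }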

Another implication of the change is that gem_create() is presumed to
create an object that is coherent with the CPU and in the CPU write
domain, so a set-domain(CPU) following a gem_create() used to be a minor
operation that merely checked whether we could allocate all pages for
the object. With this change applied, that set-domain(CPU) causes a
clflush as we acquire the pages. This will have a small impact on mesa,
as on !llc the clflush moves from execbuf time to create time, but the
performance impact should be minimal: the same clflush still occurs,
only earlier, and because of that clflush cost userspace already
recycles bo rather than allocating fresh objects.
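
The uAPI sequence in question, sketched from userspace (hypothetical
helper; on !llc the SET_DOMAIN ioctl below now implies the clflush at
page acquisition instead of being a near no-op on a fresh object):

    #include <string.h>
    #include <sys/ioctl.h>
    #include <drm/i915_drm.h>

    /* Create a bo and move it to the CPU write domain; 0 on error. */
    static __u32 create_cpu_bo(int fd, __u64 size)
    {
            struct drm_i915_gem_create create = { .size = size };
            struct drm_i915_gem_set_domain sd;

            if (ioctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create))
                    return 0;

            memset(&sd, 0, sizeof(sd));
            sd.handle = create.handle;
            sd.read_domains = I915_GEM_DOMAIN_CPU;
            sd.write_domain = I915_GEM_DOMAIN_CPU;

            /* Previously ~free on a freshly created object; now it may
             * clflush the newly acquired pages on !llc platforms. */
            if (ioctl(fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &sd))
                    return 0;

            return create.handle;
    }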

Internally, the presumption that objects are created in the CPU
write-domain and remain so through writes to obj->mm.mapping is more
prevalent than I expected; but it is easy enough to catch and apply a
manual flush.
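
The manual flush follows the same idiom at each such CPU-write site
(illustrative fragment of the pattern used throughout the patch, not a
complete function):

    /* Write through a kernel mapping, then flush before the GPU sees
     * the buffer, instead of relying on a later set-domain call. */
    cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
    if (IS_ERR(cmd))
            return PTR_ERR(cmd);

    *cmd = MI_BATCH_BUFFER_END;

    /* clflushes only if the mapping is not already coherent for writes */
    i915_gem_object_flush_map(obj);
    i915_gem_object_unpin_map(obj);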

For the future, we should push the page flush from the central
set_pages() into the callers so that we can control more finely when it
is applied, but for now doing it in one location is easier to validate,
at the cost of sometimes flushing when there is no need.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Matthew Auld <matthew.william.auld@gmail.com>
Cc: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Cc: Antonio Argenziano <antonio.argenziano@intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Reviewed-by: Matthew Auld <matthew.william.auld@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190321161908.8007-1-chris@chris-wilson.co.uk
parent 4daffb66
@@ -2965,6 +2965,14 @@ i915_coherent_map_type(struct drm_i915_private *i915)
 void *__must_check i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
                                            enum i915_map_type type);
 
+void __i915_gem_object_flush_map(struct drm_i915_gem_object *obj,
+                                 unsigned long offset,
+                                 unsigned long size);
+static inline void i915_gem_object_flush_map(struct drm_i915_gem_object *obj)
+{
+        __i915_gem_object_flush_map(obj, 0, obj->base.size);
+}
+
 /**
  * i915_gem_object_unpin_map - releases an earlier mapping
  * @obj: the object to unmap
...
@@ -1713,6 +1713,9 @@ static unsigned int tile_row_pages(const struct drm_i915_gem_object *obj)
  * 2 - Recognise WC as a separate cache domain so that we can flush the
  *     delayed writes via GTT before performing direct access via WC.
  *
+ * 3 - Remove implicit set-domain(GTT) and synchronisation on initial
+ *     pagefault; swapin remains transparent.
+ *
  * Restrictions:
  *
  *  * snoopable objects cannot be accessed via the GTT. It can cause machine
@@ -1740,7 +1743,7 @@ static unsigned int tile_row_pages(const struct drm_i915_gem_object *obj)
  */
 int i915_gem_mmap_gtt_version(void)
 {
-        return 2;
+        return 3;
 }
 
 static inline struct i915_ggtt_view
@@ -1808,17 +1811,6 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf)
 
         trace_i915_gem_object_fault(obj, page_offset, true, write);
 
-        /* Try to flush the object off the GPU first without holding the lock.
-         * Upon acquiring the lock, we will perform our sanity checks and then
-         * repeat the flush holding the lock in the normal manner to catch cases
-         * where we are gazumped.
-         */
-        ret = i915_gem_object_wait(obj,
-                                   I915_WAIT_INTERRUPTIBLE,
-                                   MAX_SCHEDULE_TIMEOUT);
-        if (ret)
-                goto err;
-
         ret = i915_gem_object_pin_pages(obj);
         if (ret)
                 goto err;
@@ -1874,10 +1866,6 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf)
                 goto err_unlock;
         }
 
-        ret = i915_gem_object_set_to_gtt_domain(obj, write);
-        if (ret)
-                goto err_unpin;
-
         ret = i915_vma_pin_fence(vma);
         if (ret)
                 goto err_unpin;
@@ -2534,6 +2522,14 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
 
         lockdep_assert_held(&obj->mm.lock);
 
+        /* Make the pages coherent with the GPU (flushing any swapin). */
+        if (obj->cache_dirty) {
+                obj->write_domain = 0;
+                if (i915_gem_object_has_struct_page(obj))
+                        drm_clflush_sg(pages);
+                obj->cache_dirty = false;
+        }
+
         obj->mm.get_page.sg_pos = pages->sgl;
         obj->mm.get_page.sg_idx = 0;
@@ -2735,6 +2731,33 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
         goto out_unlock;
 }
 
+void __i915_gem_object_flush_map(struct drm_i915_gem_object *obj,
+                                 unsigned long offset,
+                                 unsigned long size)
+{
+        enum i915_map_type has_type;
+        void *ptr;
+
+        GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
+        GEM_BUG_ON(range_overflows_t(typeof(obj->base.size),
+                                     offset, size, obj->base.size));
+
+        obj->mm.dirty = true;
+
+        if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE)
+                return;
+
+        ptr = page_unpack_bits(obj->mm.mapping, &has_type);
+        if (has_type == I915_MAP_WC)
+                return;
+
+        drm_clflush_virt_range(ptr + offset, size);
+        if (size == obj->base.size) {
+                obj->write_domain &= ~I915_GEM_DOMAIN_CPU;
+                obj->cache_dirty = false;
+        }
+}
+
 static int
 i915_gem_object_pwrite_gtt(struct drm_i915_gem_object *obj,
                            const struct drm_i915_gem_pwrite *arg)
@@ -4692,6 +4715,8 @@ static int __intel_engines_record_defaults(struct drm_i915_private *i915)
                 goto err_active;
 
         engine->default_state = i915_gem_object_get(state->obj);
+        i915_gem_object_set_cache_coherency(engine->default_state,
+                                            I915_CACHE_LLC);
 
         /* Check we can acquire the image of the context state */
         vaddr = i915_gem_object_pin_map(engine->default_state,
...
@@ -107,6 +107,7 @@ static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
 {
         struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
 
+        i915_gem_object_flush_map(obj);
         i915_gem_object_unpin_map(obj);
 }
...
@@ -1001,7 +1001,10 @@ static void reloc_gpu_flush(struct reloc_cache *cache)
 {
         GEM_BUG_ON(cache->rq_size >= cache->rq->batch->obj->base.size / sizeof(u32));
         cache->rq_cmd[cache->rq_size] = MI_BATCH_BUFFER_END;
+
+        __i915_gem_object_flush_map(cache->rq->batch->obj, 0, cache->rq_size);
         i915_gem_object_unpin_map(cache->rq->batch->obj);
+
         i915_gem_chipset_flush(cache->rq->i915);
 
         i915_request_add(cache->rq);
@@ -1214,10 +1217,6 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
         if (IS_ERR(cmd))
                 return PTR_ERR(cmd);
 
-        err = i915_gem_object_set_to_wc_domain(obj, false);
-        if (err)
-                goto err_unmap;
-
         batch = i915_vma_instance(obj, vma->vm, NULL);
         if (IS_ERR(batch)) {
                 err = PTR_ERR(batch);
...
@@ -164,7 +164,7 @@ static int render_state_setup(struct intel_render_state *so,
                 drm_clflush_virt_range(d, i * sizeof(u32));
         kunmap_atomic(d);
 
-        ret = i915_gem_object_set_to_gtt_domain(so->obj, false);
+        ret = 0;
 out:
         i915_gem_obj_finish_shmem_access(so->obj);
         return ret;
...
@@ -1509,9 +1509,7 @@ static int alloc_oa_buffer(struct drm_i915_private *dev_priv)
                 goto unlock;
         }
 
-        ret = i915_gem_object_set_cache_level(bo, I915_CACHE_LLC);
-        if (ret)
-                goto err_unref;
+        i915_gem_object_set_cache_coherency(bo, I915_CACHE_LLC);
 
         /* PreHSW required 512K alignment, HSW requires 16M */
         vma = i915_gem_object_ggtt_pin(bo, NULL, 0, SZ_16M, 0);
...
@@ -528,9 +528,7 @@ static int init_status_page(struct intel_engine_cs *engine)
                 return PTR_ERR(obj);
         }
 
-        ret = i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
-        if (ret)
-                goto err;
+        i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);
 
         vma = i915_vma_instance(obj, &engine->i915->ggtt.vm, NULL);
         if (IS_ERR(vma)) {
...
@@ -1248,6 +1248,30 @@ static void execlists_context_destroy(struct kref *kref)
         intel_context_free(ce);
 }
 
+static int __context_pin(struct i915_vma *vma)
+{
+        unsigned int flags;
+        int err;
+
+        flags = PIN_GLOBAL | PIN_HIGH;
+        flags |= PIN_OFFSET_BIAS | i915_ggtt_pin_bias(vma);
+
+        err = i915_vma_pin(vma, 0, 0, flags);
+        if (err)
+                return err;
+
+        vma->obj->pin_global++;
+        vma->obj->mm.dirty = true;
+
+        return 0;
+}
+
+static void __context_unpin(struct i915_vma *vma)
+{
+        vma->obj->pin_global--;
+        __i915_vma_unpin(vma);
+}
+
 static void execlists_context_unpin(struct intel_context *ce)
 {
         struct intel_engine_cs *engine;
@@ -1276,31 +1300,8 @@ static void execlists_context_unpin(struct intel_context *ce)
 
         intel_ring_unpin(ce->ring);
 
-        ce->state->obj->pin_global--;
         i915_gem_object_unpin_map(ce->state->obj);
-        i915_vma_unpin(ce->state);
-}
-
-static int __context_pin(struct i915_vma *vma)
-{
-        unsigned int flags;
-        int err;
-
-        /*
-         * Clear this page out of any CPU caches for coherent swap-in/out.
-         * We only want to do this on the first bind so that we do not stall
-         * on an active context (which by nature is already on the GPU).
-         */
-        if (!(vma->flags & I915_VMA_GLOBAL_BIND)) {
-                err = i915_gem_object_set_to_wc_domain(vma->obj, true);
-                if (err)
-                        return err;
-        }
-
-        flags = PIN_GLOBAL | PIN_HIGH;
-        flags |= PIN_OFFSET_BIAS | i915_ggtt_pin_bias(vma);
-
-        return i915_vma_pin(vma, 0, 0, flags);
+        __context_unpin(ce->state);
 }
 
 static void
@@ -1361,7 +1362,6 @@ __execlists_context_pin(struct intel_context *ce,
         ce->lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
         __execlists_update_reg_state(ce, engine);
 
-        ce->state->obj->pin_global++;
         return 0;
 
 unpin_ring:
@@ -1369,7 +1369,7 @@ __execlists_context_pin(struct intel_context *ce,
 unpin_map:
         i915_gem_object_unpin_map(ce->state->obj);
 unpin_vma:
-        __i915_vma_unpin(ce->state);
+        __context_unpin(ce->state);
 err:
         return ret;
 }
@@ -2751,19 +2751,12 @@ populate_lr_context(struct intel_context *ce,
         u32 *regs;
         int ret;
 
-        ret = i915_gem_object_set_to_cpu_domain(ctx_obj, true);
-        if (ret) {
-                DRM_DEBUG_DRIVER("Could not set to CPU domain\n");
-                return ret;
-        }
-
         vaddr = i915_gem_object_pin_map(ctx_obj, I915_MAP_WB);
         if (IS_ERR(vaddr)) {
                 ret = PTR_ERR(vaddr);
                 DRM_DEBUG_DRIVER("Could not map object pages! (%d)\n", ret);
                 return ret;
         }
-        ctx_obj->mm.dirty = true;
 
         if (engine->default_state) {
                 /*
@@ -2798,7 +2791,11 @@ populate_lr_context(struct intel_context *ce,
                 _MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT |
                                    CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT);
 
+        ret = 0;
 err_unpin_ctx:
+        __i915_gem_object_flush_map(ctx_obj,
+                                    LRC_HEADER_PAGES * PAGE_SIZE,
+                                    engine->context_size);
         i915_gem_object_unpin_map(ctx_obj);
         return ret;
 }
...
@@ -1195,15 +1195,6 @@ int intel_ring_pin(struct intel_ring *ring)
         else
                 flags |= PIN_HIGH;
 
-        if (!(vma->flags & I915_VMA_GLOBAL_BIND)) {
-                if (flags & PIN_MAPPABLE || map == I915_MAP_WC)
-                        ret = i915_gem_object_set_to_gtt_domain(vma->obj, true);
-                else
-                        ret = i915_gem_object_set_to_cpu_domain(vma->obj, true);
-                if (unlikely(ret))
-                        goto unpin_timeline;
-        }
-
         ret = i915_vma_pin(vma, 0, 0, flags);
         if (unlikely(ret))
                 goto unpin_timeline;
@@ -1392,17 +1383,6 @@ static int __context_pin(struct intel_context *ce)
         if (!vma)
                 return 0;
 
-        /*
-         * Clear this page out of any CPU caches for coherent swap-in/out.
-         * We only want to do this on the first bind so that we do not stall
-         * on an active context (which by nature is already on the GPU).
-         */
-        if (!(vma->flags & I915_VMA_GLOBAL_BIND)) {
-                err = i915_gem_object_set_to_gtt_domain(vma->obj, true);
-                if (err)
-                        return err;
-        }
-
         err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
         if (err)
                 return err;
@@ -1412,6 +1392,7 @@ static int __context_pin(struct intel_context *ce)
          * it cannot reclaim the object until we release it.
          */
         vma->obj->pin_global++;
+        vma->obj->mm.dirty = true;
 
         return 0;
 }
@@ -1446,6 +1427,24 @@ alloc_context_vma(struct intel_engine_cs *engine)
         if (IS_ERR(obj))
                 return ERR_CAST(obj);
 
+        /*
+         * Try to make the context utilize L3 as well as LLC.
+         *
+         * On VLV we don't have L3 controls in the PTEs so we
+         * shouldn't touch the cache level, especially as that
+         * would make the object snooped which might have a
+         * negative performance impact.
+         *
+         * Snooping is required on non-llc platforms in execlist
+         * mode, but since all GGTT accesses use PAT entry 0 we
+         * get snooping anyway regardless of cache_level.
+         *
+         * This is only applicable for Ivy Bridge devices since
+         * later platforms don't have L3 control bits in the PTE.
+         */
+        if (IS_IVYBRIDGE(i915))
+                i915_gem_object_set_cache_coherency(obj, I915_CACHE_L3_LLC);
+
         if (engine->default_state) {
                 void *defaults, *vaddr;
@@ -1463,29 +1462,10 @@ alloc_context_vma(struct intel_engine_cs *engine)
                 }
 
                 memcpy(vaddr, defaults, engine->context_size);
                 i915_gem_object_unpin_map(engine->default_state);
-                i915_gem_object_unpin_map(obj);
-        }
 
-        /*
-         * Try to make the context utilize L3 as well as LLC.
-         *
-         * On VLV we don't have L3 controls in the PTEs so we
-         * shouldn't touch the cache level, especially as that
-         * would make the object snooped which might have a
-         * negative performance impact.
-         *
-         * Snooping is required on non-llc platforms in execlist
-         * mode, but since all GGTT accesses use PAT entry 0 we
-         * get snooping anyway regardless of cache_level.
-         *
-         * This is only applicable for Ivy Bridge devices since
-         * later platforms don't have L3 control bits in the PTE.
-         */
-        if (IS_IVYBRIDGE(i915)) {
-                /* Ignore any error, regard it as a simple optimisation */
-                i915_gem_object_set_cache_level(obj, I915_CACHE_L3_LLC);
+                i915_gem_object_flush_map(obj);
+                i915_gem_object_unpin_map(obj);
         }
 
         vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
...
@@ -908,10 +908,6 @@ gpu_write_dw(struct i915_vma *vma, u64 offset, u32 val)
         if (IS_ERR(obj))
                 return ERR_CAST(obj);
 
-        err = i915_gem_object_set_to_wc_domain(obj, true);
-        if (err)
-                goto err;
-
         cmd = i915_gem_object_pin_map(obj, I915_MAP_WC);
         if (IS_ERR(cmd)) {
                 err = PTR_ERR(cmd);
@@ -1584,6 +1580,7 @@ static int igt_tmpfs_fallback(void *arg)
         }
         *vaddr = 0xdeadbeaf;
 
+        __i915_gem_object_flush_map(obj, 0, 64);
         i915_gem_object_unpin_map(obj);
 
         vma = i915_vma_instance(obj, vm, NULL);
...
@@ -220,6 +220,7 @@ gpu_fill_dw(struct i915_vma *vma, u64 offset, unsigned long count, u32 value)
                 offset += PAGE_SIZE;
         }
         *cmd = MI_BATCH_BUFFER_END;
+        i915_gem_object_flush_map(obj);
         i915_gem_object_unpin_map(obj);
 
         err = i915_gem_object_set_to_gtt_domain(obj, false);
@@ -604,12 +605,9 @@ static struct i915_vma *rpcs_query_batch(struct i915_vma *vma)
         *cmd++ = upper_32_bits(vma->node.start);
         *cmd = MI_BATCH_BUFFER_END;
 
+        __i915_gem_object_flush_map(obj, 0, 64);
         i915_gem_object_unpin_map(obj);
 
-        err = i915_gem_object_set_to_gtt_domain(obj, false);
-        if (err)
-                goto err;
-
         vma = i915_vma_instance(obj, vma->vm, NULL);
         if (IS_ERR(vma)) {
                 err = PTR_ERR(vma);
@@ -1202,12 +1200,9 @@ static int write_to_scratch(struct i915_gem_context *ctx,
         }
         *cmd++ = value;
         *cmd = MI_BATCH_BUFFER_END;
+        __i915_gem_object_flush_map(obj, 0, 64);
         i915_gem_object_unpin_map(obj);
 
-        err = i915_gem_object_set_to_gtt_domain(obj, false);
-        if (err)
-                goto err;
-
         vma = i915_vma_instance(obj, &ctx->ppgtt->vm, NULL);
         if (IS_ERR(vma)) {
                 err = PTR_ERR(vma);
@@ -1299,11 +1294,9 @@ static int read_from_scratch(struct i915_gem_context *ctx,
                 *cmd++ = result;
         }
         *cmd = MI_BATCH_BUFFER_END;
-        i915_gem_object_unpin_map(obj);
 
-        err = i915_gem_object_set_to_gtt_domain(obj, false);
-        if (err)
-                goto err;
+        i915_gem_object_flush_map(obj);
+        i915_gem_object_unpin_map(obj);
 
         vma = i915_vma_instance(obj, &ctx->ppgtt->vm, NULL);
         if (IS_ERR(vma)) {
...
@@ -315,6 +315,7 @@ static int igt_dmabuf_export_kmap(void *arg)
                 goto err;
         }
         memset(ptr + PAGE_SIZE, 0xaa, PAGE_SIZE);
+        i915_gem_object_flush_map(obj);
         i915_gem_object_unpin_map(obj);
 
         ptr = dma_buf_kmap(dmabuf, 1);
...
@@ -274,7 +274,7 @@ static int igt_evict_for_cache_color(void *arg)
                 err = PTR_ERR(obj);
                 goto cleanup;
         }
-        i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
+        i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);
         quirk_add(obj, &objects);
 
         vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
@@ -290,7 +290,7 @@ static int igt_evict_for_cache_color(void *arg)
                 err = PTR_ERR(obj);
                 goto cleanup;
         }
-        i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
+        i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);
         quirk_add(obj, &objects);
 
         /* Neighbouring; same colour - should fit */
...
@@ -619,13 +619,11 @@ static struct i915_vma *empty_batch(struct drm_i915_private *i915)
         }
         *cmd = MI_BATCH_BUFFER_END;
-        i915_gem_chipset_flush(i915);
 
+        __i915_gem_object_flush_map(obj, 0, 64);
         i915_gem_object_unpin_map(obj);
 
-        err = i915_gem_object_set_to_gtt_domain(obj, false);
-        if (err)
-                goto err;
+        i915_gem_chipset_flush(i915);
 
         vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
         if (IS_ERR(vma)) {
@@ -777,10 +775,6 @@ static struct i915_vma *recursive_batch(struct drm_i915_private *i915)
         if (err)
                 goto err;
 
-        err = i915_gem_object_set_to_wc_domain(obj, true);
-        if (err)
-                goto err;
-
         cmd = i915_gem_object_pin_map(obj, I915_MAP_WC);
         if (IS_ERR(cmd)) {
                 err = PTR_ERR(cmd);
@@ -799,10 +793,12 @@ static struct i915_vma *recursive_batch(struct drm_i915_private *i915)
                 *cmd++ = lower_32_bits(vma->node.start);
         }
         *cmd++ = MI_BATCH_BUFFER_END; /* terminate early in case of error */
-        i915_gem_chipset_flush(i915);
 
+        __i915_gem_object_flush_map(obj, 0, 64);
         i915_gem_object_unpin_map(obj);
 
+        i915_gem_chipset_flush(i915);
+
         return vma;
 
 err:
...
@@ -29,7 +29,7 @@ int igt_spinner_init(struct igt_spinner *spin, struct drm_i915_private *i915)
                 goto err_hws;
         }
 
-        i915_gem_object_set_cache_level(spin->hws, I915_CACHE_LLC);
+        i915_gem_object_set_cache_coherency(spin->hws, I915_CACHE_LLC);
         vaddr = i915_gem_object_pin_map(spin->hws, I915_MAP_WB);
         if (IS_ERR(vaddr)) {
                 err = PTR_ERR(vaddr);
...
@@ -70,7 +70,7 @@ static int hang_init(struct hang *h, struct drm_i915_private *i915)
                 goto err_hws;
         }
 
-        i915_gem_object_set_cache_level(h->hws, I915_CACHE_LLC);
+        i915_gem_object_set_cache_coherency(h->hws, I915_CACHE_LLC);
         vaddr = i915_gem_object_pin_map(h->hws, I915_MAP_WB);
         if (IS_ERR(vaddr)) {
                 err = PTR_ERR(vaddr);
...
@@ -1018,12 +1018,9 @@ static int live_preempt_smoke(void *arg)
         for (n = 0; n < PAGE_SIZE / sizeof(*cs) - 1; n++)
                 cs[n] = MI_ARB_CHECK;
         cs[n] = MI_BATCH_BUFFER_END;
+        i915_gem_object_flush_map(smoke.batch);
         i915_gem_object_unpin_map(smoke.batch);
 
-        err = i915_gem_object_set_to_gtt_domain(smoke.batch, false);
-        if (err)
-                goto err_batch;
-
         for (n = 0; n < smoke.ncontext; n++) {
                 smoke.contexts[n] = kernel_context(smoke.i915);
                 if (!smoke.contexts[n])
...
@@ -90,6 +90,7 @@ read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
                 goto err_obj;
         }
         memset(cs, 0xc5, PAGE_SIZE);
+        i915_gem_object_flush_map(result);
         i915_gem_object_unpin_map(result);
 
         vma = i915_vma_instance(result, &engine->i915->ggtt.vm, NULL);
@@ -358,6 +359,7 @@ static struct i915_vma *create_scratch(struct i915_gem_context *ctx)
                 goto err_obj;
         }
         memset(ptr, 0xc5, PAGE_SIZE);
+        i915_gem_object_flush_map(obj);
         i915_gem_object_unpin_map(obj);
 
         vma = i915_vma_instance(obj, &ctx->ppgtt->vm, NULL);
@@ -551,6 +553,7 @@ static int check_dirty_whitelist(struct i915_gem_context *ctx,
 
         *cs++ = MI_BATCH_BUFFER_END;
 
+        i915_gem_object_flush_map(batch->obj);
         i915_gem_object_unpin_map(batch->obj);
         i915_gem_chipset_flush(ctx->i915);
...