Commit aff43766 authored by Tvrtko Ursulin, committed by Daniel Vetter

drm/i915: Move flags describing VMA mappings into the VMA

If these flags live at the object level, it will be more difficult to
support multiple VMAs per object.

v2: Simplification and cleanup after code review comments (Chris Wilson).
Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
parent 041df357
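
For orientation: the heart of this commit is replacing the per-object booleans
has_global_gtt_mapping / has_aliasing_ppgtt_mapping with a per-VMA "bound"
bitfield, so each mapping of an object tracks its own binding state. Below is
a minimal, self-contained C sketch of that pattern, not driver code; apart
from the GLOBAL_BIND and LOCAL_BIND values, which mirror the commit, every
name is an illustrative stand-in:

	/* Sketch only: simplified stand-ins for the real i915 types. */
	#include <stdio.h>

	#define GLOBAL_BIND	(1 << 0)	/* bound in the global GTT */
	#define LOCAL_BIND	(1 << 1)	/* bound in an (aliasing) PPGTT */

	struct example_vma {
		unsigned int bound : 4;	/* replaces obj->has_*_mapping booleans */
	};

	int main(void)
	{
		/* One object can now have several VMAs, each bound independently. */
		struct example_vma ggtt_vma = { .bound = GLOBAL_BIND };
		struct example_vma ppgtt_vma = { .bound = LOCAL_BIND };

		printf("ggtt vma global?  %d\n", !!(ggtt_vma.bound & GLOBAL_BIND));
		printf("ppgtt vma global? %d\n", !!(ppgtt_vma.bound & GLOBAL_BIND));

		/* Unbinding clears only that VMA's bit, as ggtt_unbind_vma() does. */
		ggtt_vma.bound &= ~GLOBAL_BIND;
		printf("after unbind:     %d\n", !!(ggtt_vma.bound & GLOBAL_BIND));
		return 0;
	}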
@@ -116,7 +116,7 @@ static const char *get_tiling_flag(struct drm_i915_gem_object *obj)
 
 static inline const char *get_global_flag(struct drm_i915_gem_object *obj)
 {
-	return obj->has_global_gtt_mapping ? "g" : " ";
+	return i915_gem_obj_to_ggtt(obj) ? "g" : " ";
 }
 
 static void
@@ -1856,8 +1856,6 @@ struct drm_i915_gem_object {
 	unsigned long gt_ro:1;
 	unsigned int cache_level:3;
 
-	unsigned int has_aliasing_ppgtt_mapping:1;
-	unsigned int has_global_gtt_mapping:1;
 	unsigned int has_dma_mapping:1;
 
 	unsigned int frontbuffer_bits:INTEL_FRONTBUFFER_BITS;
@@ -3701,7 +3701,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
 		list_for_each_entry(vma, &obj->vma_list, vma_link)
 			if (drm_mm_node_allocated(&vma->node))
 				vma->bind_vma(vma, cache_level,
-					      obj->has_global_gtt_mapping ? GLOBAL_BIND : 0);
+					      vma->bound & GLOBAL_BIND);
 	}
 
 	list_for_each_entry(vma, &obj->vma_list, vma_link)
@@ -4097,7 +4097,7 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
 			return PTR_ERR(vma);
 	}
 
-	if (flags & PIN_GLOBAL && !obj->has_global_gtt_mapping)
+	if (flags & PIN_GLOBAL && !(vma->bound & GLOBAL_BIND))
 		vma->bind_vma(vma, obj->cache_level, GLOBAL_BIND);
 
 	vma->pin_count++;
@@ -522,6 +522,7 @@ static int do_switch(struct intel_engine_cs *ring,
 	struct intel_context *from = ring->last_context;
 	u32 hw_flags = 0;
 	bool uninitialized = false;
+	struct i915_vma *vma;
 	int ret, i;
 
 	if (from != NULL && ring == &dev_priv->ring[RCS]) {
@@ -571,11 +572,10 @@ static int do_switch(struct intel_engine_cs *ring,
 	if (ret)
 		goto unpin_out;
 
-	if (!to->legacy_hw_ctx.rcs_state->has_global_gtt_mapping) {
-		struct i915_vma *vma = i915_gem_obj_to_vma(to->legacy_hw_ctx.rcs_state,
-							   &dev_priv->gtt.base);
-		vma->bind_vma(vma, to->legacy_hw_ctx.rcs_state->cache_level, GLOBAL_BIND);
-	}
+	vma = i915_gem_obj_to_ggtt(to->legacy_hw_ctx.rcs_state);
+	if (!(vma->bound & GLOBAL_BIND))
+		vma->bind_vma(vma, to->legacy_hw_ctx.rcs_state->cache_level,
+			      GLOBAL_BIND);
 
 	if (!to->legacy_hw_ctx.initialized || i915_gem_context_is_default(to))
 		hw_flags |= MI_RESTORE_INHIBIT;
@@ -357,12 +357,9 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
 	 * through the ppgtt for non_secure batchbuffers. */
 	if (unlikely(IS_GEN6(dev) &&
 	    reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
-	    !target_i915_obj->has_global_gtt_mapping)) {
-		struct i915_vma *vma =
-			list_first_entry(&target_i915_obj->vma_list,
-					 typeof(*vma), vma_link);
-		vma->bind_vma(vma, target_i915_obj->cache_level, GLOBAL_BIND);
-	}
+	    !(target_vma->bound & GLOBAL_BIND)))
+		target_vma->bind_vma(target_vma, target_i915_obj->cache_level,
+				     GLOBAL_BIND);
 
 	/* Validate that the target is in a valid r/w GPU domain */
 	if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
@@ -1336,7 +1336,7 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
 		 * Unfortunately above, we've just wiped out the mappings
 		 * without telling our object about it. So we need to fake it.
 		 */
-		obj->has_global_gtt_mapping = 0;
+		vma->bound &= ~GLOBAL_BIND;
 		vma->bind_vma(vma, obj->cache_level, GLOBAL_BIND);
 	}
 
@@ -1533,7 +1533,7 @@ static void i915_ggtt_bind_vma(struct i915_vma *vma,
 
 	BUG_ON(!i915_is_ggtt(vma->vm));
 	intel_gtt_insert_sg_entries(vma->obj->pages, entry, flags);
-	vma->obj->has_global_gtt_mapping = 1;
+	vma->bound = GLOBAL_BIND;
 }
 
 static void i915_ggtt_clear_range(struct i915_address_space *vm,
@@ -1552,7 +1552,7 @@ static void i915_ggtt_unbind_vma(struct i915_vma *vma)
 	const unsigned int size = vma->obj->base.size >> PAGE_SHIFT;
 
 	BUG_ON(!i915_is_ggtt(vma->vm));
-	vma->obj->has_global_gtt_mapping = 0;
+	vma->bound = 0;
 	intel_gtt_clear_range(first, size);
 }
 
@@ -1580,24 +1580,24 @@ static void ggtt_bind_vma(struct i915_vma *vma,
 	 * flags. At all other times, the GPU will use the aliasing PPGTT.
 	 */
 	if (!dev_priv->mm.aliasing_ppgtt || flags & GLOBAL_BIND) {
-		if (!obj->has_global_gtt_mapping ||
+		if (!(vma->bound & GLOBAL_BIND) ||
 		    (cache_level != obj->cache_level)) {
 			vma->vm->insert_entries(vma->vm, obj->pages,
 						vma->node.start,
 						cache_level, flags);
-			obj->has_global_gtt_mapping = 1;
+			vma->bound |= GLOBAL_BIND;
 		}
 	}
 
 	if (dev_priv->mm.aliasing_ppgtt &&
-	    (!obj->has_aliasing_ppgtt_mapping ||
+	    (!(vma->bound & LOCAL_BIND) ||
 	     (cache_level != obj->cache_level))) {
 		struct i915_hw_ppgtt *appgtt = dev_priv->mm.aliasing_ppgtt;
 		appgtt->base.insert_entries(&appgtt->base,
 					    vma->obj->pages,
 					    vma->node.start,
 					    cache_level, flags);
-		vma->obj->has_aliasing_ppgtt_mapping = 1;
+		vma->bound |= LOCAL_BIND;
 	}
 }
@@ -1607,21 +1607,21 @@ static void ggtt_unbind_vma(struct i915_vma *vma)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *obj = vma->obj;
 
-	if (obj->has_global_gtt_mapping) {
+	if (vma->bound & GLOBAL_BIND) {
 		vma->vm->clear_range(vma->vm,
 				     vma->node.start,
 				     obj->base.size,
 				     true);
-		obj->has_global_gtt_mapping = 0;
+		vma->bound &= ~GLOBAL_BIND;
 	}
 
-	if (obj->has_aliasing_ppgtt_mapping) {
+	if (vma->bound & LOCAL_BIND) {
 		struct i915_hw_ppgtt *appgtt = dev_priv->mm.aliasing_ppgtt;
 		appgtt->base.clear_range(&appgtt->base,
 					 vma->node.start,
 					 obj->base.size,
 					 true);
-		obj->has_aliasing_ppgtt_mapping = 0;
+		vma->bound &= ~LOCAL_BIND;
 	}
 }
@@ -1699,7 +1699,7 @@ int i915_gem_setup_global_gtt(struct drm_device *dev,
 			DRM_DEBUG_KMS("Reservation failed: %i\n", ret);
 			return ret;
 		}
-		obj->has_global_gtt_mapping = 1;
+		vma->bound |= GLOBAL_BIND;
 	}
 
 	dev_priv->gtt.base.start = start;
@@ -123,6 +123,12 @@ struct i915_vma {
 	struct drm_i915_gem_object *obj;
 	struct i915_address_space *vm;
 
+	/** Flags and address space this VMA is bound to */
+#define GLOBAL_BIND	(1<<0)
+#define LOCAL_BIND	(1<<1)
+#define PTE_READ_ONLY	(1<<2)
+	unsigned int bound : 4;
+
 	/** This object's place on the active/inactive lists */
 	struct list_head mm_list;
@@ -155,8 +161,6 @@ struct i915_vma {
 	 * setting the valid PTE entries to a reserved scratch page. */
 	void (*unbind_vma)(struct i915_vma *vma);
 	/* Map an object into an address space with the given cache flags. */
-#define GLOBAL_BIND (1<<0)
-#define PTE_READ_ONLY (1<<1)
 	void (*bind_vma)(struct i915_vma *vma,
 			 enum i915_cache_level cache_level,
 			 u32 flags);
@@ -533,7 +533,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
 		}
 	}
 
-	obj->has_global_gtt_mapping = 1;
+	vma->bound |= GLOBAL_BIND;
 
 	list_add_tail(&obj->global_list, &dev_priv->mm.bound_list);
 	list_add_tail(&vma->mm_list, &ggtt->inactive_list);
@@ -565,6 +565,7 @@ i915_error_object_create(struct drm_i915_private *dev_priv,
 			 struct i915_address_space *vm)
 {
 	struct drm_i915_error_object *dst;
+	struct i915_vma *vma = NULL;
 	int num_pages;
 	bool use_ggtt;
 	int i = 0;
@@ -585,16 +586,17 @@ i915_error_object_create(struct drm_i915_private *dev_priv,
 		dst->gtt_offset = -1;
 
 	reloc_offset = dst->gtt_offset;
+	if (i915_is_ggtt(vm))
+		vma = i915_gem_obj_to_ggtt(src);
 	use_ggtt = (src->cache_level == I915_CACHE_NONE &&
-		    i915_is_ggtt(vm) &&
-		    src->has_global_gtt_mapping &&
-		    reloc_offset + num_pages * PAGE_SIZE <= dev_priv->gtt.mappable_end);
+		    vma && (vma->bound & GLOBAL_BIND) &&
+		    reloc_offset + num_pages * PAGE_SIZE <= dev_priv->gtt.mappable_end);
 
 	/* Cannot access stolen address directly, try to use the aperture */
 	if (src->stolen) {
 		use_ggtt = true;
-		if (!src->has_global_gtt_mapping)
+		if (!(vma && vma->bound & GLOBAL_BIND))
 			goto unwind;
 
 		reloc_offset = i915_gem_obj_ggtt_offset(src);
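
A recurring pattern in the hunks above is swapping a per-object test for a
lookup-then-test on the GGTT VMA, guarding against objects that have no GGTT
VMA at all (as the error-capture path does). A compilable sketch of that
guard, again using example_* stand-ins rather than the real i915 types:

	#include <stdbool.h>
	#include <stddef.h>

	#define GLOBAL_BIND	(1 << 0)

	struct example_vma {
		unsigned int bound : 4;
	};

	struct example_obj {
		struct example_vma *ggtt_vma;	/* NULL if the object has no GGTT VMA */
	};

	/* Stand-in for i915_gem_obj_to_ggtt(): may return NULL. */
	static struct example_vma *example_obj_to_ggtt(struct example_obj *obj)
	{
		return obj->ggtt_vma;
	}

	/* Per-VMA equivalent of the old obj->has_global_gtt_mapping test. */
	static bool example_obj_bound_global(struct example_obj *obj)
	{
		struct example_vma *vma = example_obj_to_ggtt(obj);

		return vma && (vma->bound & GLOBAL_BIND);
	}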