Commit 5b8c8aec authored by Chris Wilson

drm/i915: Move frontbuffer CS write tracking from ggtt vma to object

I tried to avoid having to track the write for every VMA by only
tracking writes to the ggtt. However, for the purposes of frontbuffer
tracking this is insufficient, as we need to invalidate around writes not
just to the ggtt but to all aliased ppgtt views of the framebuffer. By
moving the critical section to the object, and doing so only for
framebuffer writes, we can reduce the tracking even further by watching
only framebuffers and not vmas.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Paulo Zanoni <paulo.r.zanoni@intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20161116190704.5293-1-chris@chris-wilson.co.uk
Tested-by: Paulo Zanoni <paulo.r.zanoni@intel.com>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
parent d806e682
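In short, the write tracking moves from the ggtt vma to the object: a per-object
i915_gem_active follows the last CS write to a framebuffer object, and its retire
callback performs the frontbuffer flush. A condensed sketch of that flow, assembled
from the hunks below (kernel-internal i915 APIs; not a standalone compilation unit):

	/* Retire callback: flush the frontbuffer once the CS write completes. */
	static void
	frontbuffer_retire(struct i915_gem_active *active,
			   struct drm_i915_gem_request *request)
	{
		struct drm_i915_gem_object *obj =
			container_of(active, typeof(*obj), frontbuffer_write);

		intel_fb_obj_flush(obj, true, ORIGIN_CS);
	}

	/* Object init: arm the per-object tracker with that callback. */
	init_request_active(&obj->frontbuffer_write, frontbuffer_retire);

	/* Execbuf: only track requests that actually write a framebuffer. */
	if (flags & EXEC_OBJECT_WRITE) {
		if (intel_fb_obj_invalidate(obj, ORIGIN_CS))
			i915_gem_active_set(&obj->frontbuffer_write, req);
	}

Because intel_fb_obj_invalidate() now reports whether the object has any
frontbuffer bits, non-framebuffer objects never arm the tracker at all.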
@@ -3886,6 +3886,16 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
 	return err;
 }
 
+static void
+frontbuffer_retire(struct i915_gem_active *active,
+		   struct drm_i915_gem_request *request)
+{
+	struct drm_i915_gem_object *obj =
+		container_of(active, typeof(*obj), frontbuffer_write);
+
+	intel_fb_obj_flush(obj, true, ORIGIN_CS);
+}
+
 void i915_gem_object_init(struct drm_i915_gem_object *obj,
 			  const struct drm_i915_gem_object_ops *ops)
 {
@@ -3903,6 +3913,7 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
 	obj->resv = &obj->__builtin_resv;
 
 	obj->frontbuffer_ggtt_origin = ORIGIN_GTT;
+	init_request_active(&obj->frontbuffer_write, frontbuffer_retire);
 
 	obj->mm.madv = I915_MADV_WILLNEED;
 	INIT_RADIX_TREE(&obj->mm.get_page.radix, GFP_KERNEL | __GFP_NOWARN);
...
@@ -1276,9 +1276,8 @@ void i915_vma_move_to_active(struct i915_vma *vma,
 		list_move_tail(&vma->vm_link, &vma->vm->active_list);
 
 	if (flags & EXEC_OBJECT_WRITE) {
-		i915_gem_active_set(&vma->last_write, req);
-
-		intel_fb_obj_invalidate(obj, ORIGIN_CS);
+		if (intel_fb_obj_invalidate(obj, ORIGIN_CS))
+			i915_gem_active_set(&obj->frontbuffer_write, req);
 
 		/* update for the implicit flush after a batch */
 		obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
...
@@ -103,6 +103,7 @@ struct drm_i915_gem_object {
 
 	atomic_t frontbuffer_bits;
 	unsigned int frontbuffer_ggtt_origin; /* write once */
+	struct i915_gem_active frontbuffer_write;
 
 	/** Current tiling stride for the object, if it's tiled. */
 	unsigned int tiling_and_stride;
...
@@ -886,8 +886,8 @@ static void capture_bo(struct drm_i915_error_buffer *err,
 
 	for (i = 0; i < I915_NUM_ENGINES; i++)
 		err->rseqno[i] = __active_get_seqno(&vma->last_read[i]);
-	err->wseqno = __active_get_seqno(&vma->last_write);
-	err->engine = __active_get_engine_id(&vma->last_write);
+	err->wseqno = __active_get_seqno(&obj->frontbuffer_write);
+	err->engine = __active_get_engine_id(&obj->frontbuffer_write);
 
 	err->gtt_offset = vma->node.start;
 	err->read_domains = obj->base.read_domains;
...
@@ -68,16 +68,6 @@ i915_vma_retire(struct i915_gem_active *active,
 	}
 }
 
-static void
-i915_ggtt_retire__write(struct i915_gem_active *active,
-			struct drm_i915_gem_request *request)
-{
-	struct i915_vma *vma =
-		container_of(active, struct i915_vma, last_write);
-
-	intel_fb_obj_flush(vma->obj, true, ORIGIN_CS);
-}
-
 static struct i915_vma *
 __i915_vma_create(struct drm_i915_gem_object *obj,
 		  struct i915_address_space *vm,
@@ -96,8 +86,6 @@ __i915_vma_create(struct drm_i915_gem_object *obj,
 	INIT_LIST_HEAD(&vma->exec_list);
 	for (i = 0; i < ARRAY_SIZE(vma->last_read); i++)
 		init_request_active(&vma->last_read[i], i915_vma_retire);
-	init_request_active(&vma->last_write,
-			    i915_is_ggtt(vm) ? i915_ggtt_retire__write : NULL);
 	init_request_active(&vma->last_fence, NULL);
 	list_add(&vma->vm_link, &vm->unbound_list);
 	vma->vm = vm;
...
@@ -80,7 +80,6 @@ struct i915_vma {
 
 	unsigned int active;
 	struct i915_gem_active last_read[I915_NUM_ENGINES];
-	struct i915_gem_active last_write;
 	struct i915_gem_active last_fence;
 
 	/**
...
@@ -53,16 +53,17 @@ void __intel_fb_obj_flush(struct drm_i915_gem_object *obj,
  * until the rendering completes or a flip on this frontbuffer plane is
  * scheduled.
  */
-static inline void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
+static inline bool intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
 					   enum fb_op_origin origin)
 {
 	unsigned int frontbuffer_bits;
 
 	frontbuffer_bits = atomic_read(&obj->frontbuffer_bits);
 	if (!frontbuffer_bits)
-		return;
+		return false;
 
 	__intel_fb_obj_invalidate(obj, origin, frontbuffer_bits);
+	return true;
 }
 
 /**
...