Commit 8a1a49f9 authored by Daniel Vetter, committed by Chris Wilson

drm/i915: move flushing list processing to i915_retire_commands

... instead of threading flush_domains through the execbuf code to
i915_add_request.

With this change 2 small cleanups are possible (likewise the majority
of the patch):

- The flush_domains parameter of i915_add_request is always 0. Drop it
  and the corresponding logic.
- Ditto for the seqno param of i915_gem_process_flushing_list.
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
parent a6910434
...@@ -991,12 +991,15 @@ int i915_gpu_idle(struct drm_device *dev); ...@@ -991,12 +991,15 @@ int i915_gpu_idle(struct drm_device *dev);
int i915_gem_idle(struct drm_device *dev); int i915_gem_idle(struct drm_device *dev);
uint32_t i915_add_request(struct drm_device *dev, uint32_t i915_add_request(struct drm_device *dev,
struct drm_file *file_priv, struct drm_file *file_priv,
uint32_t flush_domains,
struct intel_ring_buffer *ring); struct intel_ring_buffer *ring);
int i915_do_wait_request(struct drm_device *dev, int i915_do_wait_request(struct drm_device *dev,
uint32_t seqno, int interruptible, uint32_t seqno,
struct intel_ring_buffer *ring); bool interruptible,
struct intel_ring_buffer *ring);
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf); int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
void i915_gem_process_flushing_list(struct drm_device *dev,
uint32_t flush_domains,
struct intel_ring_buffer *ring);
int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj,
int write); int write);
int i915_gem_object_set_to_display_plane(struct drm_gem_object *obj); int i915_gem_object_set_to_display_plane(struct drm_gem_object *obj);
......
...@@ -1570,9 +1570,9 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj) ...@@ -1570,9 +1570,9 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
i915_verify_inactive(dev, __FILE__, __LINE__); i915_verify_inactive(dev, __FILE__, __LINE__);
} }
static void void
i915_gem_process_flushing_list(struct drm_device *dev, i915_gem_process_flushing_list(struct drm_device *dev,
uint32_t flush_domains, uint32_t seqno, uint32_t flush_domains,
struct intel_ring_buffer *ring) struct intel_ring_buffer *ring)
{ {
drm_i915_private_t *dev_priv = dev->dev_private; drm_i915_private_t *dev_priv = dev->dev_private;
...@@ -1590,7 +1590,7 @@ i915_gem_process_flushing_list(struct drm_device *dev, ...@@ -1590,7 +1590,7 @@ i915_gem_process_flushing_list(struct drm_device *dev,
obj->write_domain = 0; obj->write_domain = 0;
list_del_init(&obj_priv->gpu_write_list); list_del_init(&obj_priv->gpu_write_list);
i915_gem_object_move_to_active(obj, seqno, ring); i915_gem_object_move_to_active(obj, 0, ring);
/* update the fence lru list */ /* update the fence lru list */
if (obj_priv->fence_reg != I915_FENCE_REG_NONE) { if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
...@@ -1608,8 +1608,9 @@ i915_gem_process_flushing_list(struct drm_device *dev, ...@@ -1608,8 +1608,9 @@ i915_gem_process_flushing_list(struct drm_device *dev,
} }
uint32_t uint32_t
i915_add_request(struct drm_device *dev, struct drm_file *file_priv, i915_add_request(struct drm_device *dev,
uint32_t flush_domains, struct intel_ring_buffer *ring) struct drm_file *file_priv,
struct intel_ring_buffer *ring)
{ {
drm_i915_private_t *dev_priv = dev->dev_private; drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_file_private *i915_file_priv = NULL; struct drm_i915_file_private *i915_file_priv = NULL;
...@@ -1624,7 +1625,7 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv, ...@@ -1624,7 +1625,7 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
if (request == NULL) if (request == NULL)
return 0; return 0;
seqno = ring->add_request(dev, ring, file_priv, flush_domains); seqno = ring->add_request(dev, ring, file_priv, 0);
request->seqno = seqno; request->seqno = seqno;
request->ring = ring; request->ring = ring;
...@@ -1639,12 +1640,6 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv, ...@@ -1639,12 +1640,6 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
INIT_LIST_HEAD(&request->client_list); INIT_LIST_HEAD(&request->client_list);
} }
/* Associate any objects on the flushing list matching the write
* domain we're flushing with our request.
*/
if (flush_domains != 0)
i915_gem_process_flushing_list(dev, flush_domains, seqno, ring);
if (!dev_priv->mm.suspended) { if (!dev_priv->mm.suspended) {
mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD); mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
if (was_empty) if (was_empty)
...@@ -1659,7 +1654,7 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv, ...@@ -1659,7 +1654,7 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
* Ensures that all commands in the ring are finished * Ensures that all commands in the ring are finished
* before signalling the CPU * before signalling the CPU
*/ */
static uint32_t static void
i915_retire_commands(struct drm_device *dev, struct intel_ring_buffer *ring) i915_retire_commands(struct drm_device *dev, struct intel_ring_buffer *ring)
{ {
uint32_t flush_domains = 0; uint32_t flush_domains = 0;
...@@ -1670,7 +1665,6 @@ i915_retire_commands(struct drm_device *dev, struct intel_ring_buffer *ring) ...@@ -1670,7 +1665,6 @@ i915_retire_commands(struct drm_device *dev, struct intel_ring_buffer *ring)
ring->flush(dev, ring, ring->flush(dev, ring,
I915_GEM_DOMAIN_COMMAND, flush_domains); I915_GEM_DOMAIN_COMMAND, flush_domains);
return flush_domains;
} }
/** /**
...@@ -1837,7 +1831,7 @@ i915_gem_retire_work_handler(struct work_struct *work) ...@@ -1837,7 +1831,7 @@ i915_gem_retire_work_handler(struct work_struct *work)
int int
i915_do_wait_request(struct drm_device *dev, uint32_t seqno, i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
int interruptible, struct intel_ring_buffer *ring) bool interruptible, struct intel_ring_buffer *ring)
{ {
drm_i915_private_t *dev_priv = dev->dev_private; drm_i915_private_t *dev_priv = dev->dev_private;
u32 ier; u32 ier;
...@@ -1846,7 +1840,7 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, ...@@ -1846,7 +1840,7 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
BUG_ON(seqno == 0); BUG_ON(seqno == 0);
if (seqno == dev_priv->next_seqno) { if (seqno == dev_priv->next_seqno) {
seqno = i915_add_request(dev, NULL, 0, ring); seqno = i915_add_request(dev, NULL, ring);
if (seqno == 0) if (seqno == 0)
return -ENOMEM; return -ENOMEM;
} }
...@@ -1934,17 +1928,6 @@ i915_gem_flush(struct drm_device *dev, ...@@ -1934,17 +1928,6 @@ i915_gem_flush(struct drm_device *dev,
dev_priv->bsd_ring.flush(dev, &dev_priv->bsd_ring, dev_priv->bsd_ring.flush(dev, &dev_priv->bsd_ring,
invalidate_domains, invalidate_domains,
flush_domains); flush_domains);
/* Associate any objects on the flushing list matching the write
* domain we're flushing with the next request.
*/
if (flush_domains != 0) {
i915_gem_process_flushing_list(dev, flush_domains, 0,
&dev_priv->render_ring);
if (HAS_BSD(dev))
i915_gem_process_flushing_list(dev, flush_domains, 0,
&dev_priv->bsd_ring);
}
} }
/** /**
...@@ -2078,24 +2061,23 @@ i915_gpu_idle(struct drm_device *dev) ...@@ -2078,24 +2061,23 @@ i915_gpu_idle(struct drm_device *dev)
/* Flush everything onto the inactive list. */ /* Flush everything onto the inactive list. */
i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS); i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
seqno1 = i915_add_request(dev, NULL, 0, seqno1 = i915_add_request(dev, NULL, &dev_priv->render_ring);
&dev_priv->render_ring);
if (seqno1 == 0) if (seqno1 == 0)
return -ENOMEM; return -ENOMEM;
ret = i915_wait_request(dev, seqno1, &dev_priv->render_ring); ret = i915_wait_request(dev, seqno1, &dev_priv->render_ring);
if (ret)
return ret;
if (HAS_BSD(dev)) { if (HAS_BSD(dev)) {
seqno2 = i915_add_request(dev, NULL, 0, seqno2 = i915_add_request(dev, NULL, &dev_priv->bsd_ring);
&dev_priv->bsd_ring);
if (seqno2 == 0) if (seqno2 == 0)
return -ENOMEM; return -ENOMEM;
ret = i915_wait_request(dev, seqno2, &dev_priv->bsd_ring); ret = i915_wait_request(dev, seqno2, &dev_priv->bsd_ring);
if (ret) if (ret)
return ret; return ret;
} }
return ret; return 0;
} }
int int
...@@ -2641,7 +2623,7 @@ i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj) ...@@ -2641,7 +2623,7 @@ i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
/* Queue the GPU write cache flushing we need. */ /* Queue the GPU write cache flushing we need. */
old_write_domain = obj->write_domain; old_write_domain = obj->write_domain;
i915_gem_flush(dev, 0, obj->write_domain); i915_gem_flush(dev, 0, obj->write_domain);
if (i915_add_request(dev, NULL, obj->write_domain, obj_priv->ring) == 0) if (i915_add_request(dev, NULL, obj_priv->ring) == 0)
return -ENOMEM; return -ENOMEM;
trace_i915_gem_object_change_domain(obj, trace_i915_gem_object_change_domain(obj,
...@@ -3564,7 +3546,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, ...@@ -3564,7 +3546,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
struct drm_i915_gem_relocation_entry *relocs = NULL; struct drm_i915_gem_relocation_entry *relocs = NULL;
int ret = 0, ret2, i, pinned = 0; int ret = 0, ret2, i, pinned = 0;
uint64_t exec_offset; uint64_t exec_offset;
uint32_t seqno, flush_domains, reloc_index; uint32_t seqno, reloc_index;
int pin_tries, flips; int pin_tries, flips;
struct intel_ring_buffer *ring = NULL; struct intel_ring_buffer *ring = NULL;
...@@ -3780,13 +3762,11 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, ...@@ -3780,13 +3762,11 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
} }
if (dev_priv->render_ring.outstanding_lazy_request) { if (dev_priv->render_ring.outstanding_lazy_request) {
(void)i915_add_request(dev, file_priv, 0, (void)i915_add_request(dev, file_priv, &dev_priv->render_ring);
&dev_priv->render_ring);
dev_priv->render_ring.outstanding_lazy_request = false; dev_priv->render_ring.outstanding_lazy_request = false;
} }
if (dev_priv->bsd_ring.outstanding_lazy_request) { if (dev_priv->bsd_ring.outstanding_lazy_request) {
(void)i915_add_request(dev, file_priv, 0, (void)i915_add_request(dev, file_priv, &dev_priv->bsd_ring);
&dev_priv->bsd_ring);
dev_priv->bsd_ring.outstanding_lazy_request = false; dev_priv->bsd_ring.outstanding_lazy_request = false;
} }
...@@ -3835,7 +3815,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, ...@@ -3835,7 +3815,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
* Ensure that the commands in the batch buffer are * Ensure that the commands in the batch buffer are
* finished before the interrupt fires * finished before the interrupt fires
*/ */
flush_domains = i915_retire_commands(dev, ring); i915_retire_commands(dev, ring);
i915_verify_inactive(dev, __FILE__, __LINE__); i915_verify_inactive(dev, __FILE__, __LINE__);
...@@ -3846,7 +3826,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, ...@@ -3846,7 +3826,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
* *some* interrupts representing completion of buffers that we can * *some* interrupts representing completion of buffers that we can
* wait on when trying to clear up gtt space). * wait on when trying to clear up gtt space).
*/ */
seqno = i915_add_request(dev, file_priv, flush_domains, ring); seqno = i915_add_request(dev, file_priv, ring);
BUG_ON(seqno == 0); BUG_ON(seqno == 0);
for (i = 0; i < args->buffer_count; i++) { for (i = 0; i < args->buffer_count; i++) {
struct drm_gem_object *obj = object_list[i]; struct drm_gem_object *obj = object_list[i];
...@@ -4244,7 +4224,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data, ...@@ -4244,7 +4224,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
*/ */
if (obj->write_domain) { if (obj->write_domain) {
i915_gem_flush(dev, 0, obj->write_domain); i915_gem_flush(dev, 0, obj->write_domain);
(void)i915_add_request(dev, file_priv, obj->write_domain, obj_priv->ring); (void)i915_add_request(dev, file_priv, obj_priv->ring);
} }
/* Update the active list for the hardware's current position. /* Update the active list for the hardware's current position.
......
...@@ -230,7 +230,7 @@ static int intel_overlay_on(struct intel_overlay *overlay) ...@@ -230,7 +230,7 @@ static int intel_overlay_on(struct intel_overlay *overlay)
ADVANCE_LP_RING(); ADVANCE_LP_RING();
overlay->last_flip_req = overlay->last_flip_req =
i915_add_request(dev, NULL, 0, &dev_priv->render_ring); i915_add_request(dev, NULL, &dev_priv->render_ring);
if (overlay->last_flip_req == 0) if (overlay->last_flip_req == 0)
return -ENOMEM; return -ENOMEM;
...@@ -269,7 +269,7 @@ static void intel_overlay_continue(struct intel_overlay *overlay, ...@@ -269,7 +269,7 @@ static void intel_overlay_continue(struct intel_overlay *overlay,
ADVANCE_LP_RING(); ADVANCE_LP_RING();
overlay->last_flip_req = overlay->last_flip_req =
i915_add_request(dev, NULL, 0, &dev_priv->render_ring); i915_add_request(dev, NULL, &dev_priv->render_ring);
} }
static int intel_overlay_wait_flip(struct intel_overlay *overlay) static int intel_overlay_wait_flip(struct intel_overlay *overlay)
...@@ -301,7 +301,7 @@ static int intel_overlay_wait_flip(struct intel_overlay *overlay) ...@@ -301,7 +301,7 @@ static int intel_overlay_wait_flip(struct intel_overlay *overlay)
ADVANCE_LP_RING(); ADVANCE_LP_RING();
overlay->last_flip_req = overlay->last_flip_req =
i915_add_request(dev, NULL, 0, &dev_priv->render_ring); i915_add_request(dev, NULL, &dev_priv->render_ring);
if (overlay->last_flip_req == 0) if (overlay->last_flip_req == 0)
return -ENOMEM; return -ENOMEM;
...@@ -342,7 +342,7 @@ static int intel_overlay_off(struct intel_overlay *overlay) ...@@ -342,7 +342,7 @@ static int intel_overlay_off(struct intel_overlay *overlay)
ADVANCE_LP_RING(); ADVANCE_LP_RING();
overlay->last_flip_req = overlay->last_flip_req =
i915_add_request(dev, NULL, 0, &dev_priv->render_ring); i915_add_request(dev, NULL, &dev_priv->render_ring);
if (overlay->last_flip_req == 0) if (overlay->last_flip_req == 0)
return -ENOMEM; return -ENOMEM;
...@@ -362,7 +362,7 @@ static int intel_overlay_off(struct intel_overlay *overlay) ...@@ -362,7 +362,7 @@ static int intel_overlay_off(struct intel_overlay *overlay)
ADVANCE_LP_RING(); ADVANCE_LP_RING();
overlay->last_flip_req = overlay->last_flip_req =
i915_add_request(dev, NULL, 0, &dev_priv->render_ring); i915_add_request(dev, NULL, &dev_priv->render_ring);
if (overlay->last_flip_req == 0) if (overlay->last_flip_req == 0)
return -ENOMEM; return -ENOMEM;
...@@ -409,7 +409,7 @@ int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay, ...@@ -409,7 +409,7 @@ int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay,
if (overlay->last_flip_req == 0) { if (overlay->last_flip_req == 0) {
overlay->last_flip_req = overlay->last_flip_req =
i915_add_request(dev, NULL, 0, &dev_priv->render_ring); i915_add_request(dev, NULL, &dev_priv->render_ring);
if (overlay->last_flip_req == 0) if (overlay->last_flip_req == 0)
return -ENOMEM; return -ENOMEM;
} }
...@@ -439,8 +439,9 @@ int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay, ...@@ -439,8 +439,9 @@ int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay,
OUT_RING(MI_NOOP); OUT_RING(MI_NOOP);
ADVANCE_LP_RING(); ADVANCE_LP_RING();
overlay->last_flip_req = i915_add_request(dev, NULL, overlay->last_flip_req =
0, &dev_priv->render_ring); i915_add_request(dev, NULL,
&dev_priv->render_ring);
if (overlay->last_flip_req == 0) if (overlay->last_flip_req == 0)
return -ENOMEM; return -ENOMEM;
......
...@@ -116,6 +116,8 @@ render_ring_flush(struct drm_device *dev, ...@@ -116,6 +116,8 @@ render_ring_flush(struct drm_device *dev,
intel_ring_emit(dev, ring, MI_NOOP); intel_ring_emit(dev, ring, MI_NOOP);
intel_ring_advance(dev, ring); intel_ring_advance(dev, ring);
} }
i915_gem_process_flushing_list(dev, flush_domains, ring);
} }
static unsigned int render_ring_get_head(struct drm_device *dev, static unsigned int render_ring_get_head(struct drm_device *dev,
...@@ -384,6 +386,8 @@ bsd_ring_flush(struct drm_device *dev, ...@@ -384,6 +386,8 @@ bsd_ring_flush(struct drm_device *dev,
intel_ring_emit(dev, ring, MI_FLUSH); intel_ring_emit(dev, ring, MI_FLUSH);
intel_ring_emit(dev, ring, MI_NOOP); intel_ring_emit(dev, ring, MI_NOOP);
intel_ring_advance(dev, ring); intel_ring_advance(dev, ring);
i915_gem_process_flushing_list(dev, flush_domains, ring);
} }
static inline unsigned int bsd_ring_get_head(struct drm_device *dev, static inline unsigned int bsd_ring_get_head(struct drm_device *dev,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment