Commit ce44b0ea authored by Eric Anholt, committed by Dave Airlie

drm/i915: Move flushing list cleanup from flush request retire to request emit.

obj_priv->write_domain means "the write domain this object would be left in
if the GPU went idle now", not "the write domain at this moment."  By
postponing the clear until request retire, we confused that concept, required
extra storage (the per-request flush_domains field), and potentially emitted
more flushes than required.
Signed-off-by: Eric Anholt <eric@anholt.net>
Signed-off-by: Dave Airlie <airlied@redhat.com>
parent a7f014f2
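Before the diff, here is a minimal standalone sketch of the bookkeeping this commit introduces. It is not the driver code: the struct, the domain bit values, and the array walk are invented for illustration (the real code walks kernel list_heads under locking). The point it models: an object waiting on the flushing list has last_rendering_seqno == 0, and when a request that covers its write domain is emitted, the object's write_domain is cleared immediately and the object moves back to the active list stamped with that request's seqno.

/*
 * Minimal sketch with invented, simplified types (not the kernel
 * structures): models how emitting a flush request now clears
 * write_domain and re-activates flushing objects with the new seqno.
 */
#include <stdint.h>
#include <stdio.h>

struct fake_obj {
	uint32_t write_domain;		/* GPU write domain still needing a flush */
	uint32_t last_rendering_seqno;	/* 0 while on the flushing/inactive lists */
	int on_flushing_list;		/* stand-in for list membership */
};

/* Stand-in for i915_gem_object_move_to_active(): activity and seqno
 * are now set together, in one place. */
static void fake_move_to_active(struct fake_obj *obj, uint32_t seqno)
{
	obj->on_flushing_list = 0;
	obj->last_rendering_seqno = seqno;
}

/* Stand-in for the new loop in i915_add_request(): any flushing object
 * whose write domain is fully covered by this flush becomes active for
 * the request being emitted, and its write_domain is cleared now rather
 * than when the request retires. */
static void fake_add_request(struct fake_obj *objs, int count,
			     uint32_t flush_domains, uint32_t seqno)
{
	for (int i = 0; i < count; i++) {
		struct fake_obj *obj = &objs[i];

		if (obj->on_flushing_list &&
		    (obj->write_domain & flush_domains) == obj->write_domain) {
			obj->write_domain = 0;
			fake_move_to_active(obj, seqno);
		}
	}
}

int main(void)
{
	struct fake_obj objs[2] = {
		{ .write_domain = 0x2, .on_flushing_list = 1 },	/* covered by the flush */
		{ .write_domain = 0x4, .on_flushing_list = 1 },	/* not covered */
	};

	fake_add_request(objs, 2, 0x2, 7);	/* emit request 7 flushing domain 0x2 */

	for (int i = 0; i < 2; i++)
		printf("obj%d: write_domain=%#x seqno=%u flushing=%d\n", i,
		       (unsigned)objs[i].write_domain,
		       (unsigned)objs[i].last_rendering_seqno,
		       objs[i].on_flushing_list);
	return 0;
}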
@@ -244,6 +244,10 @@ typedef struct drm_i915_private {
 	 * List of objects currently involved in rendering from the
 	 * ringbuffer.
 	 *
+	 * Includes buffers having the contents of their GPU caches
+	 * flushed, not necessarily primitives. last_rendering_seqno
+	 * represents when the rendering involved will be completed.
+	 *
 	 * A reference is held on the buffer while on this list.
 	 */
 	struct list_head active_list;
@@ -253,6 +257,8 @@ typedef struct drm_i915_private {
 	 * still have a write_domain which needs to be flushed before
 	 * unbinding.
 	 *
+	 * last_rendering_seqno is 0 while an object is in this list.
+	 *
 	 * A reference is held on the buffer while on this list.
 	 */
 	struct list_head flushing_list;
@@ -261,6 +267,8 @@ typedef struct drm_i915_private {
 	 * LRU list of objects which are not in the ringbuffer and
 	 * are ready to unbind, but are still in the GTT.
 	 *
+	 * last_rendering_seqno is 0 while an object is in this list.
+	 *
 	 * A reference is not held on the buffer while on this list,
 	 * as merely being GTT-bound shouldn't prevent its being
 	 * freed, and we'll pull it off the list in the free path.
@@ -394,9 +402,6 @@ struct drm_i915_gem_request {
 	/** Time at which this request was emitted, in jiffies. */
 	unsigned long emitted_jiffies;
-	/** Cache domains that were flushed at the start of the request. */
-	uint32_t flush_domains;
 	struct list_head list;
 };
@@ -532,7 +532,7 @@ i915_gem_object_free_page_list(struct drm_gem_object *obj)
 }
 static void
-i915_gem_object_move_to_active(struct drm_gem_object *obj)
+i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno)
 {
 	struct drm_device *dev = obj->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
@@ -546,8 +546,20 @@ i915_gem_object_move_to_active(struct drm_gem_object *obj)
 	/* Move from whatever list we were on to the tail of execution. */
 	list_move_tail(&obj_priv->list,
 		       &dev_priv->mm.active_list);
+	obj_priv->last_rendering_seqno = seqno;
 }
 static void
+i915_gem_object_move_to_flushing(struct drm_gem_object *obj)
+{
+	struct drm_device *dev = obj->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	BUG_ON(!obj_priv->active);
+	list_move_tail(&obj_priv->list, &dev_priv->mm.flushing_list);
+	obj_priv->last_rendering_seqno = 0;
+}
+static void
 i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
@@ -562,6 +574,7 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
 	else
 		list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
+	obj_priv->last_rendering_seqno = 0;
 	if (obj_priv->active) {
 		obj_priv->active = 0;
 		drm_gem_object_unreference(obj);
@@ -610,10 +623,28 @@ i915_add_request(struct drm_device *dev, uint32_t flush_domains)
 	request->seqno = seqno;
 	request->emitted_jiffies = jiffies;
-	request->flush_domains = flush_domains;
 	was_empty = list_empty(&dev_priv->mm.request_list);
 	list_add_tail(&request->list, &dev_priv->mm.request_list);
+	/* Associate any objects on the flushing list matching the write
+	 * domain we're flushing with our flush.
+	 */
+	if (flush_domains != 0) {
+		struct drm_i915_gem_object *obj_priv, *next;
+		list_for_each_entry_safe(obj_priv, next,
+					 &dev_priv->mm.flushing_list, list) {
+			struct drm_gem_object *obj = obj_priv->obj;
+			if ((obj->write_domain & flush_domains) ==
+			    obj->write_domain) {
+				obj->write_domain = 0;
+				i915_gem_object_move_to_active(obj, seqno);
+			}
+		}
+	}
 	if (was_empty && !dev_priv->mm.suspended)
 		schedule_delayed_work(&dev_priv->mm.retire_work, HZ);
 	return seqno;
@@ -676,30 +707,10 @@ i915_gem_retire_request(struct drm_device *dev,
 			 __func__, request->seqno, obj);
 #endif
-		if (obj->write_domain != 0) {
-			list_move_tail(&obj_priv->list,
-				       &dev_priv->mm.flushing_list);
-		} else {
+		if (obj->write_domain != 0)
+			i915_gem_object_move_to_flushing(obj);
+		else
 			i915_gem_object_move_to_inactive(obj);
-		}
 	}
-	if (request->flush_domains != 0) {
-		struct drm_i915_gem_object *obj_priv, *next;
-		/* Clear the write domain and activity from any buffers
-		 * that are just waiting for a flush matching the one retired.
-		 */
-		list_for_each_entry_safe(obj_priv, next,
-					 &dev_priv->mm.flushing_list, list) {
-			struct drm_gem_object *obj = obj_priv->obj;
-			if (obj->write_domain & request->flush_domains) {
-				obj->write_domain = 0;
-				i915_gem_object_move_to_inactive(obj);
-			}
-		}
-	}
 }
@@ -896,17 +907,15 @@ i915_gem_object_wait_rendering(struct drm_gem_object *obj)
 	 * create a new seqno to wait for.
 	 */
 	if (obj->write_domain & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT)) {
-		uint32_t write_domain = obj->write_domain;
+		uint32_t seqno, write_domain = obj->write_domain;
 #if WATCH_BUF
 		DRM_INFO("%s: flushing object %p from write domain %08x\n",
 			 __func__, obj, write_domain);
 #endif
 		i915_gem_flush(dev, 0, write_domain);
-		i915_gem_object_move_to_active(obj);
-		obj_priv->last_rendering_seqno = i915_add_request(dev,
-								  write_domain);
-		BUG_ON(obj_priv->last_rendering_seqno == 0);
+		seqno = i915_add_request(dev, write_domain);
+		i915_gem_object_move_to_active(obj, seqno);
 #if WATCH_LRU
 		DRM_INFO("%s: flush moves to exec list %p\n", __func__, obj);
 #endif
@@ -1927,10 +1936,8 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
 	i915_file_priv->mm.last_gem_seqno = seqno;
 	for (i = 0; i < args->buffer_count; i++) {
 		struct drm_gem_object *obj = object_list[i];
-		struct drm_i915_gem_object *obj_priv = obj->driver_private;
-		i915_gem_object_move_to_active(obj);
-		obj_priv->last_rendering_seqno = seqno;
+		i915_gem_object_move_to_active(obj, seqno);
 #if WATCH_LRU
 		DRM_INFO("%s: move to exec list %p\n", __func__, obj);
 #endif
@@ -166,10 +166,9 @@ static int i915_gem_request_info(char *buf, char **start, off_t offset,
 	list_for_each_entry(gem_request, &dev_priv->mm.request_list,
 			    list)
 	{
-		DRM_PROC_PRINT(" %d @ %d %08x\n",
+		DRM_PROC_PRINT(" %d @ %d\n",
 			       gem_request->seqno,
-			       (int) (jiffies - gem_request->emitted_jiffies),
-			       gem_request->flush_domains);
+			       (int) (jiffies - gem_request->emitted_jiffies));
 	}
 	if (len > request + offset)
 		return request;
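The retire path that remains after this diff is correspondingly simpler. Below is another minimal sketch under the same kind of invented, simplified types (an enum stands in for the driver's lists; the real code walks list_heads under locking): retiring a request only moves each finished object to the flushing or inactive list and zeroes its last_rendering_seqno; clearing write_domain is no longer retire's job, since that now happens when the flush request is emitted.

/*
 * Minimal sketch with invented types: what retire is left doing after
 * this change.  It does not touch write_domain at all.
 */
#include <stdint.h>
#include <stdio.h>

enum fake_list { ACTIVE, FLUSHING, INACTIVE };

struct fake_obj {
	uint32_t write_domain;		/* still-pending GPU write domain, if any */
	uint32_t last_rendering_seqno;	/* request the object was last active for */
	enum fake_list list;
};

/* Stand-in for the trimmed-down retire logic: move a finished object off
 * the active list, to FLUSHING if a write is still pending, else INACTIVE. */
static void fake_retire(struct fake_obj *obj, uint32_t retired_seqno)
{
	if (obj->list != ACTIVE || obj->last_rendering_seqno != retired_seqno)
		return;	/* nothing from this request to retire */

	obj->last_rendering_seqno = 0;
	obj->list = obj->write_domain != 0 ? FLUSHING : INACTIVE;
}

int main(void)
{
	struct fake_obj dirty = { .write_domain = 0x2, .last_rendering_seqno = 5, .list = ACTIVE };
	struct fake_obj clean = { .write_domain = 0x0, .last_rendering_seqno = 5, .list = ACTIVE };

	fake_retire(&dirty, 5);
	fake_retire(&clean, 5);
	printf("dirty object -> list %d (1 = flushing), clean object -> list %d (2 = inactive)\n",
	       dirty.list, clean.list);
	return 0;
}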