Commit d7f46fc4 authored by Ben Widawsky, committed by Daniel Vetter

drm/i915: Make pin count per VMA

Signed-off-by: Ben Widawsky <ben@bwidawsk.net>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
parent 685987c6
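
In brief, the diff below moves the GTT pin count from struct drm_i915_gem_object onto each struct i915_vma, renames i915_gem_object_unpin() to i915_gem_object_ggtt_unpin() (it now drops the pin on the object's GGTT VMA), and adds an i915_gem_obj_is_pinned() helper that reports whether any VMA of an object is pinned. Below is a minimal userspace sketch of that per-VMA bookkeeping; the struct names mirror the driver but are simplified, illustrative stand-ins (a plain next pointer instead of the kernel's list_head), not the real driver types.

/*
 * Simplified model of the per-VMA pin count introduced by this commit.
 * Build with: cc -std=c99 -Wall pin_sketch.c && ./a.out
 */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

struct i915_vma {
        unsigned int pin_count;         /* was a field of the object; now per VMA */
        struct i915_vma *next;          /* stand-in for the obj->vma_list linkage */
};

struct drm_i915_gem_object {
        struct i915_vma *vma_list;      /* all address-space bindings of the object */
};

/* Mirrors the new i915_gem_obj_is_pinned(): pinned if any VMA holds a pin. */
static bool obj_is_pinned(const struct drm_i915_gem_object *obj)
{
        const struct i915_vma *vma;

        for (vma = obj->vma_list; vma; vma = vma->next)
                if (vma->pin_count > 0)
                        return true;
        return false;
}

int main(void)
{
        struct i915_vma ggtt = { .pin_count = 0, .next = NULL };
        struct i915_vma ppgtt = { .pin_count = 0, .next = &ggtt };
        struct drm_i915_gem_object obj = { .vma_list = &ppgtt };

        assert(!obj_is_pinned(&obj));

        ggtt.pin_count++;               /* e.g. pinned for scanout in the GGTT */
        assert(obj_is_pinned(&obj));    /* the object counts as pinned... */
        assert(ppgtt.pin_count == 0);   /* ...but only the GGTT VMA holds the pin */

        ggtt.pin_count--;               /* analogous to i915_gem_object_ggtt_unpin() */
        assert(!obj_is_pinned(&obj));

        printf("per-VMA pin bookkeeping behaves as expected\n");
        return 0;
}

The design point, visible throughout the hunks that follow, is that unbind and eviction paths can now check vma->pin_count for the specific address space they care about, instead of refusing whenever the object is pinned anywhere.
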
@@ -100,7 +100,7 @@ static const char *get_pin_flag(struct drm_i915_gem_object *obj)
 {
         if (obj->user_pin_count > 0)
                 return "P";
-        else if (obj->pin_count > 0)
+        else if (i915_gem_obj_is_pinned(obj))
                 return "p";
         else
                 return " ";
@@ -125,6 +125,8 @@ static void
 describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
 {
         struct i915_vma *vma;
+        int pin_count = 0;
+
         seq_printf(m, "%pK: %s%s%s %8zdKiB %02x %02x %u %u %u%s%s%s",
                    &obj->base,
                    get_pin_flag(obj),
@@ -141,8 +143,10 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
                    obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
         if (obj->base.name)
                 seq_printf(m, " (name: %d)", obj->base.name);
-        if (obj->pin_count)
-                seq_printf(m, " (pinned x %d)", obj->pin_count);
+        list_for_each_entry(vma, &obj->vma_list, vma_link)
+                if (vma->pin_count > 0)
+                        pin_count++;
+        seq_printf(m, " (pinned x %d)", pin_count);
         if (obj->pin_display)
                 seq_printf(m, " (display)");
         if (obj->fence_reg != I915_FENCE_REG_NONE)
@@ -439,7 +443,7 @@ static int i915_gem_gtt_info(struct seq_file *m, void *data)
         total_obj_size = total_gtt_size = count = 0;
         list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
-                if (list == PINNED_LIST && obj->pin_count == 0)
+                if (list == PINNED_LIST && !i915_gem_obj_is_pinned(obj))
                         continue;
 
                 seq_puts(m, " ");
@@ -2843,7 +2847,7 @@ i915_drop_caches_set(void *data, u64 val)
         list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
                 list_for_each_entry_safe(vma, x, &vm->inactive_list,
                                          mm_list) {
-                        if (vma->obj->pin_count)
+                        if (vma->pin_count)
                                 continue;
 
                         ret = i915_vma_unbind(vma);
...
@@ -651,6 +651,19 @@ struct i915_vma {
         unsigned long exec_handle;
         struct drm_i915_gem_exec_object2 *exec_entry;
 
+        /**
+         * How many users have pinned this object in GTT space. The following
+         * users can each hold at most one reference: pwrite/pread, pin_ioctl
+         * (via user_pin_count), execbuffer (objects are not allowed multiple
+         * times for the same batchbuffer), and the framebuffer code. When
+         * switching/pageflipping, the framebuffer code has at most two buffers
+         * pinned per crtc.
+         *
+         * In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3
+         * bits with absolutely no headroom. So use 4 bits.
+         */
+        unsigned int pin_count:4;
+#define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf
 };
 
 struct i915_ctx_hang_stats {
@@ -1617,18 +1630,6 @@ struct drm_i915_gem_object {
          */
         unsigned int fence_dirty:1;
 
-        /** How many users have pinned this object in GTT space. The following
-         * users can each hold at most one reference: pwrite/pread, pin_ioctl
-         * (via user_pin_count), execbuffer (objects are not allowed multiple
-         * times for the same batchbuffer), and the framebuffer code. When
-         * switching/pageflipping, the framebuffer code has at most two buffers
-         * pinned per crtc.
-         *
-         * In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3
-         * bits with absolutely no headroom. So use 4 bits. */
-        unsigned int pin_count:4;
-#define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf
-
         /**
          * Is the object at the current location in the gtt mappable and
          * fenceable? Used to avoid costly recalculations.
@@ -2005,7 +2006,7 @@ int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj,
                                      uint32_t alignment,
                                      bool map_and_fenceable,
                                      bool nonblocking);
-void i915_gem_object_unpin(struct drm_i915_gem_object *obj);
+void i915_gem_object_ggtt_unpin(struct drm_i915_gem_object *obj);
 int __must_check i915_vma_unbind(struct i915_vma *vma);
 int __must_check i915_gem_object_ggtt_unbind(struct drm_i915_gem_object *obj);
 int i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
@@ -2168,6 +2169,13 @@ i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
                                   struct i915_address_space *vm);
 
 struct i915_vma *i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj);
+static inline bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj) {
+        struct i915_vma *vma;
+        list_for_each_entry(vma, &obj->vma_list, vma_link)
+                if (vma->pin_count > 0)
+                        return true;
+        return false;
+}
 
 /* Some GGTT VM helpers */
 #define obj_to_ggtt(obj) \
...
@@ -204,7 +204,7 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
         pinned = 0;
         mutex_lock(&dev->struct_mutex);
         list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
-                if (obj->pin_count)
+                if (i915_gem_obj_is_pinned(obj))
                         pinned += i915_gem_obj_ggtt_size(obj);
         mutex_unlock(&dev->struct_mutex);
@@ -651,7 +651,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
         }
 
 out_unpin:
-        i915_gem_object_unpin(obj);
+        i915_gem_object_ggtt_unpin(obj);
 out:
         return ret;
 }
@@ -1418,7 +1418,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
         /* Finally, remap it using the new GTT offset */
         ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
 unpin:
-        i915_gem_object_unpin(obj);
+        i915_gem_object_ggtt_unpin(obj);
 unlock:
         mutex_unlock(&dev->struct_mutex);
 out:
@@ -2721,7 +2721,7 @@ int i915_vma_unbind(struct i915_vma *vma)
                 return 0;
         }
 
-        if (obj->pin_count)
+        if (vma->pin_count)
                 return -EBUSY;
 
         BUG_ON(obj->pages == NULL);
@@ -2785,7 +2785,7 @@ i915_gem_object_ggtt_unbind(struct drm_i915_gem_object *obj)
         if (!i915_gem_obj_ggtt_bound(obj))
                 return 0;
 
-        if (obj->pin_count)
+        if (i915_gem_obj_to_ggtt(obj)->pin_count)
                 return -EBUSY;
 
         BUG_ON(obj->pages == NULL);
@@ -3486,7 +3486,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
         if (obj->cache_level == cache_level)
                 return 0;
 
-        if (obj->pin_count) {
+        if (i915_gem_obj_is_pinned(obj)) {
                 DRM_DEBUG("can not change the cache level of pinned objects\n");
                 return -EBUSY;
         }
@@ -3646,7 +3646,7 @@ static bool is_pin_display(struct drm_i915_gem_object *obj)
          * subtracting the potential reference by the user, any pin_count
          * remains, it must be due to another use by the display engine.
          */
-        return obj->pin_count - !!obj->user_pin_count;
+        return i915_gem_obj_to_ggtt(obj)->pin_count - !!obj->user_pin_count;
 }
 
 /*
@@ -3720,7 +3720,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
 void
 i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj)
 {
-        i915_gem_object_unpin(obj);
+        i915_gem_object_ggtt_unpin(obj);
         obj->pin_display = is_pin_display(obj);
 }
@@ -3853,18 +3853,18 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
         struct i915_vma *vma;
         int ret;
 
-        if (WARN_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
-                return -EBUSY;
-
         WARN_ON(map_and_fenceable && !i915_is_ggtt(vm));
 
         vma = i915_gem_obj_to_vma(obj, vm);
 
         if (vma) {
+                if (WARN_ON(vma->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
+                        return -EBUSY;
+
                 if ((alignment &&
                      vma->node.start & (alignment - 1)) ||
                     (map_and_fenceable && !obj->map_and_fenceable)) {
-                        WARN(obj->pin_count,
+                        WARN(vma->pin_count,
                              "bo is already pinned with incorrect alignment:"
                              " offset=%lx, req.alignment=%x, req.map_and_fenceable=%d,"
                              " obj->map_and_fenceable=%d\n",
@@ -3893,19 +3893,22 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
         if (!obj->has_global_gtt_mapping && map_and_fenceable)
                 i915_gem_gtt_bind_object(obj, obj->cache_level);
 
-        obj->pin_count++;
+        i915_gem_obj_to_vma(obj, vm)->pin_count++;
         obj->pin_mappable |= map_and_fenceable;
 
         return 0;
 }
 
 void
-i915_gem_object_unpin(struct drm_i915_gem_object *obj)
+i915_gem_object_ggtt_unpin(struct drm_i915_gem_object *obj)
 {
-        BUG_ON(obj->pin_count == 0);
-        BUG_ON(!i915_gem_obj_bound_any(obj));
+        struct i915_vma *vma = i915_gem_obj_to_ggtt(obj);
 
-        if (--obj->pin_count == 0)
+        BUG_ON(!vma);
+        BUG_ON(vma->pin_count == 0);
+        BUG_ON(!i915_gem_obj_ggtt_bound(obj));
+
+        if (--vma->pin_count == 0)
                 obj->pin_mappable = false;
 }
@@ -3989,7 +3992,7 @@ i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
         obj->user_pin_count--;
         if (obj->user_pin_count == 0) {
                 obj->pin_filp = NULL;
-                i915_gem_object_unpin(obj);
+                i915_gem_object_ggtt_unpin(obj);
         }
 
 out:
@@ -4069,7 +4072,7 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
                 goto unlock;
         }
 
-        if (obj->pin_count) {
+        if (i915_gem_obj_is_pinned(obj)) {
                 ret = -EINVAL;
                 goto out;
         }
@@ -4178,12 +4181,14 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
         if (obj->phys_obj)
                 i915_gem_detach_phys_object(dev, obj);
 
-        obj->pin_count = 0;
         /* NB: 0 or 1 elements */
         WARN_ON(!list_empty(&obj->vma_list) &&
                 !list_is_singular(&obj->vma_list));
         list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
-                int ret = i915_vma_unbind(vma);
+                int ret;
+
+                vma->pin_count = 0;
+                ret = i915_vma_unbind(vma);
                 if (WARN_ON(ret == -ERESTARTSYS)) {
                         bool was_interruptible;
@@ -4963,7 +4968,7 @@ i915_gem_inactive_count(struct shrinker *shrinker, struct shrink_control *sc)
                 if (obj->active)
                         continue;
 
-                if (obj->pin_count == 0 && obj->pages_pin_count == 0)
+                if (!i915_gem_obj_is_pinned(obj) && obj->pages_pin_count == 0)
                         count += obj->base.size >> PAGE_SHIFT;
         }
...
@@ -241,7 +241,7 @@ static int create_default_context(struct drm_i915_private *dev_priv)
         return 0;
 
 err_unpin:
-        i915_gem_object_unpin(ctx->obj);
+        i915_gem_object_ggtt_unpin(ctx->obj);
 err_destroy:
         i915_gem_context_unreference(ctx);
         return ret;
@@ -300,11 +300,11 @@ void i915_gem_context_fini(struct drm_device *dev)
         if (dev_priv->ring[RCS].last_context == dctx) {
                 /* Fake switch to NULL context */
                 WARN_ON(dctx->obj->active);
-                i915_gem_object_unpin(dctx->obj);
+                i915_gem_object_ggtt_unpin(dctx->obj);
                 i915_gem_context_unreference(dctx);
         }
 
-        i915_gem_object_unpin(dctx->obj);
+        i915_gem_object_ggtt_unpin(dctx->obj);
         i915_gem_context_unreference(dctx);
         dev_priv->ring[RCS].default_context = NULL;
         dev_priv->ring[RCS].last_context = NULL;
@@ -412,7 +412,7 @@ static int do_switch(struct i915_hw_context *to)
         u32 hw_flags = 0;
         int ret, i;
 
-        BUG_ON(from != NULL && from->obj != NULL && from->obj->pin_count == 0);
+        BUG_ON(from != NULL && from->obj != NULL && !i915_gem_obj_is_pinned(from->obj));
 
         if (from == to && !to->remap_slice)
                 return 0;
@@ -428,7 +428,7 @@ static int do_switch(struct i915_hw_context *to)
          * XXX: We need a real interface to do this instead of trickery. */
         ret = i915_gem_object_set_to_gtt_domain(to->obj, false);
         if (ret) {
-                i915_gem_object_unpin(to->obj);
+                i915_gem_object_ggtt_unpin(to->obj);
                 return ret;
         }
@@ -440,7 +440,7 @@ static int do_switch(struct i915_hw_context *to)
         ret = mi_set_context(ring, to, hw_flags);
         if (ret) {
-                i915_gem_object_unpin(to->obj);
+                i915_gem_object_ggtt_unpin(to->obj);
                 return ret;
         }
@@ -476,7 +476,7 @@ static int do_switch(struct i915_hw_context *to)
                 BUG_ON(from->obj->ring != ring);
 
                 /* obj is kept alive until the next request by its active ref */
-                i915_gem_object_unpin(from->obj);
+                i915_gem_object_ggtt_unpin(from->obj);
                 i915_gem_context_unreference(from);
         }
...
@@ -34,7 +34,8 @@
 static bool
 mark_free(struct i915_vma *vma, struct list_head *unwind)
 {
-        if (vma->obj->pin_count)
+        /* Freeing up memory requires no VMAs are pinned */
+        if (i915_gem_obj_is_pinned(vma->obj))
                 return false;
 
         if (WARN_ON(!list_empty(&vma->exec_list)))
@@ -186,7 +187,7 @@ int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle)
         }
 
         list_for_each_entry_safe(vma, next, &vm->inactive_list, mm_list)
-                if (vma->obj->pin_count == 0)
+                if (vma->pin_count == 0)
                         WARN_ON(i915_vma_unbind(vma));
 
         return 0;
...
@@ -566,7 +566,7 @@ i915_gem_execbuffer_unreserve_vma(struct i915_vma *vma)
                 i915_gem_object_unpin_fence(obj);
 
         if (entry->flags & __EXEC_OBJECT_HAS_PIN)
-                i915_gem_object_unpin(obj);
+                vma->pin_count--;
 
         entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
 }
@@ -923,7 +923,9 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas,
                 if (obj->base.write_domain) {
                         obj->dirty = 1;
                         obj->last_write_seqno = intel_ring_get_seqno(ring);
-                        if (obj->pin_count) /* check for potential scanout */
+                        /* check for potential scanout */
+                        if (i915_gem_obj_ggtt_bound(obj) &&
+                            i915_gem_obj_to_ggtt(obj)->pin_count)
                                 intel_mark_fb_busy(obj, ring);
                 }
...
@@ -308,7 +308,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
                 return -EINVAL;
         }
 
-        if (obj->pin_count || obj->framebuffer_references) {
+        if (i915_gem_obj_is_pinned(obj) || obj->framebuffer_references) {
                 drm_gem_object_unreference_unlocked(&obj->base);
                 return -EBUSY;
         }
...
@@ -578,7 +578,7 @@ static void capture_bo(struct drm_i915_error_buffer *err,
         err->write_domain = obj->base.write_domain;
         err->fence_reg = obj->fence_reg;
         err->pinned = 0;
-        if (obj->pin_count > 0)
+        if (i915_gem_obj_is_pinned(obj))
                 err->pinned = 1;
         if (obj->user_pin_count > 0)
                 err->pinned = -1;
@@ -611,7 +611,7 @@ static u32 capture_pinned_bo(struct drm_i915_error_buffer *err,
         int i = 0;
 
         list_for_each_entry(obj, head, global_list) {
-                if (obj->pin_count == 0)
+                if (!i915_gem_obj_is_pinned(obj))
                         continue;
 
                 capture_bo(err++, obj);
@@ -875,7 +875,7 @@ static void i915_gem_capture_vm(struct drm_i915_private *dev_priv,
                 i++;
         error->active_bo_count[ndx] = i;
         list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
-                if (obj->pin_count)
+                if (i915_gem_obj_is_pinned(obj))
                         i++;
         error->pinned_bo_count[ndx] = i - error->active_bo_count[ndx];
...
@@ -104,7 +104,7 @@ static int intelfb_alloc(struct drm_fb_helper *helper,
         return 0;
 
 out_unpin:
-        i915_gem_object_unpin(obj);
+        i915_gem_object_ggtt_unpin(obj);
 out_unref:
         drm_gem_object_unreference(&obj->base);
 out:
@@ -208,7 +208,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
         return 0;
 
 out_unpin:
-        i915_gem_object_unpin(obj);
+        i915_gem_object_ggtt_unpin(obj);
         drm_gem_object_unreference(&obj->base);
 out_unlock:
         mutex_unlock(&dev->struct_mutex);
...
@@ -293,7 +293,7 @@ static void intel_overlay_release_old_vid_tail(struct intel_overlay *overlay)
 {
         struct drm_i915_gem_object *obj = overlay->old_vid_bo;
 
-        i915_gem_object_unpin(obj);
+        i915_gem_object_ggtt_unpin(obj);
         drm_gem_object_unreference(&obj->base);
 
         overlay->old_vid_bo = NULL;
@@ -306,7 +306,7 @@ static void intel_overlay_off_tail(struct intel_overlay *overlay)
         /* never have the overlay hw on without showing a frame */
         BUG_ON(!overlay->vid_bo);
 
-        i915_gem_object_unpin(obj);
+        i915_gem_object_ggtt_unpin(obj);
         drm_gem_object_unreference(&obj->base);
         overlay->vid_bo = NULL;
@@ -782,7 +782,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
         return 0;
 
 out_unpin:
-        i915_gem_object_unpin(new_bo);
+        i915_gem_object_ggtt_unpin(new_bo);
         return ret;
 }
@@ -1386,7 +1386,7 @@ void intel_setup_overlay(struct drm_device *dev)
 out_unpin_bo:
         if (!OVERLAY_NEEDS_PHYSICAL(dev))
-                i915_gem_object_unpin(reg_bo);
+                i915_gem_object_ggtt_unpin(reg_bo);
 out_free_bo:
         drm_gem_object_unreference(&reg_bo->base);
 out_free:
...
@@ -3298,7 +3298,7 @@ intel_alloc_context_page(struct drm_device *dev)
         return ctx;
 
 err_unpin:
-        i915_gem_object_unpin(ctx);
+        i915_gem_object_ggtt_unpin(ctx);
 err_unref:
         drm_gem_object_unreference(&ctx->base);
         return NULL;
@@ -4166,13 +4166,13 @@ void ironlake_teardown_rc6(struct drm_device *dev)
         struct drm_i915_private *dev_priv = dev->dev_private;
 
         if (dev_priv->ips.renderctx) {
-                i915_gem_object_unpin(dev_priv->ips.renderctx);
+                i915_gem_object_ggtt_unpin(dev_priv->ips.renderctx);
                 drm_gem_object_unreference(&dev_priv->ips.renderctx->base);
                 dev_priv->ips.renderctx = NULL;
         }
 
         if (dev_priv->ips.pwrctx) {
-                i915_gem_object_unpin(dev_priv->ips.pwrctx);
+                i915_gem_object_ggtt_unpin(dev_priv->ips.pwrctx);
                 drm_gem_object_unreference(&dev_priv->ips.pwrctx->base);
                 dev_priv->ips.pwrctx = NULL;
         }
...
@@ -549,7 +549,7 @@ init_pipe_control(struct intel_ring_buffer *ring)
         return 0;
 
 err_unpin:
-        i915_gem_object_unpin(ring->scratch.obj);
+        i915_gem_object_ggtt_unpin(ring->scratch.obj);
 err_unref:
         drm_gem_object_unreference(&ring->scratch.obj->base);
 err:
@@ -625,7 +625,7 @@ static void render_ring_cleanup(struct intel_ring_buffer *ring)
         if (INTEL_INFO(dev)->gen >= 5) {
                 kunmap(sg_page(ring->scratch.obj->pages->sgl));
-                i915_gem_object_unpin(ring->scratch.obj);
+                i915_gem_object_ggtt_unpin(ring->scratch.obj);
         }
 
         drm_gem_object_unreference(&ring->scratch.obj->base);
@@ -1250,7 +1250,7 @@ static void cleanup_status_page(struct intel_ring_buffer *ring)
                 return;
 
         kunmap(sg_page(obj->pages->sgl));
-        i915_gem_object_unpin(obj);
+        i915_gem_object_ggtt_unpin(obj);
         drm_gem_object_unreference(&obj->base);
         ring->status_page.obj = NULL;
 }
@@ -1290,7 +1290,7 @@ static int init_status_page(struct intel_ring_buffer *ring)
         return 0;
 
 err_unpin:
-        i915_gem_object_unpin(obj);
+        i915_gem_object_ggtt_unpin(obj);
 err_unref:
         drm_gem_object_unreference(&obj->base);
 err:
@@ -1387,7 +1387,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
 err_unmap:
         iounmap(ring->virtual_start);
 err_unpin:
-        i915_gem_object_unpin(obj);
+        i915_gem_object_ggtt_unpin(obj);
 err_unref:
         drm_gem_object_unreference(&obj->base);
         ring->obj = NULL;
@@ -1415,7 +1415,7 @@ void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
         iounmap(ring->virtual_start);
 
-        i915_gem_object_unpin(ring->obj);
+        i915_gem_object_ggtt_unpin(ring->obj);
         drm_gem_object_unreference(&ring->obj->base);
         ring->obj = NULL;
         ring->preallocated_lazy_request = NULL;
...