Commit f343c5f6 authored by Ben Widawsky, committed by Daniel Vetter

drm/i915: Getter/setter for object attributes

Soon we want to gut a lot of our existing assumptions how many address
spaces an object can live in, and in doing so, embed the drm_mm_node in
the object (and later the VMA).

It's possible in the future we'll want to add more getter/setter
methods, but for now this is enough to enable the VMAs.

v2: Reworked commit message (Ben)
Added comments to the main functions (Ben)
sed -i "s/i915_gem_obj_set_color/i915_gem_obj_ggtt_set_color/" drivers/gpu/drm/i915/*.[ch]
sed -i "s/i915_gem_obj_bound/i915_gem_obj_ggtt_bound/" drivers/gpu/drm/i915/*.[ch]
sed -i "s/i915_gem_obj_size/i915_gem_obj_ggtt_size/" drivers/gpu/drm/i915/*.[ch]
sed -i "s/i915_gem_obj_offset/i915_gem_obj_ggtt_offset/" drivers/gpu/drm/i915/*.[ch]
(Daniel)

v3: Rebased on new reserve_node patch
Changed DRM_DEBUG_KMS to actually work (will need fixing later)
Signed-off-by: Ben Widawsky <ben@bwidawsk.net>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
parent 338710e7
...@@ -122,9 +122,9 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) ...@@ -122,9 +122,9 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
seq_printf(m, " (pinned x %d)", obj->pin_count); seq_printf(m, " (pinned x %d)", obj->pin_count);
if (obj->fence_reg != I915_FENCE_REG_NONE) if (obj->fence_reg != I915_FENCE_REG_NONE)
seq_printf(m, " (fence: %d)", obj->fence_reg); seq_printf(m, " (fence: %d)", obj->fence_reg);
if (obj->gtt_space != NULL) if (i915_gem_obj_ggtt_bound(obj))
seq_printf(m, " (gtt offset: %08x, size: %08x)", seq_printf(m, " (gtt offset: %08lx, size: %08x)",
obj->gtt_offset, (unsigned int)obj->gtt_space->size); i915_gem_obj_ggtt_offset(obj), (unsigned int)i915_gem_obj_ggtt_size(obj));
if (obj->stolen) if (obj->stolen)
seq_printf(m, " (stolen: %08lx)", obj->stolen->start); seq_printf(m, " (stolen: %08lx)", obj->stolen->start);
if (obj->pin_mappable || obj->fault_mappable) { if (obj->pin_mappable || obj->fault_mappable) {
...@@ -175,7 +175,7 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data) ...@@ -175,7 +175,7 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
describe_obj(m, obj); describe_obj(m, obj);
seq_putc(m, '\n'); seq_putc(m, '\n');
total_obj_size += obj->base.size; total_obj_size += obj->base.size;
total_gtt_size += obj->gtt_space->size; total_gtt_size += i915_gem_obj_ggtt_size(obj);
count++; count++;
} }
mutex_unlock(&dev->struct_mutex); mutex_unlock(&dev->struct_mutex);
...@@ -187,10 +187,10 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data) ...@@ -187,10 +187,10 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
#define count_objects(list, member) do { \ #define count_objects(list, member) do { \
list_for_each_entry(obj, list, member) { \ list_for_each_entry(obj, list, member) { \
size += obj->gtt_space->size; \ size += i915_gem_obj_ggtt_size(obj); \
++count; \ ++count; \
if (obj->map_and_fenceable) { \ if (obj->map_and_fenceable) { \
mappable_size += obj->gtt_space->size; \ mappable_size += i915_gem_obj_ggtt_size(obj); \
++mappable_count; \ ++mappable_count; \
} \ } \
} \ } \
...@@ -209,7 +209,7 @@ static int per_file_stats(int id, void *ptr, void *data) ...@@ -209,7 +209,7 @@ static int per_file_stats(int id, void *ptr, void *data)
stats->count++; stats->count++;
stats->total += obj->base.size; stats->total += obj->base.size;
if (obj->gtt_space) { if (i915_gem_obj_ggtt_bound(obj)) {
if (!list_empty(&obj->ring_list)) if (!list_empty(&obj->ring_list))
stats->active += obj->base.size; stats->active += obj->base.size;
else else
...@@ -267,11 +267,11 @@ static int i915_gem_object_info(struct seq_file *m, void *data) ...@@ -267,11 +267,11 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
size = count = mappable_size = mappable_count = 0; size = count = mappable_size = mappable_count = 0;
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
if (obj->fault_mappable) { if (obj->fault_mappable) {
size += obj->gtt_space->size; size += i915_gem_obj_ggtt_size(obj);
++count; ++count;
} }
if (obj->pin_mappable) { if (obj->pin_mappable) {
mappable_size += obj->gtt_space->size; mappable_size += i915_gem_obj_ggtt_size(obj);
++mappable_count; ++mappable_count;
} }
if (obj->madv == I915_MADV_DONTNEED) { if (obj->madv == I915_MADV_DONTNEED) {
...@@ -333,7 +333,7 @@ static int i915_gem_gtt_info(struct seq_file *m, void *data) ...@@ -333,7 +333,7 @@ static int i915_gem_gtt_info(struct seq_file *m, void *data)
describe_obj(m, obj); describe_obj(m, obj);
seq_putc(m, '\n'); seq_putc(m, '\n');
total_obj_size += obj->base.size; total_obj_size += obj->base.size;
total_gtt_size += obj->gtt_space->size; total_gtt_size += i915_gem_obj_ggtt_size(obj);
count++; count++;
} }
...@@ -379,12 +379,14 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data) ...@@ -379,12 +379,14 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
if (work->old_fb_obj) { if (work->old_fb_obj) {
struct drm_i915_gem_object *obj = work->old_fb_obj; struct drm_i915_gem_object *obj = work->old_fb_obj;
if (obj) if (obj)
seq_printf(m, "Old framebuffer gtt_offset 0x%08x\n", obj->gtt_offset); seq_printf(m, "Old framebuffer gtt_offset 0x%08lx\n",
i915_gem_obj_ggtt_offset(obj));
} }
if (work->pending_flip_obj) { if (work->pending_flip_obj) {
struct drm_i915_gem_object *obj = work->pending_flip_obj; struct drm_i915_gem_object *obj = work->pending_flip_obj;
if (obj) if (obj)
seq_printf(m, "New framebuffer gtt_offset 0x%08x\n", obj->gtt_offset); seq_printf(m, "New framebuffer gtt_offset 0x%08lx\n",
i915_gem_obj_ggtt_offset(obj));
} }
} }
spin_unlock_irqrestore(&dev->event_lock, flags); spin_unlock_irqrestore(&dev->event_lock, flags);
......
...@@ -1361,6 +1361,37 @@ struct drm_i915_gem_object { ...@@ -1361,6 +1361,37 @@ struct drm_i915_gem_object {
#define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base) #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
/* Offset of the first PTE pointing to this object */
static inline unsigned long
i915_gem_obj_ggtt_offset(struct drm_i915_gem_object *o)
{
return o->gtt_space->start;
}
/* Whether or not this object is currently mapped by the translation tables */
static inline bool
i915_gem_obj_ggtt_bound(struct drm_i915_gem_object *o)
{
return o->gtt_space != NULL;
}
/* The size used in the translation tables may be larger than the actual size of
* the object on GEN2/GEN3 because of the way tiling is handled. See
* i915_gem_get_gtt_size() for more details.
*/
static inline unsigned long
i915_gem_obj_ggtt_size(struct drm_i915_gem_object *o)
{
return o->gtt_space->size;
}
static inline void
i915_gem_obj_ggtt_set_color(struct drm_i915_gem_object *o,
enum i915_cache_level color)
{
o->gtt_space->color = color;
}
/** /**
* Request queue structure. * Request queue structure.
* *
......
...@@ -135,7 +135,7 @@ int i915_mutex_lock_interruptible(struct drm_device *dev) ...@@ -135,7 +135,7 @@ int i915_mutex_lock_interruptible(struct drm_device *dev)
static inline bool static inline bool
i915_gem_object_is_inactive(struct drm_i915_gem_object *obj) i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
{ {
return obj->gtt_space && !obj->active; return i915_gem_obj_ggtt_bound(obj) && !obj->active;
} }
int int
...@@ -178,7 +178,7 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, ...@@ -178,7 +178,7 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
mutex_lock(&dev->struct_mutex); mutex_lock(&dev->struct_mutex);
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
if (obj->pin_count) if (obj->pin_count)
pinned += obj->gtt_space->size; pinned += i915_gem_obj_ggtt_size(obj);
mutex_unlock(&dev->struct_mutex); mutex_unlock(&dev->struct_mutex);
args->aper_size = dev_priv->gtt.total; args->aper_size = dev_priv->gtt.total;
...@@ -422,7 +422,7 @@ i915_gem_shmem_pread(struct drm_device *dev, ...@@ -422,7 +422,7 @@ i915_gem_shmem_pread(struct drm_device *dev,
* anyway again before the next pread happens. */ * anyway again before the next pread happens. */
if (obj->cache_level == I915_CACHE_NONE) if (obj->cache_level == I915_CACHE_NONE)
needs_clflush = 1; needs_clflush = 1;
if (obj->gtt_space) { if (i915_gem_obj_ggtt_bound(obj)) {
ret = i915_gem_object_set_to_gtt_domain(obj, false); ret = i915_gem_object_set_to_gtt_domain(obj, false);
if (ret) if (ret)
return ret; return ret;
...@@ -609,7 +609,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev, ...@@ -609,7 +609,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
user_data = to_user_ptr(args->data_ptr); user_data = to_user_ptr(args->data_ptr);
remain = args->size; remain = args->size;
offset = obj->gtt_offset + args->offset; offset = i915_gem_obj_ggtt_offset(obj) + args->offset;
while (remain > 0) { while (remain > 0) {
/* Operation in this page /* Operation in this page
...@@ -739,7 +739,7 @@ i915_gem_shmem_pwrite(struct drm_device *dev, ...@@ -739,7 +739,7 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
* right away and we therefore have to clflush anyway. */ * right away and we therefore have to clflush anyway. */
if (obj->cache_level == I915_CACHE_NONE) if (obj->cache_level == I915_CACHE_NONE)
needs_clflush_after = 1; needs_clflush_after = 1;
if (obj->gtt_space) { if (i915_gem_obj_ggtt_bound(obj)) {
ret = i915_gem_object_set_to_gtt_domain(obj, true); ret = i915_gem_object_set_to_gtt_domain(obj, true);
if (ret) if (ret)
return ret; return ret;
...@@ -1360,8 +1360,9 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) ...@@ -1360,8 +1360,9 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
obj->fault_mappable = true; obj->fault_mappable = true;
pfn = ((dev_priv->gtt.mappable_base + obj->gtt_offset) >> PAGE_SHIFT) + pfn = dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj);
page_offset; pfn >>= PAGE_SHIFT;
pfn += page_offset;
/* Finally, remap it using the new GTT offset */ /* Finally, remap it using the new GTT offset */
ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn); ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
...@@ -1667,7 +1668,7 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj) ...@@ -1667,7 +1668,7 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
if (obj->pages == NULL) if (obj->pages == NULL)
return 0; return 0;
BUG_ON(obj->gtt_space); BUG_ON(i915_gem_obj_ggtt_bound(obj));
if (obj->pages_pin_count) if (obj->pages_pin_count)
return -EBUSY; return -EBUSY;
...@@ -2117,8 +2118,8 @@ i915_gem_request_remove_from_client(struct drm_i915_gem_request *request) ...@@ -2117,8 +2118,8 @@ i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
static bool i915_head_inside_object(u32 acthd, struct drm_i915_gem_object *obj) static bool i915_head_inside_object(u32 acthd, struct drm_i915_gem_object *obj)
{ {
if (acthd >= obj->gtt_offset && if (acthd >= i915_gem_obj_ggtt_offset(obj) &&
acthd < obj->gtt_offset + obj->base.size) acthd < i915_gem_obj_ggtt_offset(obj) + obj->base.size)
return true; return true;
return false; return false;
...@@ -2176,11 +2177,11 @@ static void i915_set_reset_status(struct intel_ring_buffer *ring, ...@@ -2176,11 +2177,11 @@ static void i915_set_reset_status(struct intel_ring_buffer *ring,
if (ring->hangcheck.action != wait && if (ring->hangcheck.action != wait &&
i915_request_guilty(request, acthd, &inside)) { i915_request_guilty(request, acthd, &inside)) {
DRM_ERROR("%s hung %s bo (0x%x ctx %d) at 0x%x\n", DRM_ERROR("%s hung %s bo (0x%lx ctx %d) at 0x%x\n",
ring->name, ring->name,
inside ? "inside" : "flushing", inside ? "inside" : "flushing",
request->batch_obj ? request->batch_obj ?
request->batch_obj->gtt_offset : 0, i915_gem_obj_ggtt_offset(request->batch_obj) : 0,
request->ctx ? request->ctx->id : 0, request->ctx ? request->ctx->id : 0,
acthd); acthd);
...@@ -2592,7 +2593,7 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj) ...@@ -2592,7 +2593,7 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
drm_i915_private_t *dev_priv = obj->base.dev->dev_private; drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
int ret; int ret;
if (obj->gtt_space == NULL) if (!i915_gem_obj_ggtt_bound(obj))
return 0; return 0;
if (obj->pin_count) if (obj->pin_count)
...@@ -2675,11 +2676,11 @@ static void i965_write_fence_reg(struct drm_device *dev, int reg, ...@@ -2675,11 +2676,11 @@ static void i965_write_fence_reg(struct drm_device *dev, int reg,
} }
if (obj) { if (obj) {
u32 size = obj->gtt_space->size; u32 size = i915_gem_obj_ggtt_size(obj);
val = (uint64_t)((obj->gtt_offset + size - 4096) & val = (uint64_t)((i915_gem_obj_ggtt_offset(obj) + size - 4096) &
0xfffff000) << 32; 0xfffff000) << 32;
val |= obj->gtt_offset & 0xfffff000; val |= i915_gem_obj_ggtt_offset(obj) & 0xfffff000;
val |= (uint64_t)((obj->stride / 128) - 1) << fence_pitch_shift; val |= (uint64_t)((obj->stride / 128) - 1) << fence_pitch_shift;
if (obj->tiling_mode == I915_TILING_Y) if (obj->tiling_mode == I915_TILING_Y)
val |= 1 << I965_FENCE_TILING_Y_SHIFT; val |= 1 << I965_FENCE_TILING_Y_SHIFT;
...@@ -2699,15 +2700,15 @@ static void i915_write_fence_reg(struct drm_device *dev, int reg, ...@@ -2699,15 +2700,15 @@ static void i915_write_fence_reg(struct drm_device *dev, int reg,
u32 val; u32 val;
if (obj) { if (obj) {
u32 size = obj->gtt_space->size; u32 size = i915_gem_obj_ggtt_size(obj);
int pitch_val; int pitch_val;
int tile_width; int tile_width;
WARN((obj->gtt_offset & ~I915_FENCE_START_MASK) || WARN((i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK) ||
(size & -size) != size || (size & -size) != size ||
(obj->gtt_offset & (size - 1)), (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
"object 0x%08x [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n", "object 0x%08lx [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
obj->gtt_offset, obj->map_and_fenceable, size); i915_gem_obj_ggtt_offset(obj), obj->map_and_fenceable, size);
if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev)) if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
tile_width = 128; tile_width = 128;
...@@ -2718,7 +2719,7 @@ static void i915_write_fence_reg(struct drm_device *dev, int reg, ...@@ -2718,7 +2719,7 @@ static void i915_write_fence_reg(struct drm_device *dev, int reg,
pitch_val = obj->stride / tile_width; pitch_val = obj->stride / tile_width;
pitch_val = ffs(pitch_val) - 1; pitch_val = ffs(pitch_val) - 1;
val = obj->gtt_offset; val = i915_gem_obj_ggtt_offset(obj);
if (obj->tiling_mode == I915_TILING_Y) if (obj->tiling_mode == I915_TILING_Y)
val |= 1 << I830_FENCE_TILING_Y_SHIFT; val |= 1 << I830_FENCE_TILING_Y_SHIFT;
val |= I915_FENCE_SIZE_BITS(size); val |= I915_FENCE_SIZE_BITS(size);
...@@ -2743,19 +2744,19 @@ static void i830_write_fence_reg(struct drm_device *dev, int reg, ...@@ -2743,19 +2744,19 @@ static void i830_write_fence_reg(struct drm_device *dev, int reg,
uint32_t val; uint32_t val;
if (obj) { if (obj) {
u32 size = obj->gtt_space->size; u32 size = i915_gem_obj_ggtt_size(obj);
uint32_t pitch_val; uint32_t pitch_val;
WARN((obj->gtt_offset & ~I830_FENCE_START_MASK) || WARN((i915_gem_obj_ggtt_offset(obj) & ~I830_FENCE_START_MASK) ||
(size & -size) != size || (size & -size) != size ||
(obj->gtt_offset & (size - 1)), (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
"object 0x%08x not 512K or pot-size 0x%08x aligned\n", "object 0x%08lx not 512K or pot-size 0x%08x aligned\n",
obj->gtt_offset, size); i915_gem_obj_ggtt_offset(obj), size);
pitch_val = obj->stride / 128; pitch_val = obj->stride / 128;
pitch_val = ffs(pitch_val) - 1; pitch_val = ffs(pitch_val) - 1;
val = obj->gtt_offset; val = i915_gem_obj_ggtt_offset(obj);
if (obj->tiling_mode == I915_TILING_Y) if (obj->tiling_mode == I915_TILING_Y)
val |= 1 << I830_FENCE_TILING_Y_SHIFT; val |= 1 << I830_FENCE_TILING_Y_SHIFT;
val |= I830_FENCE_SIZE_BITS(size); val |= I830_FENCE_SIZE_BITS(size);
...@@ -3044,8 +3045,8 @@ static void i915_gem_verify_gtt(struct drm_device *dev) ...@@ -3044,8 +3045,8 @@ static void i915_gem_verify_gtt(struct drm_device *dev)
if (obj->cache_level != obj->gtt_space->color) { if (obj->cache_level != obj->gtt_space->color) {
printk(KERN_ERR "object reserved space [%08lx, %08lx] with wrong color, cache_level=%x, color=%lx\n", printk(KERN_ERR "object reserved space [%08lx, %08lx] with wrong color, cache_level=%x, color=%lx\n",
obj->gtt_space->start, i915_gem_obj_ggtt_offset(obj),
obj->gtt_space->start + obj->gtt_space->size, i915_gem_obj_ggtt_offset(obj) + i915_gem_obj_ggtt_size(obj),
obj->cache_level, obj->cache_level,
obj->gtt_space->color); obj->gtt_space->color);
err++; err++;
...@@ -3056,8 +3057,8 @@ static void i915_gem_verify_gtt(struct drm_device *dev) ...@@ -3056,8 +3057,8 @@ static void i915_gem_verify_gtt(struct drm_device *dev)
obj->gtt_space, obj->gtt_space,
obj->cache_level)) { obj->cache_level)) {
printk(KERN_ERR "invalid GTT space found at [%08lx, %08lx] - color=%x\n", printk(KERN_ERR "invalid GTT space found at [%08lx, %08lx] - color=%x\n",
obj->gtt_space->start, i915_gem_obj_ggtt_offset(obj),
obj->gtt_space->start + obj->gtt_space->size, i915_gem_obj_ggtt_offset(obj) + i915_gem_obj_ggtt_size(obj),
obj->cache_level); obj->cache_level);
err++; err++;
continue; continue;
...@@ -3169,8 +3170,8 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj, ...@@ -3169,8 +3170,8 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
node->size == fence_size && node->size == fence_size &&
(node->start & (fence_alignment - 1)) == 0; (node->start & (fence_alignment - 1)) == 0;
mappable = mappable = i915_gem_obj_ggtt_offset(obj) + obj->base.size <=
obj->gtt_offset + obj->base.size <= dev_priv->gtt.mappable_end; dev_priv->gtt.mappable_end;
obj->map_and_fenceable = mappable && fenceable; obj->map_and_fenceable = mappable && fenceable;
...@@ -3272,7 +3273,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write) ...@@ -3272,7 +3273,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
int ret; int ret;
/* Not valid to be called on unbound objects. */ /* Not valid to be called on unbound objects. */
if (obj->gtt_space == NULL) if (!i915_gem_obj_ggtt_bound(obj))
return -EINVAL; return -EINVAL;
if (obj->base.write_domain == I915_GEM_DOMAIN_GTT) if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
...@@ -3337,7 +3338,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj, ...@@ -3337,7 +3338,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
return ret; return ret;
} }
if (obj->gtt_space) { if (i915_gem_obj_ggtt_bound(obj)) {
ret = i915_gem_object_finish_gpu(obj); ret = i915_gem_object_finish_gpu(obj);
if (ret) if (ret)
return ret; return ret;
...@@ -3360,7 +3361,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj, ...@@ -3360,7 +3361,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt, i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
obj, cache_level); obj, cache_level);
obj->gtt_space->color = cache_level; i915_gem_obj_ggtt_set_color(obj, cache_level);
} }
if (cache_level == I915_CACHE_NONE) { if (cache_level == I915_CACHE_NONE) {
...@@ -3641,14 +3642,14 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj, ...@@ -3641,14 +3642,14 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
if (WARN_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT)) if (WARN_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
return -EBUSY; return -EBUSY;
if (obj->gtt_space != NULL) { if (i915_gem_obj_ggtt_bound(obj)) {
if ((alignment && obj->gtt_offset & (alignment - 1)) || if ((alignment && i915_gem_obj_ggtt_offset(obj) & (alignment - 1)) ||
(map_and_fenceable && !obj->map_and_fenceable)) { (map_and_fenceable && !obj->map_and_fenceable)) {
WARN(obj->pin_count, WARN(obj->pin_count,
"bo is already pinned with incorrect alignment:" "bo is already pinned with incorrect alignment:"
" offset=%x, req.alignment=%x, req.map_and_fenceable=%d," " offset=%lx, req.alignment=%x, req.map_and_fenceable=%d,"
" obj->map_and_fenceable=%d\n", " obj->map_and_fenceable=%d\n",
obj->gtt_offset, alignment, i915_gem_obj_ggtt_offset(obj), alignment,
map_and_fenceable, map_and_fenceable,
obj->map_and_fenceable); obj->map_and_fenceable);
ret = i915_gem_object_unbind(obj); ret = i915_gem_object_unbind(obj);
...@@ -3657,7 +3658,7 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj, ...@@ -3657,7 +3658,7 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
} }
} }
if (obj->gtt_space == NULL) { if (!i915_gem_obj_ggtt_bound(obj)) {
struct drm_i915_private *dev_priv = obj->base.dev->dev_private; struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
ret = i915_gem_object_bind_to_gtt(obj, alignment, ret = i915_gem_object_bind_to_gtt(obj, alignment,
...@@ -3683,7 +3684,7 @@ void ...@@ -3683,7 +3684,7 @@ void
i915_gem_object_unpin(struct drm_i915_gem_object *obj) i915_gem_object_unpin(struct drm_i915_gem_object *obj)
{ {
BUG_ON(obj->pin_count == 0); BUG_ON(obj->pin_count == 0);
BUG_ON(obj->gtt_space == NULL); BUG_ON(!i915_gem_obj_ggtt_bound(obj));
if (--obj->pin_count == 0) if (--obj->pin_count == 0)
obj->pin_mappable = false; obj->pin_mappable = false;
...@@ -3733,7 +3734,7 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data, ...@@ -3733,7 +3734,7 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
* as the X server doesn't manage domains yet * as the X server doesn't manage domains yet
*/ */
i915_gem_object_flush_cpu_write_domain(obj); i915_gem_object_flush_cpu_write_domain(obj);
args->offset = obj->gtt_offset; args->offset = i915_gem_obj_ggtt_offset(obj);
out: out:
drm_gem_object_unreference(&obj->base); drm_gem_object_unreference(&obj->base);
unlock: unlock:
......
...@@ -377,7 +377,7 @@ mi_set_context(struct intel_ring_buffer *ring, ...@@ -377,7 +377,7 @@ mi_set_context(struct intel_ring_buffer *ring,
intel_ring_emit(ring, MI_NOOP); intel_ring_emit(ring, MI_NOOP);
intel_ring_emit(ring, MI_SET_CONTEXT); intel_ring_emit(ring, MI_SET_CONTEXT);
intel_ring_emit(ring, new_context->obj->gtt_offset | intel_ring_emit(ring, i915_gem_obj_ggtt_offset(new_context->obj) |
MI_MM_SPACE_GTT | MI_MM_SPACE_GTT |
MI_SAVE_EXT_STATE_EN | MI_SAVE_EXT_STATE_EN |
MI_RESTORE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN |
......
...@@ -188,7 +188,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj, ...@@ -188,7 +188,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
return -ENOENT; return -ENOENT;
target_i915_obj = to_intel_bo(target_obj); target_i915_obj = to_intel_bo(target_obj);
target_offset = target_i915_obj->gtt_offset; target_offset = i915_gem_obj_ggtt_offset(target_i915_obj);
/* Sandybridge PPGTT errata: We need a global gtt mapping for MI and /* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
* pipe_control writes because the gpu doesn't properly redirect them * pipe_control writes because the gpu doesn't properly redirect them
...@@ -280,7 +280,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj, ...@@ -280,7 +280,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
return ret; return ret;
/* Map the page containing the relocation we're going to perform. */ /* Map the page containing the relocation we're going to perform. */
reloc->offset += obj->gtt_offset; reloc->offset += i915_gem_obj_ggtt_offset(obj);
reloc_page = io_mapping_map_atomic_wc(dev_priv->gtt.mappable, reloc_page = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
reloc->offset & PAGE_MASK); reloc->offset & PAGE_MASK);
reloc_entry = (uint32_t __iomem *) reloc_entry = (uint32_t __iomem *)
...@@ -436,8 +436,8 @@ i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj, ...@@ -436,8 +436,8 @@ i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj,
obj->has_aliasing_ppgtt_mapping = 1; obj->has_aliasing_ppgtt_mapping = 1;
} }
if (entry->offset != obj->gtt_offset) { if (entry->offset != i915_gem_obj_ggtt_offset(obj)) {
entry->offset = obj->gtt_offset; entry->offset = i915_gem_obj_ggtt_offset(obj);
*need_reloc = true; *need_reloc = true;
} }
...@@ -458,7 +458,7 @@ i915_gem_execbuffer_unreserve_object(struct drm_i915_gem_object *obj) ...@@ -458,7 +458,7 @@ i915_gem_execbuffer_unreserve_object(struct drm_i915_gem_object *obj)
{ {
struct drm_i915_gem_exec_object2 *entry; struct drm_i915_gem_exec_object2 *entry;
if (!obj->gtt_space) if (!i915_gem_obj_ggtt_bound(obj))
return; return;
entry = obj->exec_entry; entry = obj->exec_entry;
...@@ -530,7 +530,7 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring, ...@@ -530,7 +530,7 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
struct drm_i915_gem_exec_object2 *entry = obj->exec_entry; struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
bool need_fence, need_mappable; bool need_fence, need_mappable;
if (!obj->gtt_space) if (!i915_gem_obj_ggtt_bound(obj))
continue; continue;
need_fence = need_fence =
...@@ -539,7 +539,8 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring, ...@@ -539,7 +539,8 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
obj->tiling_mode != I915_TILING_NONE; obj->tiling_mode != I915_TILING_NONE;
need_mappable = need_fence || need_reloc_mappable(obj); need_mappable = need_fence || need_reloc_mappable(obj);
if ((entry->alignment && obj->gtt_offset & (entry->alignment - 1)) || if ((entry->alignment &&
i915_gem_obj_ggtt_offset(obj) & (entry->alignment - 1)) ||
(need_mappable && !obj->map_and_fenceable)) (need_mappable && !obj->map_and_fenceable))
ret = i915_gem_object_unbind(obj); ret = i915_gem_object_unbind(obj);
else else
...@@ -550,7 +551,7 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring, ...@@ -550,7 +551,7 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
/* Bind fresh objects */ /* Bind fresh objects */
list_for_each_entry(obj, objects, exec_list) { list_for_each_entry(obj, objects, exec_list) {
if (obj->gtt_space) if (i915_gem_obj_ggtt_bound(obj))
continue; continue;
ret = i915_gem_execbuffer_reserve_object(obj, ring, need_relocs); ret = i915_gem_execbuffer_reserve_object(obj, ring, need_relocs);
...@@ -1058,7 +1059,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, ...@@ -1058,7 +1059,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
goto err; goto err;
} }
exec_start = batch_obj->gtt_offset + args->batch_start_offset; exec_start = i915_gem_obj_ggtt_offset(batch_obj) + args->batch_start_offset;
exec_len = args->batch_len; exec_len = args->batch_len;
if (cliprects) { if (cliprects) {
for (i = 0; i < args->num_cliprects; i++) { for (i = 0; i < args->num_cliprects; i++) {
......
...@@ -378,7 +378,7 @@ void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt, ...@@ -378,7 +378,7 @@ void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
enum i915_cache_level cache_level) enum i915_cache_level cache_level)
{ {
ppgtt->insert_entries(ppgtt, obj->pages, ppgtt->insert_entries(ppgtt, obj->pages,
obj->gtt_space->start >> PAGE_SHIFT, i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT,
cache_level); cache_level);
} }
...@@ -386,7 +386,7 @@ void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt, ...@@ -386,7 +386,7 @@ void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
struct drm_i915_gem_object *obj) struct drm_i915_gem_object *obj)
{ {
ppgtt->clear_range(ppgtt, ppgtt->clear_range(ppgtt,
obj->gtt_space->start >> PAGE_SHIFT, i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT,
obj->base.size >> PAGE_SHIFT); obj->base.size >> PAGE_SHIFT);
} }
...@@ -551,7 +551,7 @@ void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj, ...@@ -551,7 +551,7 @@ void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
dev_priv->gtt.gtt_insert_entries(dev, obj->pages, dev_priv->gtt.gtt_insert_entries(dev, obj->pages,
obj->gtt_space->start >> PAGE_SHIFT, i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT,
cache_level); cache_level);
obj->has_global_gtt_mapping = 1; obj->has_global_gtt_mapping = 1;
...@@ -563,7 +563,7 @@ void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj) ...@@ -563,7 +563,7 @@ void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
dev_priv->gtt.gtt_clear_range(obj->base.dev, dev_priv->gtt.gtt_clear_range(obj->base.dev,
obj->gtt_space->start >> PAGE_SHIFT, i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT,
obj->base.size >> PAGE_SHIFT); obj->base.size >> PAGE_SHIFT);
obj->has_global_gtt_mapping = 0; obj->has_global_gtt_mapping = 0;
......
...@@ -268,18 +268,18 @@ i915_gem_object_fence_ok(struct drm_i915_gem_object *obj, int tiling_mode) ...@@ -268,18 +268,18 @@ i915_gem_object_fence_ok(struct drm_i915_gem_object *obj, int tiling_mode)
return true; return true;
if (INTEL_INFO(obj->base.dev)->gen == 3) { if (INTEL_INFO(obj->base.dev)->gen == 3) {
if (obj->gtt_offset & ~I915_FENCE_START_MASK) if (i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK)
return false; return false;
} else { } else {
if (obj->gtt_offset & ~I830_FENCE_START_MASK) if (i915_gem_obj_ggtt_offset(obj) & ~I830_FENCE_START_MASK)
return false; return false;
} }
size = i915_gem_get_gtt_size(obj->base.dev, obj->base.size, tiling_mode); size = i915_gem_get_gtt_size(obj->base.dev, obj->base.size, tiling_mode);
if (obj->gtt_space->size != size) if (i915_gem_obj_ggtt_size(obj) != size)
return false; return false;
if (obj->gtt_offset & (size - 1)) if (i915_gem_obj_ggtt_offset(obj) & (size - 1))
return false; return false;
return true; return true;
...@@ -359,8 +359,8 @@ i915_gem_set_tiling(struct drm_device *dev, void *data, ...@@ -359,8 +359,8 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
*/ */
obj->map_and_fenceable = obj->map_and_fenceable =
obj->gtt_space == NULL || !i915_gem_obj_ggtt_bound(obj) ||
(obj->gtt_offset + obj->base.size <= dev_priv->gtt.mappable_end && (i915_gem_obj_ggtt_offset(obj) + obj->base.size <= dev_priv->gtt.mappable_end &&
i915_gem_object_fence_ok(obj, args->tiling_mode)); i915_gem_object_fence_ok(obj, args->tiling_mode));
/* Rebind if we need a change of alignment */ /* Rebind if we need a change of alignment */
...@@ -369,7 +369,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data, ...@@ -369,7 +369,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
i915_gem_get_gtt_alignment(dev, obj->base.size, i915_gem_get_gtt_alignment(dev, obj->base.size,
args->tiling_mode, args->tiling_mode,
false); false);
if (obj->gtt_offset & (unfenced_alignment - 1)) if (i915_gem_obj_ggtt_offset(obj) & (unfenced_alignment - 1))
ret = i915_gem_object_unbind(obj); ret = i915_gem_object_unbind(obj);
} }
......
...@@ -1520,7 +1520,7 @@ i915_error_object_create_sized(struct drm_i915_private *dev_priv, ...@@ -1520,7 +1520,7 @@ i915_error_object_create_sized(struct drm_i915_private *dev_priv,
if (dst == NULL) if (dst == NULL)
return NULL; return NULL;
reloc_offset = src->gtt_offset; reloc_offset = dst->gtt_offset = i915_gem_obj_ggtt_offset(src);
for (i = 0; i < num_pages; i++) { for (i = 0; i < num_pages; i++) {
unsigned long flags; unsigned long flags;
void *d; void *d;
...@@ -1572,7 +1572,6 @@ i915_error_object_create_sized(struct drm_i915_private *dev_priv, ...@@ -1572,7 +1572,6 @@ i915_error_object_create_sized(struct drm_i915_private *dev_priv,
reloc_offset += PAGE_SIZE; reloc_offset += PAGE_SIZE;
} }
dst->page_count = num_pages; dst->page_count = num_pages;
dst->gtt_offset = src->gtt_offset;
return dst; return dst;
...@@ -1626,7 +1625,7 @@ static void capture_bo(struct drm_i915_error_buffer *err, ...@@ -1626,7 +1625,7 @@ static void capture_bo(struct drm_i915_error_buffer *err,
err->name = obj->base.name; err->name = obj->base.name;
err->rseqno = obj->last_read_seqno; err->rseqno = obj->last_read_seqno;
err->wseqno = obj->last_write_seqno; err->wseqno = obj->last_write_seqno;
err->gtt_offset = obj->gtt_offset; err->gtt_offset = i915_gem_obj_ggtt_offset(obj);
err->read_domains = obj->base.read_domains; err->read_domains = obj->base.read_domains;
err->write_domain = obj->base.write_domain; err->write_domain = obj->base.write_domain;
err->fence_reg = obj->fence_reg; err->fence_reg = obj->fence_reg;
...@@ -1724,8 +1723,8 @@ i915_error_first_batchbuffer(struct drm_i915_private *dev_priv, ...@@ -1724,8 +1723,8 @@ i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
return NULL; return NULL;
obj = ring->private; obj = ring->private;
if (acthd >= obj->gtt_offset && if (acthd >= i915_gem_obj_ggtt_offset(obj) &&
acthd < obj->gtt_offset + obj->base.size) acthd < i915_gem_obj_ggtt_offset(obj) + obj->base.size)
return i915_error_object_create(dev_priv, obj); return i915_error_object_create(dev_priv, obj);
} }
...@@ -1806,7 +1805,7 @@ static void i915_gem_record_active_context(struct intel_ring_buffer *ring, ...@@ -1806,7 +1805,7 @@ static void i915_gem_record_active_context(struct intel_ring_buffer *ring,
return; return;
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
if ((error->ccid & PAGE_MASK) == obj->gtt_offset) { if ((error->ccid & PAGE_MASK) == i915_gem_obj_ggtt_offset(obj)) {
ering->ctx = i915_error_object_create_sized(dev_priv, ering->ctx = i915_error_object_create_sized(dev_priv,
obj, 1); obj, 1);
break; break;
...@@ -2160,10 +2159,10 @@ static void __always_unused i915_pageflip_stall_check(struct drm_device *dev, in ...@@ -2160,10 +2159,10 @@ static void __always_unused i915_pageflip_stall_check(struct drm_device *dev, in
if (INTEL_INFO(dev)->gen >= 4) { if (INTEL_INFO(dev)->gen >= 4) {
int dspsurf = DSPSURF(intel_crtc->plane); int dspsurf = DSPSURF(intel_crtc->plane);
stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) == stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) ==
obj->gtt_offset; i915_gem_obj_ggtt_offset(obj);
} else { } else {
int dspaddr = DSPADDR(intel_crtc->plane); int dspaddr = DSPADDR(intel_crtc->plane);
stall_detected = I915_READ(dspaddr) == (obj->gtt_offset + stall_detected = I915_READ(dspaddr) == (i915_gem_obj_ggtt_offset(obj) +
crtc->y * crtc->fb->pitches[0] + crtc->y * crtc->fb->pitches[0] +
crtc->x * crtc->fb->bits_per_pixel/8); crtc->x * crtc->fb->bits_per_pixel/8);
} }
......
...@@ -46,8 +46,8 @@ TRACE_EVENT(i915_gem_object_bind, ...@@ -46,8 +46,8 @@ TRACE_EVENT(i915_gem_object_bind,
TP_fast_assign( TP_fast_assign(
__entry->obj = obj; __entry->obj = obj;
__entry->offset = obj->gtt_space->start; __entry->offset = i915_gem_obj_ggtt_offset(obj);
__entry->size = obj->gtt_space->size; __entry->size = i915_gem_obj_ggtt_size(obj);
__entry->mappable = mappable; __entry->mappable = mappable;
), ),
...@@ -68,8 +68,8 @@ TRACE_EVENT(i915_gem_object_unbind, ...@@ -68,8 +68,8 @@ TRACE_EVENT(i915_gem_object_unbind,
TP_fast_assign( TP_fast_assign(
__entry->obj = obj; __entry->obj = obj;
__entry->offset = obj->gtt_space->start; __entry->offset = i915_gem_obj_ggtt_offset(obj);
__entry->size = obj->gtt_space->size; __entry->size = i915_gem_obj_ggtt_size(obj);
), ),
TP_printk("obj=%p, offset=%08x size=%x", TP_printk("obj=%p, offset=%08x size=%x",
......
...@@ -1980,16 +1980,17 @@ static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb, ...@@ -1980,16 +1980,17 @@ static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb,
intel_crtc->dspaddr_offset = linear_offset; intel_crtc->dspaddr_offset = linear_offset;
} }
DRM_DEBUG_KMS("Writing base %08X %08lX %d %d %d\n", DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
obj->gtt_offset, linear_offset, x, y, fb->pitches[0]); i915_gem_obj_ggtt_offset(obj), linear_offset, x, y,
fb->pitches[0]);
I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]); I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
if (INTEL_INFO(dev)->gen >= 4) { if (INTEL_INFO(dev)->gen >= 4) {
I915_MODIFY_DISPBASE(DSPSURF(plane), I915_MODIFY_DISPBASE(DSPSURF(plane),
obj->gtt_offset + intel_crtc->dspaddr_offset); i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
I915_WRITE(DSPTILEOFF(plane), (y << 16) | x); I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
I915_WRITE(DSPLINOFF(plane), linear_offset); I915_WRITE(DSPLINOFF(plane), linear_offset);
} else } else
I915_WRITE(DSPADDR(plane), obj->gtt_offset + linear_offset); I915_WRITE(DSPADDR(plane), i915_gem_obj_ggtt_offset(obj) + linear_offset);
POSTING_READ(reg); POSTING_READ(reg);
return 0; return 0;
...@@ -2069,11 +2070,12 @@ static int ironlake_update_plane(struct drm_crtc *crtc, ...@@ -2069,11 +2070,12 @@ static int ironlake_update_plane(struct drm_crtc *crtc,
fb->pitches[0]); fb->pitches[0]);
linear_offset -= intel_crtc->dspaddr_offset; linear_offset -= intel_crtc->dspaddr_offset;
DRM_DEBUG_KMS("Writing base %08X %08lX %d %d %d\n", DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
obj->gtt_offset, linear_offset, x, y, fb->pitches[0]); i915_gem_obj_ggtt_offset(obj), linear_offset, x, y,
fb->pitches[0]);
I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]); I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
I915_MODIFY_DISPBASE(DSPSURF(plane), I915_MODIFY_DISPBASE(DSPSURF(plane),
obj->gtt_offset + intel_crtc->dspaddr_offset); i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
if (IS_HASWELL(dev)) { if (IS_HASWELL(dev)) {
I915_WRITE(DSPOFFSET(plane), (y << 16) | x); I915_WRITE(DSPOFFSET(plane), (y << 16) | x);
} else { } else {
...@@ -6567,7 +6569,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, ...@@ -6567,7 +6569,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
goto fail_unpin; goto fail_unpin;
} }
addr = obj->gtt_offset; addr = i915_gem_obj_ggtt_offset(obj);
} else { } else {
int align = IS_I830(dev) ? 16 * 1024 : 256; int align = IS_I830(dev) ? 16 * 1024 : 256;
ret = i915_gem_attach_phys_object(dev, obj, ret = i915_gem_attach_phys_object(dev, obj,
...@@ -7339,7 +7341,7 @@ static int intel_gen2_queue_flip(struct drm_device *dev, ...@@ -7339,7 +7341,7 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
intel_ring_emit(ring, MI_DISPLAY_FLIP | intel_ring_emit(ring, MI_DISPLAY_FLIP |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
intel_ring_emit(ring, fb->pitches[0]); intel_ring_emit(ring, fb->pitches[0]);
intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset); intel_ring_emit(ring, i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
intel_ring_emit(ring, 0); /* aux display base address, unused */ intel_ring_emit(ring, 0); /* aux display base address, unused */
intel_mark_page_flip_active(intel_crtc); intel_mark_page_flip_active(intel_crtc);
...@@ -7380,7 +7382,7 @@ static int intel_gen3_queue_flip(struct drm_device *dev, ...@@ -7380,7 +7382,7 @@ static int intel_gen3_queue_flip(struct drm_device *dev,
intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
intel_ring_emit(ring, fb->pitches[0]); intel_ring_emit(ring, fb->pitches[0]);
intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset); intel_ring_emit(ring, i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
intel_ring_emit(ring, MI_NOOP); intel_ring_emit(ring, MI_NOOP);
intel_mark_page_flip_active(intel_crtc); intel_mark_page_flip_active(intel_crtc);
...@@ -7420,7 +7422,7 @@ static int intel_gen4_queue_flip(struct drm_device *dev, ...@@ -7420,7 +7422,7 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
intel_ring_emit(ring, fb->pitches[0]); intel_ring_emit(ring, fb->pitches[0]);
intel_ring_emit(ring, intel_ring_emit(ring,
(obj->gtt_offset + intel_crtc->dspaddr_offset) | (i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset) |
obj->tiling_mode); obj->tiling_mode);
/* XXX Enabling the panel-fitter across page-flip is so far /* XXX Enabling the panel-fitter across page-flip is so far
...@@ -7463,7 +7465,7 @@ static int intel_gen6_queue_flip(struct drm_device *dev, ...@@ -7463,7 +7465,7 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
intel_ring_emit(ring, MI_DISPLAY_FLIP | intel_ring_emit(ring, MI_DISPLAY_FLIP |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
intel_ring_emit(ring, fb->pitches[0] | obj->tiling_mode); intel_ring_emit(ring, fb->pitches[0] | obj->tiling_mode);
intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset); intel_ring_emit(ring, i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
/* Contrary to the suggestions in the documentation, /* Contrary to the suggestions in the documentation,
* "Enable Panel Fitter" does not seem to be required when page * "Enable Panel Fitter" does not seem to be required when page
...@@ -7528,7 +7530,7 @@ static int intel_gen7_queue_flip(struct drm_device *dev, ...@@ -7528,7 +7530,7 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit); intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit);
intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode)); intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode));
intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset); intel_ring_emit(ring, i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
intel_ring_emit(ring, (MI_NOOP)); intel_ring_emit(ring, (MI_NOOP));
intel_mark_page_flip_active(intel_crtc); intel_mark_page_flip_active(intel_crtc);
......
...@@ -139,11 +139,11 @@ static int intelfb_create(struct drm_fb_helper *helper, ...@@ -139,11 +139,11 @@ static int intelfb_create(struct drm_fb_helper *helper,
info->apertures->ranges[0].base = dev->mode_config.fb_base; info->apertures->ranges[0].base = dev->mode_config.fb_base;
info->apertures->ranges[0].size = dev_priv->gtt.mappable_end; info->apertures->ranges[0].size = dev_priv->gtt.mappable_end;
info->fix.smem_start = dev->mode_config.fb_base + obj->gtt_offset; info->fix.smem_start = dev->mode_config.fb_base + i915_gem_obj_ggtt_offset(obj);
info->fix.smem_len = size; info->fix.smem_len = size;
info->screen_base = info->screen_base =
ioremap_wc(dev_priv->gtt.mappable_base + obj->gtt_offset, ioremap_wc(dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj),
size); size);
if (!info->screen_base) { if (!info->screen_base) {
ret = -ENOSPC; ret = -ENOSPC;
...@@ -166,9 +166,9 @@ static int intelfb_create(struct drm_fb_helper *helper, ...@@ -166,9 +166,9 @@ static int intelfb_create(struct drm_fb_helper *helper,
/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */ /* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08x, bo %p\n", DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08lx, bo %p\n",
fb->width, fb->height, fb->width, fb->height,
obj->gtt_offset, obj); i915_gem_obj_ggtt_offset(obj), obj);
mutex_unlock(&dev->struct_mutex); mutex_unlock(&dev->struct_mutex);
......
...@@ -196,7 +196,7 @@ intel_overlay_map_regs(struct intel_overlay *overlay) ...@@ -196,7 +196,7 @@ intel_overlay_map_regs(struct intel_overlay *overlay)
regs = (struct overlay_registers __iomem *)overlay->reg_bo->phys_obj->handle->vaddr; regs = (struct overlay_registers __iomem *)overlay->reg_bo->phys_obj->handle->vaddr;
else else
regs = io_mapping_map_wc(dev_priv->gtt.mappable, regs = io_mapping_map_wc(dev_priv->gtt.mappable,
overlay->reg_bo->gtt_offset); i915_gem_obj_ggtt_offset(overlay->reg_bo));
return regs; return regs;
} }
...@@ -740,7 +740,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay, ...@@ -740,7 +740,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
swidth = params->src_w; swidth = params->src_w;
swidthsw = calc_swidthsw(overlay->dev, params->offset_Y, tmp_width); swidthsw = calc_swidthsw(overlay->dev, params->offset_Y, tmp_width);
sheight = params->src_h; sheight = params->src_h;
iowrite32(new_bo->gtt_offset + params->offset_Y, &regs->OBUF_0Y); iowrite32(i915_gem_obj_ggtt_offset(new_bo) + params->offset_Y, &regs->OBUF_0Y);
ostride = params->stride_Y; ostride = params->stride_Y;
if (params->format & I915_OVERLAY_YUV_PLANAR) { if (params->format & I915_OVERLAY_YUV_PLANAR) {
...@@ -754,8 +754,8 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay, ...@@ -754,8 +754,8 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
params->src_w/uv_hscale); params->src_w/uv_hscale);
swidthsw |= max_t(u32, tmp_U, tmp_V) << 16; swidthsw |= max_t(u32, tmp_U, tmp_V) << 16;
sheight |= (params->src_h/uv_vscale) << 16; sheight |= (params->src_h/uv_vscale) << 16;
iowrite32(new_bo->gtt_offset + params->offset_U, &regs->OBUF_0U); iowrite32(i915_gem_obj_ggtt_offset(new_bo) + params->offset_U, &regs->OBUF_0U);
iowrite32(new_bo->gtt_offset + params->offset_V, &regs->OBUF_0V); iowrite32(i915_gem_obj_ggtt_offset(new_bo) + params->offset_V, &regs->OBUF_0V);
ostride |= params->stride_UV << 16; ostride |= params->stride_UV << 16;
} }
...@@ -1355,7 +1355,7 @@ void intel_setup_overlay(struct drm_device *dev) ...@@ -1355,7 +1355,7 @@ void intel_setup_overlay(struct drm_device *dev)
DRM_ERROR("failed to pin overlay register bo\n"); DRM_ERROR("failed to pin overlay register bo\n");
goto out_free_bo; goto out_free_bo;
} }
overlay->flip_addr = reg_bo->gtt_offset; overlay->flip_addr = i915_gem_obj_ggtt_offset(reg_bo);
ret = i915_gem_object_set_to_gtt_domain(reg_bo, true); ret = i915_gem_object_set_to_gtt_domain(reg_bo, true);
if (ret) { if (ret) {
...@@ -1435,7 +1435,7 @@ intel_overlay_map_regs_atomic(struct intel_overlay *overlay) ...@@ -1435,7 +1435,7 @@ intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
overlay->reg_bo->phys_obj->handle->vaddr; overlay->reg_bo->phys_obj->handle->vaddr;
else else
regs = io_mapping_map_atomic_wc(dev_priv->gtt.mappable, regs = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
overlay->reg_bo->gtt_offset); i915_gem_obj_ggtt_offset(overlay->reg_bo));
return regs; return regs;
} }
...@@ -1468,7 +1468,7 @@ intel_overlay_capture_error_state(struct drm_device *dev) ...@@ -1468,7 +1468,7 @@ intel_overlay_capture_error_state(struct drm_device *dev)
if (OVERLAY_NEEDS_PHYSICAL(overlay->dev)) if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
error->base = (__force long)overlay->reg_bo->phys_obj->handle->vaddr; error->base = (__force long)overlay->reg_bo->phys_obj->handle->vaddr;
else else
error->base = overlay->reg_bo->gtt_offset; error->base = i915_gem_obj_ggtt_offset(overlay->reg_bo);
regs = intel_overlay_map_regs_atomic(overlay); regs = intel_overlay_map_regs_atomic(overlay);
if (!regs) if (!regs)
......
...@@ -218,7 +218,7 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval) ...@@ -218,7 +218,7 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
(stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) | (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
(interval << DPFC_RECOMP_TIMER_COUNT_SHIFT)); (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y); I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
I915_WRITE(ILK_FBC_RT_BASE, obj->gtt_offset | ILK_FBC_RT_VALID); I915_WRITE(ILK_FBC_RT_BASE, i915_gem_obj_ggtt_offset(obj) | ILK_FBC_RT_VALID);
/* enable it... */ /* enable it... */
I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN); I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
...@@ -275,7 +275,7 @@ static void gen7_enable_fbc(struct drm_crtc *crtc, unsigned long interval) ...@@ -275,7 +275,7 @@ static void gen7_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
struct drm_i915_gem_object *obj = intel_fb->obj; struct drm_i915_gem_object *obj = intel_fb->obj;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
I915_WRITE(IVB_FBC_RT_BASE, obj->gtt_offset); I915_WRITE(IVB_FBC_RT_BASE, i915_gem_obj_ggtt_offset(obj));
I915_WRITE(ILK_DPFC_CONTROL, DPFC_CTL_EN | DPFC_CTL_LIMIT_1X | I915_WRITE(ILK_DPFC_CONTROL, DPFC_CTL_EN | DPFC_CTL_LIMIT_1X |
IVB_DPFC_CTL_FENCE_EN | IVB_DPFC_CTL_FENCE_EN |
...@@ -3700,7 +3700,7 @@ static void ironlake_enable_rc6(struct drm_device *dev) ...@@ -3700,7 +3700,7 @@ static void ironlake_enable_rc6(struct drm_device *dev)
intel_ring_emit(ring, MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN); intel_ring_emit(ring, MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN);
intel_ring_emit(ring, MI_SET_CONTEXT); intel_ring_emit(ring, MI_SET_CONTEXT);
intel_ring_emit(ring, dev_priv->ips.renderctx->gtt_offset | intel_ring_emit(ring, i915_gem_obj_ggtt_offset(dev_priv->ips.renderctx) |
MI_MM_SPACE_GTT | MI_MM_SPACE_GTT |
MI_SAVE_EXT_STATE_EN | MI_SAVE_EXT_STATE_EN |
MI_RESTORE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN |
...@@ -3723,7 +3723,7 @@ static void ironlake_enable_rc6(struct drm_device *dev) ...@@ -3723,7 +3723,7 @@ static void ironlake_enable_rc6(struct drm_device *dev)
return; return;
} }
I915_WRITE(PWRCTXA, dev_priv->ips.pwrctx->gtt_offset | PWRCTX_EN); I915_WRITE(PWRCTXA, i915_gem_obj_ggtt_offset(dev_priv->ips.pwrctx) | PWRCTX_EN);
I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT); I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
} }
......
...@@ -424,14 +424,14 @@ static int init_ring_common(struct intel_ring_buffer *ring) ...@@ -424,14 +424,14 @@ static int init_ring_common(struct intel_ring_buffer *ring)
* registers with the above sequence (the readback of the HEAD registers * registers with the above sequence (the readback of the HEAD registers
* also enforces ordering), otherwise the hw might lose the new ring * also enforces ordering), otherwise the hw might lose the new ring
* register values. */ * register values. */
I915_WRITE_START(ring, obj->gtt_offset); I915_WRITE_START(ring, i915_gem_obj_ggtt_offset(obj));
I915_WRITE_CTL(ring, I915_WRITE_CTL(ring,
((ring->size - PAGE_SIZE) & RING_NR_PAGES) ((ring->size - PAGE_SIZE) & RING_NR_PAGES)
| RING_VALID); | RING_VALID);
/* If the head is still not zero, the ring is dead */ /* If the head is still not zero, the ring is dead */
if (wait_for((I915_READ_CTL(ring) & RING_VALID) != 0 && if (wait_for((I915_READ_CTL(ring) & RING_VALID) != 0 &&
I915_READ_START(ring) == obj->gtt_offset && I915_READ_START(ring) == i915_gem_obj_ggtt_offset(obj) &&
(I915_READ_HEAD(ring) & HEAD_ADDR) == 0, 50)) { (I915_READ_HEAD(ring) & HEAD_ADDR) == 0, 50)) {
DRM_ERROR("%s initialization failed " DRM_ERROR("%s initialization failed "
"ctl %08x head %08x tail %08x start %08x\n", "ctl %08x head %08x tail %08x start %08x\n",
...@@ -489,7 +489,7 @@ init_pipe_control(struct intel_ring_buffer *ring) ...@@ -489,7 +489,7 @@ init_pipe_control(struct intel_ring_buffer *ring)
if (ret) if (ret)
goto err_unref; goto err_unref;
pc->gtt_offset = obj->gtt_offset; pc->gtt_offset = i915_gem_obj_ggtt_offset(obj);
pc->cpu_page = kmap(sg_page(obj->pages->sgl)); pc->cpu_page = kmap(sg_page(obj->pages->sgl));
if (pc->cpu_page == NULL) { if (pc->cpu_page == NULL) {
ret = -ENOMEM; ret = -ENOMEM;
...@@ -1129,7 +1129,7 @@ i830_dispatch_execbuffer(struct intel_ring_buffer *ring, ...@@ -1129,7 +1129,7 @@ i830_dispatch_execbuffer(struct intel_ring_buffer *ring,
intel_ring_advance(ring); intel_ring_advance(ring);
} else { } else {
struct drm_i915_gem_object *obj = ring->private; struct drm_i915_gem_object *obj = ring->private;
u32 cs_offset = obj->gtt_offset; u32 cs_offset = i915_gem_obj_ggtt_offset(obj);
if (len > I830_BATCH_LIMIT) if (len > I830_BATCH_LIMIT)
return -ENOSPC; return -ENOSPC;
...@@ -1214,7 +1214,7 @@ static int init_status_page(struct intel_ring_buffer *ring) ...@@ -1214,7 +1214,7 @@ static int init_status_page(struct intel_ring_buffer *ring)
goto err_unref; goto err_unref;
} }
ring->status_page.gfx_addr = obj->gtt_offset; ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(obj);
ring->status_page.page_addr = kmap(sg_page(obj->pages->sgl)); ring->status_page.page_addr = kmap(sg_page(obj->pages->sgl));
if (ring->status_page.page_addr == NULL) { if (ring->status_page.page_addr == NULL) {
ret = -ENOMEM; ret = -ENOMEM;
...@@ -1308,7 +1308,7 @@ static int intel_init_ring_buffer(struct drm_device *dev, ...@@ -1308,7 +1308,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
goto err_unpin; goto err_unpin;
ring->virtual_start = ring->virtual_start =
ioremap_wc(dev_priv->gtt.mappable_base + obj->gtt_offset, ioremap_wc(dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj),
ring->size); ring->size);
if (ring->virtual_start == NULL) { if (ring->virtual_start == NULL) {
DRM_ERROR("Failed to map ringbuffer.\n"); DRM_ERROR("Failed to map ringbuffer.\n");
......
...@@ -133,7 +133,7 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_framebuffer *fb, ...@@ -133,7 +133,7 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_framebuffer *fb,
I915_WRITE(SPSIZE(pipe, plane), (crtc_h << 16) | crtc_w); I915_WRITE(SPSIZE(pipe, plane), (crtc_h << 16) | crtc_w);
I915_WRITE(SPCNTR(pipe, plane), sprctl); I915_WRITE(SPCNTR(pipe, plane), sprctl);
I915_MODIFY_DISPBASE(SPSURF(pipe, plane), obj->gtt_offset + I915_MODIFY_DISPBASE(SPSURF(pipe, plane), i915_gem_obj_ggtt_offset(obj) +
sprsurf_offset); sprsurf_offset);
POSTING_READ(SPSURF(pipe, plane)); POSTING_READ(SPSURF(pipe, plane));
} }
...@@ -308,7 +308,8 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb, ...@@ -308,7 +308,8 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
if (intel_plane->can_scale) if (intel_plane->can_scale)
I915_WRITE(SPRSCALE(pipe), sprscale); I915_WRITE(SPRSCALE(pipe), sprscale);
I915_WRITE(SPRCTL(pipe), sprctl); I915_WRITE(SPRCTL(pipe), sprctl);
I915_MODIFY_DISPBASE(SPRSURF(pipe), obj->gtt_offset + sprsurf_offset); I915_MODIFY_DISPBASE(SPRSURF(pipe),
i915_gem_obj_ggtt_offset(obj) + sprsurf_offset);
POSTING_READ(SPRSURF(pipe)); POSTING_READ(SPRSURF(pipe));
/* potentially re-enable LP watermarks */ /* potentially re-enable LP watermarks */
...@@ -478,7 +479,8 @@ ilk_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb, ...@@ -478,7 +479,8 @@ ilk_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
I915_WRITE(DVSSIZE(pipe), (crtc_h << 16) | crtc_w); I915_WRITE(DVSSIZE(pipe), (crtc_h << 16) | crtc_w);
I915_WRITE(DVSSCALE(pipe), dvsscale); I915_WRITE(DVSSCALE(pipe), dvsscale);
I915_WRITE(DVSCNTR(pipe), dvscntr); I915_WRITE(DVSCNTR(pipe), dvscntr);
I915_MODIFY_DISPBASE(DVSSURF(pipe), obj->gtt_offset + dvssurf_offset); I915_MODIFY_DISPBASE(DVSSURF(pipe),
i915_gem_obj_ggtt_offset(obj) + dvssurf_offset);
POSTING_READ(DVSSURF(pipe)); POSTING_READ(DVSSURF(pipe));
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment