Commit 49d73912 authored by Chris Wilson

drm/i915: Convert vm->dev backpointer to vm->i915

99% of the time we access i915_address_space->dev we want the i915
device and not the drm device, so let's store the drm_i915_private
backpointer instead. The only real complication here are the inlines
in i915_vma.h where drm_i915_private is not yet defined and so we have
to choose an alternate path for our asserts.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20161129095008.32622-1-chris@chris-wilson.co.uk
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
parent 3aaa8aba
...@@ -3532,7 +3532,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, ...@@ -3532,7 +3532,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
void void
i915_gem_object_unpin_from_display_plane(struct i915_vma *vma) i915_gem_object_unpin_from_display_plane(struct i915_vma *vma)
{ {
lockdep_assert_held(&vma->vm->dev->struct_mutex); lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
if (WARN_ON(vma->obj->pin_display == 0)) if (WARN_ON(vma->obj->pin_display == 0))
return; return;
......
...@@ -96,7 +96,7 @@ i915_gem_evict_something(struct i915_address_space *vm, ...@@ -96,7 +96,7 @@ i915_gem_evict_something(struct i915_address_space *vm,
u64 start, u64 end, u64 start, u64 end,
unsigned flags) unsigned flags)
{ {
struct drm_i915_private *dev_priv = to_i915(vm->dev); struct drm_i915_private *dev_priv = vm->i915;
struct list_head eviction_list; struct list_head eviction_list;
struct list_head *phases[] = { struct list_head *phases[] = {
&vm->inactive_list, &vm->inactive_list,
...@@ -106,7 +106,7 @@ i915_gem_evict_something(struct i915_address_space *vm, ...@@ -106,7 +106,7 @@ i915_gem_evict_something(struct i915_address_space *vm,
struct i915_vma *vma, *next; struct i915_vma *vma, *next;
int ret; int ret;
lockdep_assert_held(&vm->dev->struct_mutex); lockdep_assert_held(&vm->i915->drm.struct_mutex);
trace_i915_gem_evict(vm, min_size, alignment, flags); trace_i915_gem_evict(vm, min_size, alignment, flags);
/* /*
...@@ -162,7 +162,7 @@ i915_gem_evict_something(struct i915_address_space *vm, ...@@ -162,7 +162,7 @@ i915_gem_evict_something(struct i915_address_space *vm,
* back to userspace to give our workqueues time to * back to userspace to give our workqueues time to
* acquire our locks and unpin the old scanouts. * acquire our locks and unpin the old scanouts.
*/ */
return intel_has_pending_fb_unpin(vm->dev) ? -EAGAIN : -ENOSPC; return intel_has_pending_fb_unpin(dev_priv) ? -EAGAIN : -ENOSPC;
} }
/* Not everything in the GGTT is tracked via vma (otherwise we /* Not everything in the GGTT is tracked via vma (otherwise we
...@@ -217,7 +217,7 @@ i915_gem_evict_for_vma(struct i915_vma *target) ...@@ -217,7 +217,7 @@ i915_gem_evict_for_vma(struct i915_vma *target)
{ {
struct drm_mm_node *node, *next; struct drm_mm_node *node, *next;
lockdep_assert_held(&target->vm->dev->struct_mutex); lockdep_assert_held(&target->vm->i915->drm.struct_mutex);
list_for_each_entry_safe(node, next, list_for_each_entry_safe(node, next,
&target->vm->mm.head_node.node_list, &target->vm->mm.head_node.node_list,
...@@ -272,11 +272,11 @@ int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle) ...@@ -272,11 +272,11 @@ int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle)
struct i915_vma *vma, *next; struct i915_vma *vma, *next;
int ret; int ret;
lockdep_assert_held(&vm->dev->struct_mutex); lockdep_assert_held(&vm->i915->drm.struct_mutex);
trace_i915_gem_evict_vm(vm); trace_i915_gem_evict_vm(vm);
if (do_idle) { if (do_idle) {
struct drm_i915_private *dev_priv = to_i915(vm->dev); struct drm_i915_private *dev_priv = vm->i915;
if (i915_is_ggtt(vm)) { if (i915_is_ggtt(vm)) {
ret = i915_gem_switch_to_kernel_context(dev_priv); ret = i915_gem_switch_to_kernel_context(dev_priv);
......
...@@ -290,7 +290,7 @@ i915_vma_put_fence(struct i915_vma *vma) ...@@ -290,7 +290,7 @@ i915_vma_put_fence(struct i915_vma *vma)
{ {
struct drm_i915_fence_reg *fence = vma->fence; struct drm_i915_fence_reg *fence = vma->fence;
assert_rpm_wakelock_held(to_i915(vma->vm->dev)); assert_rpm_wakelock_held(vma->vm->i915);
if (!fence) if (!fence)
return 0; return 0;
...@@ -313,7 +313,7 @@ static struct drm_i915_fence_reg *fence_find(struct drm_i915_private *dev_priv) ...@@ -313,7 +313,7 @@ static struct drm_i915_fence_reg *fence_find(struct drm_i915_private *dev_priv)
} }
/* Wait for completion of pending flips which consume fences */ /* Wait for completion of pending flips which consume fences */
if (intel_has_pending_fb_unpin(&dev_priv->drm)) if (intel_has_pending_fb_unpin(dev_priv))
return ERR_PTR(-EAGAIN); return ERR_PTR(-EAGAIN);
return ERR_PTR(-EDEADLK); return ERR_PTR(-EDEADLK);
...@@ -346,7 +346,7 @@ i915_vma_get_fence(struct i915_vma *vma) ...@@ -346,7 +346,7 @@ i915_vma_get_fence(struct i915_vma *vma)
/* Note that we revoke fences on runtime suspend. Therefore the user /* Note that we revoke fences on runtime suspend. Therefore the user
* must keep the device awake whilst using the fence. * must keep the device awake whilst using the fence.
*/ */
assert_rpm_wakelock_held(to_i915(vma->vm->dev)); assert_rpm_wakelock_held(vma->vm->i915);
/* Just update our place in the LRU if our fence is getting reused. */ /* Just update our place in the LRU if our fence is getting reused. */
if (vma->fence) { if (vma->fence) {
...@@ -357,7 +357,7 @@ i915_vma_get_fence(struct i915_vma *vma) ...@@ -357,7 +357,7 @@ i915_vma_get_fence(struct i915_vma *vma)
return 0; return 0;
} }
} else if (set) { } else if (set) {
fence = fence_find(to_i915(vma->vm->dev)); fence = fence_find(vma->vm->i915);
if (IS_ERR(fence)) if (IS_ERR(fence))
return PTR_ERR(fence); return PTR_ERR(fence);
} else } else
......
This diff is collapsed.
...@@ -220,7 +220,7 @@ struct i915_pml4 { ...@@ -220,7 +220,7 @@ struct i915_pml4 {
struct i915_address_space { struct i915_address_space {
struct drm_mm mm; struct drm_mm mm;
struct i915_gem_timeline timeline; struct i915_gem_timeline timeline;
struct drm_device *dev; struct drm_i915_private *i915;
/* Every address space belongs to a struct file - except for the global /* Every address space belongs to a struct file - except for the global
* GTT that is owned by the driver (and so @file is set to NULL). In * GTT that is owned by the driver (and so @file is set to NULL). In
* principle, no information should leak from one context to another * principle, no information should leak from one context to another
......
...@@ -119,7 +119,7 @@ i915_tiling_ok(struct drm_i915_private *dev_priv, ...@@ -119,7 +119,7 @@ i915_tiling_ok(struct drm_i915_private *dev_priv,
static bool i915_vma_fence_prepare(struct i915_vma *vma, int tiling_mode) static bool i915_vma_fence_prepare(struct i915_vma *vma, int tiling_mode)
{ {
struct drm_i915_private *dev_priv = to_i915(vma->vm->dev); struct drm_i915_private *dev_priv = vma->vm->i915;
u32 size; u32 size;
if (!i915_vma_is_map_and_fenceable(vma)) if (!i915_vma_is_map_and_fenceable(vma))
......
...@@ -198,9 +198,9 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma) ...@@ -198,9 +198,9 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
void __iomem *ptr; void __iomem *ptr;
/* Access through the GTT requires the device to be awake. */ /* Access through the GTT requires the device to be awake. */
assert_rpm_wakelock_held(to_i915(vma->vm->dev)); assert_rpm_wakelock_held(vma->vm->i915);
lockdep_assert_held(&vma->vm->dev->struct_mutex); lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
if (WARN_ON(!i915_vma_is_map_and_fenceable(vma))) if (WARN_ON(!i915_vma_is_map_and_fenceable(vma)))
return IO_ERR_PTR(-ENODEV); return IO_ERR_PTR(-ENODEV);
...@@ -347,7 +347,7 @@ bool i915_gem_valid_gtt_space(struct i915_vma *vma, ...@@ -347,7 +347,7 @@ bool i915_gem_valid_gtt_space(struct i915_vma *vma,
static int static int
i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags) i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
{ {
struct drm_i915_private *dev_priv = to_i915(vma->vm->dev); struct drm_i915_private *dev_priv = vma->vm->i915;
struct drm_i915_gem_object *obj = vma->obj; struct drm_i915_gem_object *obj = vma->obj;
u64 start, end; u64 start, end;
int ret; int ret;
...@@ -469,7 +469,7 @@ int __i915_vma_do_pin(struct i915_vma *vma, ...@@ -469,7 +469,7 @@ int __i915_vma_do_pin(struct i915_vma *vma,
unsigned int bound = vma->flags; unsigned int bound = vma->flags;
int ret; int ret;
lockdep_assert_held(&vma->vm->dev->struct_mutex); lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
GEM_BUG_ON((flags & (PIN_GLOBAL | PIN_USER)) == 0); GEM_BUG_ON((flags & (PIN_GLOBAL | PIN_USER)) == 0);
GEM_BUG_ON((flags & PIN_GLOBAL) && !i915_vma_is_ggtt(vma)); GEM_BUG_ON((flags & PIN_GLOBAL) && !i915_vma_is_ggtt(vma));
...@@ -567,7 +567,7 @@ int i915_vma_unbind(struct i915_vma *vma) ...@@ -567,7 +567,7 @@ int i915_vma_unbind(struct i915_vma *vma)
for_each_active(active, idx) { for_each_active(active, idx) {
ret = i915_gem_active_retire(&vma->last_read[idx], ret = i915_gem_active_retire(&vma->last_read[idx],
&vma->vm->dev->struct_mutex); &vma->vm->i915->drm.struct_mutex);
if (ret) if (ret)
break; break;
} }
......
...@@ -282,7 +282,7 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma); ...@@ -282,7 +282,7 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma);
*/ */
static inline void i915_vma_unpin_iomap(struct i915_vma *vma) static inline void i915_vma_unpin_iomap(struct i915_vma *vma)
{ {
lockdep_assert_held(&vma->vm->dev->struct_mutex); lockdep_assert_held(&vma->obj->base.dev->struct_mutex);
GEM_BUG_ON(vma->iomap == NULL); GEM_BUG_ON(vma->iomap == NULL);
i915_vma_unpin(vma); i915_vma_unpin(vma);
} }
...@@ -311,7 +311,7 @@ static inline struct page *i915_vma_first_page(struct i915_vma *vma) ...@@ -311,7 +311,7 @@ static inline struct page *i915_vma_first_page(struct i915_vma *vma)
static inline bool static inline bool
i915_vma_pin_fence(struct i915_vma *vma) i915_vma_pin_fence(struct i915_vma *vma)
{ {
lockdep_assert_held(&vma->vm->dev->struct_mutex); lockdep_assert_held(&vma->obj->base.dev->struct_mutex);
if (vma->fence) { if (vma->fence) {
vma->fence->pin_count++; vma->fence->pin_count++;
return true; return true;
...@@ -330,7 +330,7 @@ i915_vma_pin_fence(struct i915_vma *vma) ...@@ -330,7 +330,7 @@ i915_vma_pin_fence(struct i915_vma *vma)
static inline void static inline void
i915_vma_unpin_fence(struct i915_vma *vma) i915_vma_unpin_fence(struct i915_vma *vma)
{ {
lockdep_assert_held(&vma->vm->dev->struct_mutex); lockdep_assert_held(&vma->obj->base.dev->struct_mutex);
if (vma->fence) { if (vma->fence) {
GEM_BUG_ON(vma->fence->pin_count <= 0); GEM_BUG_ON(vma->fence->pin_count <= 0);
vma->fence->pin_count--; vma->fence->pin_count--;
......
...@@ -4228,9 +4228,8 @@ static void ironlake_fdi_disable(struct drm_crtc *crtc) ...@@ -4228,9 +4228,8 @@ static void ironlake_fdi_disable(struct drm_crtc *crtc)
udelay(100); udelay(100);
} }
bool intel_has_pending_fb_unpin(struct drm_device *dev) bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv)
{ {
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *crtc; struct intel_crtc *crtc;
/* Note that we don't need to be called with mode_config.lock here /* Note that we don't need to be called with mode_config.lock here
...@@ -4240,7 +4239,7 @@ bool intel_has_pending_fb_unpin(struct drm_device *dev) ...@@ -4240,7 +4239,7 @@ bool intel_has_pending_fb_unpin(struct drm_device *dev)
* cannot claim and pin a new fb without at least acquring the * cannot claim and pin a new fb without at least acquring the
* struct_mutex and so serialising with us. * struct_mutex and so serialising with us.
*/ */
for_each_intel_crtc(dev, crtc) { for_each_intel_crtc(&dev_priv->drm, crtc) {
if (atomic_read(&crtc->unpin_work_count) == 0) if (atomic_read(&crtc->unpin_work_count) == 0)
continue; continue;
......
...@@ -1219,7 +1219,7 @@ unsigned int intel_fb_xy_to_linear(int x, int y, ...@@ -1219,7 +1219,7 @@ unsigned int intel_fb_xy_to_linear(int x, int y,
void intel_add_fb_offsets(int *x, int *y, void intel_add_fb_offsets(int *x, int *y,
const struct intel_plane_state *state, int plane); const struct intel_plane_state *state, int plane);
unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info); unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info);
bool intel_has_pending_fb_unpin(struct drm_device *dev); bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv);
void intel_mark_busy(struct drm_i915_private *dev_priv); void intel_mark_busy(struct drm_i915_private *dev_priv);
void intel_mark_idle(struct drm_i915_private *dev_priv); void intel_mark_idle(struct drm_i915_private *dev_priv);
void intel_crtc_restore_mode(struct drm_crtc *crtc); void intel_crtc_restore_mode(struct drm_crtc *crtc);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment