Commit b290a78b authored by Chris Wilson

drm/i915: Use helpers for drm_mm_node booleans

A subset of 71724f70 ("drm/mm: Use helpers for drm_mm_node booleans")
in order to prepare drm-intel-next-queued for subsequent patches before
we can backmerge 71724f70 itself.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20191004142226.13711-1-chris@chris-wilson.co.uk
parent 261ea7e2
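For reference, drm_mm_node_allocated() is the accessor the callers below switch to. A minimal sketch, assuming the include/drm/drm_mm.h of this era, where the helper still just wraps the node's boolean field (the full 71724f70 backmerge later turns it into a flags-bit test); the caller pattern shown is illustrative, not a line from this diff:

/* Sketch of the drm_mm accessor this patch converts callers to. */
static inline bool drm_mm_node_allocated(const struct drm_mm_node *node)
{
        return node->allocated; /* 71724f70 replaces this field read with a flags-bit test */
}

/*
 * Caller pattern after this patch:
 *
 *        if (drm_mm_node_allocated(&vma->node))   ...instead of: if (vma->node.allocated)
 */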
@@ -968,7 +968,7 @@ static void reloc_cache_reset(struct reloc_cache *cache)
                 intel_gt_flush_ggtt_writes(ggtt->vm.gt);
                 io_mapping_unmap_atomic((void __iomem *)vaddr);
-                if (cache->node.allocated) {
+                if (drm_mm_node_allocated(&cache->node)) {
                         ggtt->vm.clear_range(&ggtt->vm,
                                              cache->node.start,
                                              cache->node.size);
@@ -1061,7 +1061,7 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,
         }
         offset = cache->node.start;
-        if (cache->node.allocated) {
+        if (drm_mm_node_allocated(&cache->node)) {
                 ggtt->vm.insert_page(&ggtt->vm,
                                      i915_gem_object_get_dma_address(obj, page),
                                      offset, I915_CACHE_NONE, 0);
...
@@ -387,7 +387,7 @@ static u32 uc_fw_ggtt_offset(struct intel_uc_fw *uc_fw, struct i915_ggtt *ggtt)
 {
         struct drm_mm_node *node = &ggtt->uc_fw;
-        GEM_BUG_ON(!node->allocated);
+        GEM_BUG_ON(!drm_mm_node_allocated(node));
         GEM_BUG_ON(upper_32_bits(node->start));
         GEM_BUG_ON(upper_32_bits(node->start + node->size - 1));
...
@@ -356,7 +356,7 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
                 ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
                 if (ret)
                         goto out_unlock;
-                GEM_BUG_ON(!node.allocated);
+                GEM_BUG_ON(!drm_mm_node_allocated(&node));
         }
         mutex_unlock(&i915->drm.struct_mutex);
@@ -393,7 +393,7 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
                 unsigned page_offset = offset_in_page(offset);
                 unsigned page_length = PAGE_SIZE - page_offset;
                 page_length = remain < page_length ? remain : page_length;
-                if (node.allocated) {
+                if (drm_mm_node_allocated(&node)) {
                         ggtt->vm.insert_page(&ggtt->vm,
                                              i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
                                              node.start, I915_CACHE_NONE, 0);
@@ -415,7 +415,7 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
         i915_gem_object_unlock_fence(obj, fence);
 out_unpin:
         mutex_lock(&i915->drm.struct_mutex);
-        if (node.allocated) {
+        if (drm_mm_node_allocated(&node)) {
                 ggtt->vm.clear_range(&ggtt->vm, node.start, node.size);
                 remove_mappable_node(&node);
         } else {
@@ -566,7 +566,7 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
                 ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
                 if (ret)
                         goto out_rpm;
-                GEM_BUG_ON(!node.allocated);
+                GEM_BUG_ON(!drm_mm_node_allocated(&node));
         }
         mutex_unlock(&i915->drm.struct_mutex);
@@ -604,7 +604,7 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
                 unsigned int page_offset = offset_in_page(offset);
                 unsigned int page_length = PAGE_SIZE - page_offset;
                 page_length = remain < page_length ? remain : page_length;
-                if (node.allocated) {
+                if (drm_mm_node_allocated(&node)) {
                         /* flush the write before we modify the GGTT */
                         intel_gt_flush_ggtt_writes(ggtt->vm.gt);
                         ggtt->vm.insert_page(&ggtt->vm,
@@ -636,7 +636,7 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
 out_unpin:
         mutex_lock(&i915->drm.struct_mutex);
         intel_gt_flush_ggtt_writes(ggtt->vm.gt);
-        if (node.allocated) {
+        if (drm_mm_node_allocated(&node)) {
                 ggtt->vm.clear_range(&ggtt->vm, node.start, node.size);
                 remove_mappable_node(&node);
         } else {
...
@@ -299,7 +299,7 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
                         break;
                 }
-                GEM_BUG_ON(!node->allocated);
+                GEM_BUG_ON(!drm_mm_node_allocated(node));
                 vma = container_of(node, typeof(*vma), node);
                 /* If we are using coloring to insert guard pages between
...
@@ -795,7 +795,7 @@ void i915_vma_reopen(struct i915_vma *vma)
 static void __i915_vma_destroy(struct i915_vma *vma)
 {
-        GEM_BUG_ON(vma->node.allocated);
+        GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
         GEM_BUG_ON(vma->fence);
         mutex_lock(&vma->vm->mutex);
...
@@ -228,7 +228,7 @@ static inline bool i915_vma_is_closed(const struct i915_vma *vma)
 static inline u32 i915_ggtt_offset(const struct i915_vma *vma)
 {
         GEM_BUG_ON(!i915_vma_is_ggtt(vma));
-        GEM_BUG_ON(!vma->node.allocated);
+        GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
         GEM_BUG_ON(upper_32_bits(vma->node.start));
         GEM_BUG_ON(upper_32_bits(vma->node.start + vma->node.size - 1));
         return lower_32_bits(vma->node.start);
@@ -390,7 +390,7 @@ static inline bool i915_vma_is_bound(const struct i915_vma *vma,
 static inline bool i915_node_color_differs(const struct drm_mm_node *node,
                                            unsigned long color)
 {
-        return node->allocated && node->color != color;
+        return drm_mm_node_allocated(node) && node->color != color;
 }
 /**
...