Commit f7bbe788 authored by Chris Wilson

drm/i915: Embed the io-mapping struct inside drm_i915_private

As io_mapping.h now always allocates the struct, we can avoid that
allocation and the extra pointer dance by embedding the struct inside
drm_i915_private.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20160819155428.1670-5-chris@chris-wilson.co.uk
parent cafaf14a
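
For context, a minimal sketch of the pattern this commit applies, assuming the io_mapping_init_wc()/io_mapping_fini() API introduced by the parent commit (io_mapping_init_wc() initialises a caller-owned struct and returns NULL on failure, io_mapping_fini() tears it down). The my_dev structure and helpers are hypothetical stand-ins for illustration, not i915 code:

#include <linux/io-mapping.h>

struct my_dev {
        struct io_mapping mappable;     /* embedded; was: struct io_mapping *mappable */
        resource_size_t mappable_base;
        unsigned long mappable_end;
};

static int my_dev_init_mappable(struct my_dev *dev)
{
        /*
         * Old pattern: dev->mappable = io_mapping_create_wc(base, size),
         * which now always allocates a struct io_mapping internally.
         * New pattern: initialise the embedded struct in place, saving
         * the allocation; io_mapping_init_wc() returns NULL on failure.
         */
        if (!io_mapping_init_wc(&dev->mappable,
                                dev->mappable_base,
                                dev->mappable_end))
                return -EIO;

        return 0;
}

static void my_dev_fini_mappable(struct my_dev *dev)
{
        /* Old pattern: io_mapping_free(dev->mappable); */
        io_mapping_fini(&dev->mappable);
}

Callers change accordingly: every io_mapping_map_wc()/io_mapping_map_atomic_wc() user passes &dev->mappable instead of a stored pointer, which is the s/ggtt->mappable/&ggtt->mappable/ substitution visible throughout the diff below.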
@@ -891,7 +891,7 @@ i915_gem_gtt_pread(struct drm_device *dev,
 		 * and write to user memory which may result into page
 		 * faults, and so we cannot perform this under struct_mutex.
 		 */
-		if (slow_user_access(ggtt->mappable, page_base,
+		if (slow_user_access(&ggtt->mappable, page_base,
 				     page_offset, user_data,
 				     page_length, false)) {
 			ret = -EFAULT;
@@ -1187,11 +1187,11 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_private *i915,
 		 * If the object is non-shmem backed, we retry again with the
 		 * path that handles page fault.
 		 */
-		if (fast_user_write(ggtt->mappable, page_base,
+		if (fast_user_write(&ggtt->mappable, page_base,
 				    page_offset, user_data, page_length)) {
 			hit_slow_path = true;
 			mutex_unlock(&dev->struct_mutex);
-			if (slow_user_access(ggtt->mappable,
+			if (slow_user_access(&ggtt->mappable,
 					     page_base,
 					     page_offset, user_data,
 					     page_length, true)) {
...
@@ -474,7 +474,7 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,
 		offset += page << PAGE_SHIFT;
 	}
 
-	vaddr = io_mapping_map_atomic_wc(cache->i915->ggtt.mappable, offset);
+	vaddr = io_mapping_map_atomic_wc(&cache->i915->ggtt.mappable, offset);
 	cache->page = page;
 	cache->vaddr = (unsigned long)vaddr;
...
@@ -2794,7 +2794,6 @@ void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv)
 	if (dev_priv->mm.aliasing_ppgtt) {
 		struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
-
 		ppgtt->base.cleanup(&ppgtt->base);
 		kfree(ppgtt);
 	}
@@ -2811,7 +2810,7 @@ void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv)
 	ggtt->base.cleanup(&ggtt->base);
 
 	arch_phys_wc_del(ggtt->mtrr);
-	io_mapping_free(ggtt->mappable);
+	io_mapping_fini(&ggtt->mappable);
 }
 
 static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
@@ -3209,9 +3208,9 @@ int i915_ggtt_init_hw(struct drm_i915_private *dev_priv)
 	if (!HAS_LLC(dev_priv))
 		ggtt->base.mm.color_adjust = i915_gtt_color_adjust;
 
-	ggtt->mappable =
-		io_mapping_create_wc(ggtt->mappable_base, ggtt->mappable_end);
-	if (!ggtt->mappable) {
+	if (!io_mapping_init_wc(&dev_priv->ggtt.mappable,
+				dev_priv->ggtt.mappable_base,
+				dev_priv->ggtt.mappable_end)) {
 		ret = -EIO;
 		goto out_gtt_cleanup;
 	}
@@ -3681,7 +3680,7 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
 	ptr = vma->iomap;
 	if (ptr == NULL) {
-		ptr = io_mapping_map_wc(i915_vm_to_ggtt(vma->vm)->mappable,
+		ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->mappable,
 					vma->node.start,
 					vma->node.size);
 		if (ptr == NULL)
...
@@ -439,13 +439,13 @@ struct i915_address_space {
  */
 struct i915_ggtt {
 	struct i915_address_space base;
+	struct io_mapping mappable;	/* Mapping to our CPU mappable region */
 
 	size_t stolen_size;		/* Total size of stolen memory */
 	size_t stolen_usable_size;	/* Total size minus BIOS reserved */
 	size_t stolen_reserved_base;
 	size_t stolen_reserved_size;
 	u64 mappable_end;		/* End offset that we can CPU map */
-	struct io_mapping *mappable;	/* Mapping to our CPU mappable region */
 	phys_addr_t mappable_base;	/* PA of our GMADR */
 
 	/** "Graphics Stolen Memory" holds the global PTEs */
...
@@ -729,7 +729,7 @@ i915_error_object_create(struct drm_i915_private *dev_priv,
 			 * captures what the GPU read.
 			 */
-			s = io_mapping_map_atomic_wc(ggtt->mappable,
+			s = io_mapping_map_atomic_wc(&ggtt->mappable,
 						     reloc_offset);
 			memcpy_fromio(d, s, PAGE_SIZE);
 			io_mapping_unmap_atomic(s);
...
@@ -196,7 +196,7 @@ intel_overlay_map_regs(struct intel_overlay *overlay)
 	if (OVERLAY_NEEDS_PHYSICAL(dev_priv))
 		regs = (struct overlay_registers __iomem *)overlay->reg_bo->phys_handle->vaddr;
 	else
-		regs = io_mapping_map_wc(dev_priv->ggtt.mappable,
+		regs = io_mapping_map_wc(&dev_priv->ggtt.mappable,
 					 overlay->flip_addr,
 					 PAGE_SIZE);
@@ -1489,7 +1489,7 @@ intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
 		regs = (struct overlay_registers __iomem *)
 			overlay->reg_bo->phys_handle->vaddr;
 	else
-		regs = io_mapping_map_atomic_wc(dev_priv->ggtt.mappable,
+		regs = io_mapping_map_atomic_wc(&dev_priv->ggtt.mappable,
 						overlay->flip_addr);
 
 	return regs;
...