Commit 7464284b authored by Matthew Auld's avatar Matthew Auld Committed by Chris Wilson

drm/i915: align the vma start to the largest gtt page size

For the 48b PPGTT try to align the vma start address to the required
page size boundary to guarantee we use said page size in the gtt. If we
are dealing with multiple page sizes, we can't guarantee anything and
just align to the largest. For soft pinning and objects which need to be
tightly packed into the lower 32bits we don't force any alignment.

v2: various improvements suggested by Chris

v3: use set_pages and better placement of page_sizes

v4: prefer upper_32_bits()

v5: assign vma->page_sizes = vma->obj->page_sizes directly
    prefer sizeof(vma->page_sizes)

v6: fixup checking of end to exclude GGTT (which are assumed to be
    limited to 4G).
Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20171006145041.21673-9-matthew.auld@intel.com
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20171006221833.32439-8-chris@chris-wilson.co.uk
parent fa3f46af
...@@ -226,6 +226,8 @@ static int ppgtt_set_pages(struct i915_vma *vma) ...@@ -226,6 +226,8 @@ static int ppgtt_set_pages(struct i915_vma *vma)
vma->pages = vma->obj->mm.pages; vma->pages = vma->obj->mm.pages;
vma->page_sizes = vma->obj->mm.page_sizes;
return 0; return 0;
} }
...@@ -238,6 +240,8 @@ static void clear_pages(struct i915_vma *vma) ...@@ -238,6 +240,8 @@ static void clear_pages(struct i915_vma *vma)
kfree(vma->pages); kfree(vma->pages);
} }
vma->pages = NULL; vma->pages = NULL;
memset(&vma->page_sizes, 0, sizeof(vma->page_sizes));
} }
static gen8_pte_t gen8_pte_encode(dma_addr_t addr, static gen8_pte_t gen8_pte_encode(dma_addr_t addr,
...@@ -2538,6 +2542,8 @@ static int ggtt_set_pages(struct i915_vma *vma) ...@@ -2538,6 +2542,8 @@ static int ggtt_set_pages(struct i915_vma *vma)
if (ret) if (ret)
return ret; return ret;
vma->page_sizes = vma->obj->mm.page_sizes;
return 0; return 0;
} }
......
...@@ -493,6 +493,22 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags) ...@@ -493,6 +493,22 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
if (ret) if (ret)
goto err_clear; goto err_clear;
} else { } else {
/*
* We only support huge gtt pages through the 48b PPGTT,
* however we also don't want to force any alignment for
* objects which need to be tightly packed into the low 32bits.
*
* Note that we assume that GGTT are limited to 4GiB for the
* forseeable future. See also i915_ggtt_offset().
*/
if (upper_32_bits(end - 1) &&
vma->page_sizes.sg > I915_GTT_PAGE_SIZE) {
u64 page_alignment =
rounddown_pow_of_two(vma->page_sizes.sg);
alignment = max(alignment, page_alignment);
}
ret = i915_gem_gtt_insert(vma->vm, &vma->node, ret = i915_gem_gtt_insert(vma->vm, &vma->node,
size, alignment, obj->cache_level, size, alignment, obj->cache_level,
start, end, flags); start, end, flags);
......
...@@ -55,6 +55,7 @@ struct i915_vma { ...@@ -55,6 +55,7 @@ struct i915_vma {
void __iomem *iomap; void __iomem *iomap;
u64 size; u64 size;
u64 display_alignment; u64 display_alignment;
struct i915_page_sizes page_sizes;
u32 fence_size; u32 fence_size;
u32 fence_alignment; u32 fence_alignment;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment