Commit 0a03852e authored by Matthew Auld, committed by Chris Wilson

drm/i915: support 2M pages for the 48b PPGTT

Support inserting 2M gtt pages into the 48b PPGTT.

v2: sanity check sg->length against page_size

v3: don't recalculate rem on each loop
    whitespace breakup
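
Illustration (not part of the patch): the condition that gates the 2M path can be modelled outside the driver as a small predicate. can_use_2m_entry() below is a hypothetical helper using plain integers in place of the driver's sgt_dma iterator state; the comments map each test back to the checks on vma->page_sizes.sg, iter->dma, rem and idx.pte in gen8_ppgtt_insert_huge_entries().

#include <stdbool.h>
#include <stdint.h>

#define SZ_2M (1ull << 21)

/* Hypothetical sketch: a single 2M PDE can replace 512 4K PTEs only
 * when all four conditions hold at once. */
static bool can_use_2m_entry(bool obj_has_2m, uint64_t dma,
			     uint64_t rem, uint64_t gtt_offset)
{
	return obj_has_2m &&                    /* vma->page_sizes.sg & I915_GTT_PAGE_SIZE_2M */
	       (dma & (SZ_2M - 1)) == 0 &&      /* IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_2M) */
	       rem >= SZ_2M &&                  /* rem >= I915_GTT_PAGE_SIZE_2M */
	       (gtt_offset & (SZ_2M - 1)) == 0; /* !idx.pte, i.e. on a PDE boundary */
}

If any one of these fails, the patch falls back to filling ordinary 4K PTEs for the current run, as the diff below shows.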
Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20171006145041.21673-13-matthew.auld@intel.com
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20171006221833.32439-12-chris@chris-wilson.co.uk
parent 8cb09836
@@ -1013,6 +1013,69 @@ static void gen8_ppgtt_insert_3lvl(struct i915_address_space *vm,
 				      cache_level);
 }
 
+static void gen8_ppgtt_insert_huge_entries(struct i915_vma *vma,
+					   struct i915_page_directory_pointer **pdps,
+					   struct sgt_dma *iter,
+					   enum i915_cache_level cache_level)
+{
+	const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level);
+	u64 start = vma->node.start;
+	dma_addr_t rem = iter->sg->length;
+
+	do {
+		struct gen8_insert_pte idx = gen8_insert_pte(start);
+		struct i915_page_directory_pointer *pdp = pdps[idx.pml4e];
+		struct i915_page_directory *pd = pdp->page_directory[idx.pdpe];
+		unsigned int page_size;
+		gen8_pte_t encode = pte_encode;
+		gen8_pte_t *vaddr;
+		u16 index, max;
+
+		if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_2M &&
+		    IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_2M) &&
+		    rem >= I915_GTT_PAGE_SIZE_2M && !idx.pte) {
+			index = idx.pde;
+			max = I915_PDES;
+			page_size = I915_GTT_PAGE_SIZE_2M;
+
+			encode |= GEN8_PDE_PS_2M;
+
+			vaddr = kmap_atomic_px(pd);
+		} else {
+			struct i915_page_table *pt = pd->page_table[idx.pde];
+
+			index = idx.pte;
+			max = GEN8_PTES;
+			page_size = I915_GTT_PAGE_SIZE;
+
+			vaddr = kmap_atomic_px(pt);
+		}
+
+		do {
+			GEM_BUG_ON(iter->sg->length < page_size);
+			vaddr[index++] = encode | iter->dma;
+
+			start += page_size;
+			iter->dma += page_size;
+			rem -= page_size;
+
+			if (iter->dma >= iter->max) {
+				iter->sg = __sg_next(iter->sg);
+				if (!iter->sg)
+					break;
+
+				rem = iter->sg->length;
+				iter->dma = sg_dma_address(iter->sg);
+				iter->max = iter->dma + rem;
+
+				if (unlikely(!IS_ALIGNED(iter->dma, page_size)))
+					break;
+			}
+		} while (rem >= page_size && index < max);
+
+		kunmap_atomic(vaddr);
+	} while (iter->sg);
+}
+
 static void gen8_ppgtt_insert_4lvl(struct i915_address_space *vm,
 				   struct i915_vma *vma,
 				   enum i915_cache_level cache_level,
@@ -1025,11 +1088,16 @@ static void gen8_ppgtt_insert_4lvl(struct i915_address_space *vm,
 		.max = iter.dma + iter.sg->length,
 	};
 	struct i915_page_directory_pointer **pdps = ppgtt->pml4.pdps;
-	struct gen8_insert_pte idx = gen8_insert_pte(vma->node.start);
 
-	while (gen8_ppgtt_insert_pte_entries(ppgtt, pdps[idx.pml4e++], &iter,
-					     &idx, cache_level))
-		GEM_BUG_ON(idx.pml4e >= GEN8_PML4ES_PER_PML4);
+	if (vma->page_sizes.sg > I915_GTT_PAGE_SIZE) {
+		gen8_ppgtt_insert_huge_entries(vma, pdps, &iter, cache_level);
+	} else {
+		struct gen8_insert_pte idx = gen8_insert_pte(vma->node.start);
+
+		while (gen8_ppgtt_insert_pte_entries(ppgtt, pdps[idx.pml4e++],
+						     &iter, &idx, cache_level))
+			GEM_BUG_ON(idx.pml4e >= GEN8_PML4ES_PER_PML4);
+	}
 }
 
 static void gen8_free_page_tables(struct i915_address_space *vm,
@@ -154,6 +154,8 @@ typedef u64 gen8_ppgtt_pml4e_t;
 #define GEN8_PPAT_GET_AGE(x) ((x) & (3 << 4))
 #define CHV_PPAT_GET_SNOOP(x) ((x) & (1 << 6))
 
+#define GEN8_PDE_PS_2M   BIT(7)
+
 struct sg_table;
 
 struct intel_rotation_info {
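
For context, the overall shape of the new insertion path can be modelled in plain C. The sketch below is hypothetical and heavily simplified (plain integers and printf in place of scatterlists and PTE writes), but it follows the same structure as gen8_ppgtt_insert_huge_entries(): re-evaluate the usable page size at the top of each run of entries, fill entries until the chunk is exhausted, and fall back to 4K whenever alignment or remaining length rules out 2M.

#include <stdint.h>
#include <stdio.h>

#define SZ_4K (1ull << 12)
#define SZ_2M (1ull << 21)

/* Stand-in for one contiguous dma chunk of a scatterlist. */
struct chunk {
	uint64_t dma;
	uint64_t len;
};

int main(void)
{
	/* Example input: a 2M-aligned 4M chunk, then an unaligned 8K chunk. */
	struct chunk chunks[] = {
		{ .dma = 4ull << 20, .len = 4ull << 20 },
		{ .dma = (16ull << 20) + SZ_4K, .len = 2 * SZ_4K },
	};
	uint64_t start = 0; /* GTT offset, 2M aligned in this example */

	for (unsigned int i = 0; i < sizeof(chunks) / sizeof(chunks[0]); i++) {
		uint64_t dma = chunks[i].dma;
		uint64_t rem = chunks[i].len;

		while (rem) {
			/* Re-evaluate the page size for each run of
			 * entries, as the outer do/while in the patch
			 * does after crossing an sg boundary. */
			uint64_t page_size = SZ_4K;

			if ((dma & (SZ_2M - 1)) == 0 &&
			    (start & (SZ_2M - 1)) == 0 && rem >= SZ_2M)
				page_size = SZ_2M;

			printf("insert %4lluK entry at GTT %#llx\n",
			       (unsigned long long)(page_size >> 10),
			       (unsigned long long)start);

			start += page_size;
			dma += page_size;
			rem -= page_size;
		}
	}

	return 0;
}

With this example input the model prints two 2M entries followed by two 4K entries, which is the mix the patched walker would write for the same layout.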