Commit 4d6c1859 authored by Chris Wilson

drm/i915/gt: Fill all the unused space in the GGTT

When we allocate space in the GGTT we may have to allocate a larger
region than will be populated by the object to accommodate fencing. Make
sure that this space beyond the end of the buffer points safely into
scratch space, in case the HW tries to access it anyway (e.g. fenced
access to the last tile row).

v2: Preemptively / conservatively guard gen6 ggtt as well.
Reported-by: Imre Deak <imre.deak@intel.com>
References: https://gitlab.freedesktop.org/drm/intel/-/issues/1554
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Matthew Auld <matthew.auld@intel.com>
Cc: Imre Deak <imre.deak@intel.com>
Cc: stable@vger.kernel.org
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Reviewed-by: Imre Deak <imre.deak@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200331152348.26946-1-chris@chris-wilson.co.uk
parent 708c82d5
@@ -191,10 +191,11 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
 				     enum i915_cache_level level,
 				     u32 flags)
 {
-	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
-	struct sgt_iter sgt_iter;
-	gen8_pte_t __iomem *gtt_entries;
 	const gen8_pte_t pte_encode = gen8_ggtt_pte_encode(0, level, 0);
+	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
+	gen8_pte_t __iomem *gte;
+	gen8_pte_t __iomem *end;
+	struct sgt_iter iter;
 	dma_addr_t addr;
 
 	/*
@@ -202,10 +203,17 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
 	 * not to allow the user to override access to a read only page.
 	 */
 
-	gtt_entries = (gen8_pte_t __iomem *)ggtt->gsm;
-	gtt_entries += vma->node.start / I915_GTT_PAGE_SIZE;
-	for_each_sgt_daddr(addr, sgt_iter, vma->pages)
-		gen8_set_pte(gtt_entries++, pte_encode | addr);
+	gte = (gen8_pte_t __iomem *)ggtt->gsm;
+	gte += vma->node.start / I915_GTT_PAGE_SIZE;
+	end = gte + vma->node.size / I915_GTT_PAGE_SIZE;
+
+	for_each_sgt_daddr(addr, iter, vma->pages)
+		gen8_set_pte(gte++, pte_encode | addr);
+	GEM_BUG_ON(gte > end);
+
+	/* Fill the allocated but "unused" space beyond the end of the buffer */
+	while (gte < end)
+		gen8_set_pte(gte++, vm->scratch[0].encode);
 
 	/*
 	 * We want to flush the TLBs only after we're certain all the PTE
@@ -241,13 +249,22 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
 				     u32 flags)
 {
 	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
-	gen6_pte_t __iomem *entries = (gen6_pte_t __iomem *)ggtt->gsm;
-	unsigned int i = vma->node.start / I915_GTT_PAGE_SIZE;
+	gen6_pte_t __iomem *gte;
+	gen6_pte_t __iomem *end;
 	struct sgt_iter iter;
 	dma_addr_t addr;
 
+	gte = (gen6_pte_t __iomem *)ggtt->gsm;
+	gte += vma->node.start / I915_GTT_PAGE_SIZE;
+	end = gte + vma->node.size / I915_GTT_PAGE_SIZE;
+
 	for_each_sgt_daddr(addr, iter, vma->pages)
-		iowrite32(vm->pte_encode(addr, level, flags), &entries[i++]);
+		iowrite32(vm->pte_encode(addr, level, flags), gte++);
+	GEM_BUG_ON(gte > end);
+
+	/* Fill the allocated but "unused" space beyond the end of the buffer */
+	while (gte < end)
+		iowrite32(vm->scratch[0].encode, gte++);
 
 	/*
 	 * We want to flush the TLBs only after we're certain all the PTE
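
For illustration, a minimal self-contained userspace sketch of the backfill pattern both hunks apply (fill_ptes, GTT_PAGE_SIZE, the "| 1" encoding, and the scratch value below are hypothetical stand-ins for the driver's internals, not i915 API):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define GTT_PAGE_SIZE 4096	/* stand-in for I915_GTT_PAGE_SIZE */

typedef uint64_t pte_t;

/*
 * Write one PTE per populated page, then point every remaining PTE in
 * the allocated node at the scratch page, so a stray HW access past the
 * end of the buffer lands somewhere harmless.
 */
static void fill_ptes(pte_t *gte, size_t node_size,
		      const uint64_t *pages, size_t npages, pte_t scratch_pte)
{
	pte_t *end = gte + node_size / GTT_PAGE_SIZE;

	for (size_t i = 0; i < npages; i++)
		*gte++ = pages[i] | 1;	/* stand-in for pte_encode() */
	assert(gte <= end);		/* mirrors GEM_BUG_ON(gte > end) */

	/* Fill the allocated but "unused" space beyond the end of the buffer */
	while (gte < end)
		*gte++ = scratch_pte;
}

int main(void)
{
	pte_t ptes[8] = { 0 };
	uint64_t pages[3] = { 0x10000, 0x11000, 0x12000 };

	/* The node spans 8 pages but the object populates only 3. */
	fill_ptes(ptes, 8 * GTT_PAGE_SIZE, pages, 3, 0xdead000 | 1);

	for (int i = 0; i < 8; i++)
		printf("pte[%d] = %#llx\n", i, (unsigned long long)ptes[i]);
	return 0;
}

The assert documents the invariant that the populated pages can never overrun the node; the while loop then covers exactly the tail between the last populated PTE and the end of the allocation.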