Commit 85e04705 authored by Jon Bloomfield, committed by Greg Kroah-Hartman

drm/i915/gtt: Read-only pages for insert_entries on bdw+

commit 250f8c81 upstream.

Hook up the flags to allow read-only ppGTT mappings for gen8+

v2: Include a selftest to check that writes to a readonly PTE are
dropped
v3: Don't duplicate cpu_check() as we can just reuse it, and even worse
don't wholesale copy the theory-of-operation comment from igt_ctx_exec
without changing it to explain the intention behind the new test!
v4: Joonas really likes magic mystery values
Signed-off-by: Jon Bloomfield <jon.bloomfield@intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Matthew Auld <matthew.william.auld@gmail.com>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Reviewed-by: Matthew Auld <matthew.william.auld@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180712185315.3288-2-chris@chris-wilson.co.uk
Signed-off-by: Jon Bloomfield <jon.bloomfield@intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent bebb6a49
...@@ -177,7 +177,7 @@ static int ppgtt_bind_vma(struct i915_vma *vma, ...@@ -177,7 +177,7 @@ static int ppgtt_bind_vma(struct i915_vma *vma,
vma->pages = vma->obj->pages; vma->pages = vma->obj->pages;
/* Currently applicable only to VLV */ /* Applicable to VLV, and gen8+ */
if (vma->obj->gt_ro) if (vma->obj->gt_ro)
pte_flags |= PTE_READ_ONLY; pte_flags |= PTE_READ_ONLY;
...@@ -793,7 +793,8 @@ gen8_ppgtt_insert_pte_entries(struct i915_address_space *vm, ...@@ -793,7 +793,8 @@ gen8_ppgtt_insert_pte_entries(struct i915_address_space *vm,
struct i915_page_directory_pointer *pdp, struct i915_page_directory_pointer *pdp,
struct sg_page_iter *sg_iter, struct sg_page_iter *sg_iter,
uint64_t start, uint64_t start,
enum i915_cache_level cache_level) enum i915_cache_level cache_level,
u32 flags)
{ {
struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
gen8_pte_t *pt_vaddr; gen8_pte_t *pt_vaddr;
...@@ -812,7 +813,7 @@ gen8_ppgtt_insert_pte_entries(struct i915_address_space *vm, ...@@ -812,7 +813,7 @@ gen8_ppgtt_insert_pte_entries(struct i915_address_space *vm,
pt_vaddr[pte] = pt_vaddr[pte] =
gen8_pte_encode(sg_page_iter_dma_address(sg_iter), gen8_pte_encode(sg_page_iter_dma_address(sg_iter),
cache_level, true, 0); cache_level, true, flags);
if (++pte == GEN8_PTES) { if (++pte == GEN8_PTES) {
kunmap_px(ppgtt, pt_vaddr); kunmap_px(ppgtt, pt_vaddr);
pt_vaddr = NULL; pt_vaddr = NULL;
...@@ -833,7 +834,7 @@ static void gen8_ppgtt_insert_entries(struct i915_address_space *vm, ...@@ -833,7 +834,7 @@ static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
struct sg_table *pages, struct sg_table *pages,
uint64_t start, uint64_t start,
enum i915_cache_level cache_level, enum i915_cache_level cache_level,
u32 unused) u32 flags)
{ {
struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
struct sg_page_iter sg_iter; struct sg_page_iter sg_iter;
...@@ -842,7 +843,7 @@ static void gen8_ppgtt_insert_entries(struct i915_address_space *vm, ...@@ -842,7 +843,7 @@ static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
if (!USES_FULL_48BIT_PPGTT(vm->dev)) { if (!USES_FULL_48BIT_PPGTT(vm->dev)) {
gen8_ppgtt_insert_pte_entries(vm, &ppgtt->pdp, &sg_iter, start, gen8_ppgtt_insert_pte_entries(vm, &ppgtt->pdp, &sg_iter, start,
cache_level); cache_level, flags);
} else { } else {
struct i915_page_directory_pointer *pdp; struct i915_page_directory_pointer *pdp;
uint64_t pml4e; uint64_t pml4e;
...@@ -850,7 +851,7 @@ static void gen8_ppgtt_insert_entries(struct i915_address_space *vm, ...@@ -850,7 +851,7 @@ static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
gen8_for_each_pml4e(pdp, &ppgtt->pml4, start, length, pml4e) { gen8_for_each_pml4e(pdp, &ppgtt->pml4, start, length, pml4e) {
gen8_ppgtt_insert_pte_entries(vm, pdp, &sg_iter, gen8_ppgtt_insert_pte_entries(vm, pdp, &sg_iter,
start, cache_level); start, cache_level, flags);
} }
} }
} }
...@@ -1523,6 +1524,10 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt) ...@@ -1523,6 +1524,10 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
ppgtt->base.clear_range = gen8_ppgtt_clear_range; ppgtt->base.clear_range = gen8_ppgtt_clear_range;
ppgtt->base.unbind_vma = ppgtt_unbind_vma; ppgtt->base.unbind_vma = ppgtt_unbind_vma;
ppgtt->base.bind_vma = ppgtt_bind_vma; ppgtt->base.bind_vma = ppgtt_bind_vma;
/* From bdw, there is support for read-only pages in the PPGTT */
ppgtt->base.has_read_only = true;
ppgtt->debug_dump = gen8_dump_ppgtt; ppgtt->debug_dump = gen8_dump_ppgtt;
if (USES_FULL_48BIT_PPGTT(ppgtt->base.dev)) { if (USES_FULL_48BIT_PPGTT(ppgtt->base.dev)) {
...@@ -2335,7 +2340,7 @@ static void gen8_ggtt_insert_page(struct i915_address_space *vm, ...@@ -2335,7 +2340,7 @@ static void gen8_ggtt_insert_page(struct i915_address_space *vm,
static void gen8_ggtt_insert_entries(struct i915_address_space *vm, static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
struct sg_table *st, struct sg_table *st,
uint64_t start, uint64_t start,
enum i915_cache_level level, u32 unused) enum i915_cache_level level, u32 flags)
{ {
struct drm_i915_private *dev_priv = to_i915(vm->dev); struct drm_i915_private *dev_priv = to_i915(vm->dev);
struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
...@@ -2346,6 +2351,9 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm, ...@@ -2346,6 +2351,9 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
int rpm_atomic_seq; int rpm_atomic_seq;
int i = 0; int i = 0;
/* The GTT does not support read-only mappings */
GEM_BUG_ON(flags & PTE_READ_ONLY);
rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv); rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
gtt_entries = (gen8_pte_t __iomem *)ggtt->gsm + (start >> PAGE_SHIFT); gtt_entries = (gen8_pte_t __iomem *)ggtt->gsm + (start >> PAGE_SHIFT);
...@@ -2607,7 +2615,7 @@ static int ggtt_bind_vma(struct i915_vma *vma, ...@@ -2607,7 +2615,7 @@ static int ggtt_bind_vma(struct i915_vma *vma,
if (ret) if (ret)
return ret; return ret;
/* Currently applicable only to VLV */ /* Applicable to VLV (gen8+ do not support RO in the GGTT) */
if (obj->gt_ro) if (obj->gt_ro)
pte_flags |= PTE_READ_ONLY; pte_flags |= PTE_READ_ONLY;
...@@ -3196,6 +3204,10 @@ int i915_ggtt_init_hw(struct drm_i915_private *dev_priv) ...@@ -3196,6 +3204,10 @@ int i915_ggtt_init_hw(struct drm_i915_private *dev_priv)
ggtt->base.total -= PAGE_SIZE; ggtt->base.total -= PAGE_SIZE;
i915_address_space_init(&ggtt->base, dev_priv); i915_address_space_init(&ggtt->base, dev_priv);
ggtt->base.total += PAGE_SIZE; ggtt->base.total += PAGE_SIZE;
/* Only VLV supports read-only GGTT mappings */
ggtt->base.has_read_only = IS_VALLEYVIEW(dev_priv);
if (!HAS_LLC(dev_priv)) if (!HAS_LLC(dev_priv))
ggtt->base.mm.color_adjust = i915_gtt_color_adjust; ggtt->base.mm.color_adjust = i915_gtt_color_adjust;
......
...@@ -392,6 +392,9 @@ struct i915_address_space { ...@@ -392,6 +392,9 @@ struct i915_address_space {
*/ */
struct list_head unbound_list; struct list_head unbound_list;
/* Some systems support read-only mappings for GGTT and/or PPGTT */
bool has_read_only:1;
/* FIXME: Need a more generic return type */ /* FIXME: Need a more generic return type */
gen6_pte_t (*pte_encode)(dma_addr_t addr, gen6_pte_t (*pte_encode)(dma_addr_t addr,
enum i915_cache_level level, enum i915_cache_level level,
......
...@@ -1951,6 +1951,7 @@ void intel_ring_unpin(struct intel_ring *ring) ...@@ -1951,6 +1951,7 @@ void intel_ring_unpin(struct intel_ring *ring)
static struct i915_vma * static struct i915_vma *
intel_ring_create_vma(struct drm_i915_private *dev_priv, int size) intel_ring_create_vma(struct drm_i915_private *dev_priv, int size)
{ {
struct i915_address_space *vm = &dev_priv->ggtt.base;
struct drm_i915_gem_object *obj; struct drm_i915_gem_object *obj;
struct i915_vma *vma; struct i915_vma *vma;
...@@ -1960,10 +1961,14 @@ intel_ring_create_vma(struct drm_i915_private *dev_priv, int size) ...@@ -1960,10 +1961,14 @@ intel_ring_create_vma(struct drm_i915_private *dev_priv, int size)
if (IS_ERR(obj)) if (IS_ERR(obj))
return ERR_CAST(obj); return ERR_CAST(obj);
/* mark ring buffers as read-only from GPU side by default */ /*
obj->gt_ro = 1; * Mark ring buffers as read-only from GPU side (so no stray overwrites)
* if supported by the platform's GGTT.
*/
if (vm->has_read_only)
obj->gt_ro = 1;
vma = i915_vma_create(obj, &dev_priv->ggtt.base, NULL); vma = i915_vma_create(obj, vm, NULL);
if (IS_ERR(vma)) if (IS_ERR(vma))
goto err; goto err;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment