Commit 12b07256 authored by Chris Wilson

drm/i915: Export ppgtt_bind_vma

Reuse the ppgtt_bind_vma() for aliasing_ppgtt_bind_vma() so we can
reduce some code near-duplication. The catch is that we need to then
pass along the i915_address_space and not rely on vma->vm, as they
differ with the aliasing-ppgtt.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Andi Shyti <andi.shyti@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200703102519.26539-1-chris@chris-wilson.co.uk
parent 5cecf507
...@@ -32,16 +32,17 @@ static void vma_clear_pages(struct i915_vma *vma) ...@@ -32,16 +32,17 @@ static void vma_clear_pages(struct i915_vma *vma)
vma->pages = NULL; vma->pages = NULL;
} }
static int vma_bind(struct i915_vma *vma, static int vma_bind(struct i915_address_space *vm,
struct i915_vma *vma,
enum i915_cache_level cache_level, enum i915_cache_level cache_level,
u32 flags) u32 flags)
{ {
return vma->vm->vma_ops.bind_vma(vma, cache_level, flags); return vm->vma_ops.bind_vma(vm, vma, cache_level, flags);
} }
static void vma_unbind(struct i915_vma *vma) static void vma_unbind(struct i915_address_space *vm, struct i915_vma *vma)
{ {
vma->vm->vma_ops.unbind_vma(vma); vm->vma_ops.unbind_vma(vm, vma);
} }
static const struct i915_vma_ops proxy_vma_ops = { static const struct i915_vma_ops proxy_vma_ops = {
......
...@@ -299,11 +299,12 @@ static void pd_vma_clear_pages(struct i915_vma *vma) ...@@ -299,11 +299,12 @@ static void pd_vma_clear_pages(struct i915_vma *vma)
vma->pages = NULL; vma->pages = NULL;
} }
static int pd_vma_bind(struct i915_vma *vma, static int pd_vma_bind(struct i915_address_space *vm,
struct i915_vma *vma,
enum i915_cache_level cache_level, enum i915_cache_level cache_level,
u32 unused) u32 unused)
{ {
struct i915_ggtt *ggtt = i915_vm_to_ggtt(vma->vm); struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
struct gen6_ppgtt *ppgtt = vma->private; struct gen6_ppgtt *ppgtt = vma->private;
u32 ggtt_offset = i915_ggtt_offset(vma) / I915_GTT_PAGE_SIZE; u32 ggtt_offset = i915_ggtt_offset(vma) / I915_GTT_PAGE_SIZE;
...@@ -314,7 +315,7 @@ static int pd_vma_bind(struct i915_vma *vma, ...@@ -314,7 +315,7 @@ static int pd_vma_bind(struct i915_vma *vma,
return 0; return 0;
} }
static void pd_vma_unbind(struct i915_vma *vma) static void pd_vma_unbind(struct i915_address_space *vm, struct i915_vma *vma)
{ {
struct gen6_ppgtt *ppgtt = vma->private; struct gen6_ppgtt *ppgtt = vma->private;
struct i915_page_directory * const pd = ppgtt->base.pd; struct i915_page_directory * const pd = ppgtt->base.pd;
......
...@@ -436,7 +436,8 @@ static void i915_ggtt_clear_range(struct i915_address_space *vm, ...@@ -436,7 +436,8 @@ static void i915_ggtt_clear_range(struct i915_address_space *vm,
intel_gtt_clear_range(start >> PAGE_SHIFT, length >> PAGE_SHIFT); intel_gtt_clear_range(start >> PAGE_SHIFT, length >> PAGE_SHIFT);
} }
static int ggtt_bind_vma(struct i915_vma *vma, static int ggtt_bind_vma(struct i915_address_space *vm,
struct i915_vma *vma,
enum i915_cache_level cache_level, enum i915_cache_level cache_level,
u32 flags) u32 flags)
{ {
...@@ -451,15 +452,15 @@ static int ggtt_bind_vma(struct i915_vma *vma, ...@@ -451,15 +452,15 @@ static int ggtt_bind_vma(struct i915_vma *vma,
if (i915_gem_object_is_readonly(obj)) if (i915_gem_object_is_readonly(obj))
pte_flags |= PTE_READ_ONLY; pte_flags |= PTE_READ_ONLY;
vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags); vm->insert_entries(vm, vma, cache_level, pte_flags);
vma->page_sizes.gtt = I915_GTT_PAGE_SIZE; vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
return 0; return 0;
} }
static void ggtt_unbind_vma(struct i915_vma *vma) static void ggtt_unbind_vma(struct i915_address_space *vm, struct i915_vma *vma)
{ {
vma->vm->clear_range(vma->vm, vma->node.start, vma->size); vm->clear_range(vm, vma->node.start, vma->size);
} }
static int ggtt_reserve_guc_top(struct i915_ggtt *ggtt) static int ggtt_reserve_guc_top(struct i915_ggtt *ggtt)
...@@ -567,7 +568,8 @@ static int init_ggtt(struct i915_ggtt *ggtt) ...@@ -567,7 +568,8 @@ static int init_ggtt(struct i915_ggtt *ggtt)
return ret; return ret;
} }
static int aliasing_gtt_bind_vma(struct i915_vma *vma, static int aliasing_gtt_bind_vma(struct i915_address_space *vm,
struct i915_vma *vma,
enum i915_cache_level cache_level, enum i915_cache_level cache_level,
u32 flags) u32 flags)
{ {
...@@ -580,44 +582,27 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma, ...@@ -580,44 +582,27 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma,
pte_flags |= PTE_READ_ONLY; pte_flags |= PTE_READ_ONLY;
if (flags & I915_VMA_LOCAL_BIND) { if (flags & I915_VMA_LOCAL_BIND) {
struct i915_ppgtt *alias = i915_vm_to_ggtt(vma->vm)->alias; struct i915_ppgtt *alias = i915_vm_to_ggtt(vm)->alias;
if (flags & I915_VMA_ALLOC) { ret = ppgtt_bind_vma(&alias->vm, vma, cache_level, flags);
ret = alias->vm.allocate_va_range(&alias->vm, if (ret)
vma->node.start, return ret;
vma->size);
if (ret)
return ret;
set_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma));
}
GEM_BUG_ON(!test_bit(I915_VMA_ALLOC_BIT,
__i915_vma_flags(vma)));
alias->vm.insert_entries(&alias->vm, vma,
cache_level, pte_flags);
} }
if (flags & I915_VMA_GLOBAL_BIND) if (flags & I915_VMA_GLOBAL_BIND)
vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags); vm->insert_entries(vm, vma, cache_level, pte_flags);
return 0; return 0;
} }
static void aliasing_gtt_unbind_vma(struct i915_vma *vma) static void aliasing_gtt_unbind_vma(struct i915_address_space *vm,
struct i915_vma *vma)
{ {
if (i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND)) { if (i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND))
struct i915_address_space *vm = vma->vm;
vm->clear_range(vm, vma->node.start, vma->size); vm->clear_range(vm, vma->node.start, vma->size);
}
if (test_and_clear_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma))) {
struct i915_address_space *vm =
&i915_vm_to_ggtt(vma->vm)->alias->vm;
vm->clear_range(vm, vma->node.start, vma->size); if (i915_vma_is_bound(vma, I915_VMA_LOCAL_BIND))
} ppgtt_unbind_vma(&i915_vm_to_ggtt(vm)->alias->vm, vma);
} }
static int init_aliasing_ppgtt(struct i915_ggtt *ggtt) static int init_aliasing_ppgtt(struct i915_ggtt *ggtt)
......
...@@ -198,14 +198,16 @@ struct intel_gt; ...@@ -198,14 +198,16 @@ struct intel_gt;
struct i915_vma_ops { struct i915_vma_ops {
/* Map an object into an address space with the given cache flags. */ /* Map an object into an address space with the given cache flags. */
int (*bind_vma)(struct i915_vma *vma, int (*bind_vma)(struct i915_address_space *vm,
struct i915_vma *vma,
enum i915_cache_level cache_level, enum i915_cache_level cache_level,
u32 flags); u32 flags);
/* /*
* Unmap an object from an address space. This usually consists of * Unmap an object from an address space. This usually consists of
* setting the valid PTE entries to a reserved scratch page. * setting the valid PTE entries to a reserved scratch page.
*/ */
void (*unbind_vma)(struct i915_vma *vma); void (*unbind_vma)(struct i915_address_space *vm,
struct i915_vma *vma);
int (*set_pages)(struct i915_vma *vma); int (*set_pages)(struct i915_vma *vma);
void (*clear_pages)(struct i915_vma *vma); void (*clear_pages)(struct i915_vma *vma);
...@@ -566,6 +568,13 @@ int ggtt_set_pages(struct i915_vma *vma); ...@@ -566,6 +568,13 @@ int ggtt_set_pages(struct i915_vma *vma);
int ppgtt_set_pages(struct i915_vma *vma); int ppgtt_set_pages(struct i915_vma *vma);
void clear_pages(struct i915_vma *vma); void clear_pages(struct i915_vma *vma);
int ppgtt_bind_vma(struct i915_address_space *vm,
struct i915_vma *vma,
enum i915_cache_level cache_level,
u32 flags);
void ppgtt_unbind_vma(struct i915_address_space *vm,
struct i915_vma *vma);
void gtt_write_workarounds(struct intel_gt *gt); void gtt_write_workarounds(struct intel_gt *gt);
void setup_private_pat(struct intel_uncore *uncore); void setup_private_pat(struct intel_uncore *uncore);
......
...@@ -155,16 +155,16 @@ struct i915_ppgtt *i915_ppgtt_create(struct intel_gt *gt) ...@@ -155,16 +155,16 @@ struct i915_ppgtt *i915_ppgtt_create(struct intel_gt *gt)
return ppgtt; return ppgtt;
} }
static int ppgtt_bind_vma(struct i915_vma *vma, int ppgtt_bind_vma(struct i915_address_space *vm,
enum i915_cache_level cache_level, struct i915_vma *vma,
u32 flags) enum i915_cache_level cache_level,
u32 flags)
{ {
u32 pte_flags; u32 pte_flags;
int err; int err;
if (flags & I915_VMA_ALLOC) { if (!test_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma))) {
err = vma->vm->allocate_va_range(vma->vm, err = vm->allocate_va_range(vm, vma->node.start, vma->size);
vma->node.start, vma->size);
if (err) if (err)
return err; return err;
...@@ -176,17 +176,16 @@ static int ppgtt_bind_vma(struct i915_vma *vma, ...@@ -176,17 +176,16 @@ static int ppgtt_bind_vma(struct i915_vma *vma,
if (i915_gem_object_is_readonly(vma->obj)) if (i915_gem_object_is_readonly(vma->obj))
pte_flags |= PTE_READ_ONLY; pte_flags |= PTE_READ_ONLY;
GEM_BUG_ON(!test_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma))); vm->insert_entries(vm, vma, cache_level, pte_flags);
vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
wmb(); wmb();
return 0; return 0;
} }
static void ppgtt_unbind_vma(struct i915_vma *vma) void ppgtt_unbind_vma(struct i915_address_space *vm, struct i915_vma *vma)
{ {
if (test_and_clear_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma))) if (test_and_clear_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma)))
vma->vm->clear_range(vma->vm, vma->node.start, vma->size); vm->clear_range(vm, vma->node.start, vma->size);
} }
int ppgtt_set_pages(struct i915_vma *vma) int ppgtt_set_pages(struct i915_vma *vma)
......
...@@ -304,7 +304,7 @@ static int __vma_bind(struct dma_fence_work *work) ...@@ -304,7 +304,7 @@ static int __vma_bind(struct dma_fence_work *work)
struct i915_vma *vma = vw->vma; struct i915_vma *vma = vw->vma;
int err; int err;
err = vma->ops->bind_vma(vma, vw->cache_level, vw->flags); err = vma->ops->bind_vma(vma->vm, vma, vw->cache_level, vw->flags);
if (err) if (err)
atomic_or(I915_VMA_ERROR, &vma->flags); atomic_or(I915_VMA_ERROR, &vma->flags);
...@@ -407,7 +407,7 @@ int i915_vma_bind(struct i915_vma *vma, ...@@ -407,7 +407,7 @@ int i915_vma_bind(struct i915_vma *vma,
work->vma = vma; work->vma = vma;
work->cache_level = cache_level; work->cache_level = cache_level;
work->flags = bind_flags | I915_VMA_ALLOC; work->flags = bind_flags;
/* /*
* Note we only want to chain up to the migration fence on * Note we only want to chain up to the migration fence on
...@@ -433,7 +433,7 @@ int i915_vma_bind(struct i915_vma *vma, ...@@ -433,7 +433,7 @@ int i915_vma_bind(struct i915_vma *vma,
work->pinned = vma->obj; work->pinned = vma->obj;
} }
} else { } else {
ret = vma->ops->bind_vma(vma, cache_level, bind_flags); ret = vma->ops->bind_vma(vma->vm, vma, cache_level, bind_flags);
if (ret) if (ret)
return ret; return ret;
} }
...@@ -1261,7 +1261,7 @@ void __i915_vma_evict(struct i915_vma *vma) ...@@ -1261,7 +1261,7 @@ void __i915_vma_evict(struct i915_vma *vma)
if (likely(atomic_read(&vma->vm->open))) { if (likely(atomic_read(&vma->vm->open))) {
trace_i915_vma_unbind(vma); trace_i915_vma_unbind(vma);
vma->ops->unbind_vma(vma); vma->ops->unbind_vma(vma->vm, vma);
} }
atomic_and(~(I915_VMA_BIND_MASK | I915_VMA_ERROR | I915_VMA_GGTT_WRITE), atomic_and(~(I915_VMA_BIND_MASK | I915_VMA_ERROR | I915_VMA_GGTT_WRITE),
&vma->flags); &vma->flags);
......
...@@ -235,7 +235,6 @@ struct i915_vma { ...@@ -235,7 +235,6 @@ struct i915_vma {
#define I915_VMA_BIND_MASK (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND) #define I915_VMA_BIND_MASK (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND)
#define I915_VMA_ALLOC_BIT 12 #define I915_VMA_ALLOC_BIT 12
#define I915_VMA_ALLOC ((int)BIT(I915_VMA_ALLOC_BIT))
#define I915_VMA_ERROR_BIT 13 #define I915_VMA_ERROR_BIT 13
#define I915_VMA_ERROR ((int)BIT(I915_VMA_ERROR_BIT)) #define I915_VMA_ERROR ((int)BIT(I915_VMA_ERROR_BIT))
......
...@@ -38,7 +38,8 @@ static void mock_insert_entries(struct i915_address_space *vm, ...@@ -38,7 +38,8 @@ static void mock_insert_entries(struct i915_address_space *vm,
{ {
} }
static int mock_bind_ppgtt(struct i915_vma *vma, static int mock_bind_ppgtt(struct i915_address_space *vm,
struct i915_vma *vma,
enum i915_cache_level cache_level, enum i915_cache_level cache_level,
u32 flags) u32 flags)
{ {
...@@ -47,7 +48,8 @@ static int mock_bind_ppgtt(struct i915_vma *vma, ...@@ -47,7 +48,8 @@ static int mock_bind_ppgtt(struct i915_vma *vma,
return 0; return 0;
} }
static void mock_unbind_ppgtt(struct i915_vma *vma) static void mock_unbind_ppgtt(struct i915_address_space *vm,
struct i915_vma *vma)
{ {
} }
...@@ -88,7 +90,8 @@ struct i915_ppgtt *mock_ppgtt(struct drm_i915_private *i915, const char *name) ...@@ -88,7 +90,8 @@ struct i915_ppgtt *mock_ppgtt(struct drm_i915_private *i915, const char *name)
return ppgtt; return ppgtt;
} }
static int mock_bind_ggtt(struct i915_vma *vma, static int mock_bind_ggtt(struct i915_address_space *vm,
struct i915_vma *vma,
enum i915_cache_level cache_level, enum i915_cache_level cache_level,
u32 flags) u32 flags)
{ {
...@@ -96,7 +99,8 @@ static int mock_bind_ggtt(struct i915_vma *vma, ...@@ -96,7 +99,8 @@ static int mock_bind_ggtt(struct i915_vma *vma,
return 0; return 0;
} }
static void mock_unbind_ggtt(struct i915_vma *vma) static void mock_unbind_ggtt(struct i915_address_space *vm,
struct i915_vma *vma)
{ {
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment