Commit f0e4a063 authored by Chris Wilson

drm/i915: Move GEM domain management to its own file

Continuing the decluttering of i915_gem.c, that of the read/write
domains, perhaps the biggest of GEM's follies?
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190528092956.14910-7-chris@chris-wilson.co.uk
parent b414fcd5
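
In brief, as the hunks below show: i915_gem_obj_prepare_shmem_read/write become i915_gem_object_prepare_read/write, i915_gem_obj_finish_shmem_access becomes i915_gem_object_finish_access, and the set-to-domain and display-plane pin/unpin declarations move from i915_drv.h into gem/i915_gem_object.h, with the implementation landing in the new gem/i915_gem_domain.c.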
@@ -87,6 +87,7 @@ i915-y += $(gt-y)
 # GEM (Graphics Execution Management) code
 obj-y += gem/
 gem-y += \
+	gem/i915_gem_domain.o \
 	gem/i915_gem_object.o \
 	gem/i915_gem_mman.o \
 	gem/i915_gem_pages.o \
......
@@ -15,6 +15,8 @@
 #include "i915_gem_object_types.h"
+#include "i915_gem_gtt.h"
 void i915_gem_init__objects(struct drm_i915_private *i915);
 struct drm_i915_gem_object *i915_gem_object_alloc(void);
@@ -358,6 +360,20 @@ void
 i915_gem_object_flush_write_domain(struct drm_i915_gem_object *obj,
				    unsigned int flush_domains);
+int i915_gem_object_prepare_read(struct drm_i915_gem_object *obj,
+				 unsigned int *needs_clflush);
+int i915_gem_object_prepare_write(struct drm_i915_gem_object *obj,
+				  unsigned int *needs_clflush);
+#define CLFLUSH_BEFORE	BIT(0)
+#define CLFLUSH_AFTER	BIT(1)
+#define CLFLUSH_FLAGS	(CLFLUSH_BEFORE | CLFLUSH_AFTER)
+static inline void
+i915_gem_object_finish_access(struct drm_i915_gem_object *obj)
+{
+	i915_gem_object_unpin_pages(obj);
+}
 static inline struct intel_engine_cs *
 i915_gem_object_last_write_engine(struct drm_i915_gem_object *obj)
 {
@@ -379,6 +395,19 @@ void i915_gem_object_set_cache_coherency(struct drm_i915_gem_object *obj,
					 unsigned int cache_level);
 void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj);
+int __must_check
+i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write);
+int __must_check
+i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write);
+int __must_check
+i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write);
+struct i915_vma * __must_check
+i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
+				     u32 alignment,
+				     const struct i915_ggtt_view *view,
+				     unsigned int flags);
+void i915_gem_object_unpin_from_display_plane(struct i915_vma *vma);
 static inline bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
 {
	if (obj->cache_dirty)
......
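
For orientation, the renamed helpers keep the old shmem-access contract: prepare_read/prepare_write pin the object's backing pages and report via CLFLUSH_BEFORE/CLFLUSH_AFTER which cache flushes the caller owes around its CPU access, and finish_access simply drops the pin. A minimal sketch of a CPU write through the new API, modelled on the cpu_set() selftest further down; cpu_write_dword() itself is a hypothetical helper, not part of this patch:

static int cpu_write_dword(struct drm_i915_gem_object *obj, u32 value)
{
	unsigned int needs_clflush;
	u32 *map;
	int err;

	/* Pins the backing pages and reports which clflushes we owe. */
	err = i915_gem_object_prepare_write(obj, &needs_clflush);
	if (err)
		return err;

	map = kmap_atomic(i915_gem_object_get_page(obj, 0));
	if (needs_clflush & CLFLUSH_BEFORE)
		drm_clflush_virt_range(map, sizeof(*map));

	*map = value;

	if (needs_clflush & CLFLUSH_AFTER)
		drm_clflush_virt_range(map, sizeof(*map));
	kunmap_atomic(map);

	/* Just unpins the pages, per the inline helper above. */
	i915_gem_object_finish_access(obj);
	return 0;
}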
@@ -1764,7 +1764,7 @@ static int perform_bb_shadow(struct parser_exec_state *s)
		goto err_free_bb;
	}
-	ret = i915_gem_obj_prepare_shmem_write(bb->obj, &bb->clflush);
+	ret = i915_gem_object_prepare_write(bb->obj, &bb->clflush);
	if (ret)
		goto err_free_obj;
@@ -1813,7 +1813,7 @@ static int perform_bb_shadow(struct parser_exec_state *s)
 err_unmap:
	i915_gem_object_unpin_map(bb->obj);
 err_finish_shmem_access:
-	i915_gem_obj_finish_shmem_access(bb->obj);
+	i915_gem_object_finish_access(bb->obj);
 err_free_obj:
	i915_gem_object_put(bb->obj);
 err_free_bb:
......
@@ -482,7 +482,7 @@ static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
						       bb->obj->base.size);
				bb->clflush &= ~CLFLUSH_AFTER;
			}
-			i915_gem_obj_finish_shmem_access(bb->obj);
+			i915_gem_object_finish_access(bb->obj);
			bb->accessing = false;
		} else {
@@ -510,7 +510,7 @@ static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
			if (ret)
				goto err;
-			i915_gem_obj_finish_shmem_access(bb->obj);
+			i915_gem_object_finish_access(bb->obj);
			bb->accessing = false;
			ret = i915_vma_move_to_active(bb->vma,
@@ -588,7 +588,7 @@ static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
	list_for_each_entry_safe(bb, pos, &workload->shadow_bb, list) {
		if (bb->obj) {
			if (bb->accessing)
-				i915_gem_obj_finish_shmem_access(bb->obj);
+				i915_gem_object_finish_access(bb->obj);
			if (bb->va && !IS_ERR(bb->va))
				i915_gem_object_unpin_map(bb->obj);
......
@@ -1058,11 +1058,11 @@ static u32 *copy_batch(struct drm_i915_gem_object *dst_obj,
	void *dst, *src;
	int ret;
-	ret = i915_gem_obj_prepare_shmem_read(src_obj, &src_needs_clflush);
+	ret = i915_gem_object_prepare_read(src_obj, &src_needs_clflush);
	if (ret)
		return ERR_PTR(ret);
-	ret = i915_gem_obj_prepare_shmem_write(dst_obj, &dst_needs_clflush);
+	ret = i915_gem_object_prepare_write(dst_obj, &dst_needs_clflush);
	if (ret) {
		dst = ERR_PTR(ret);
		goto unpin_src;
@@ -1120,9 +1120,9 @@ static u32 *copy_batch(struct drm_i915_gem_object *dst_obj,
	*needs_clflush_after = dst_needs_clflush & CLFLUSH_AFTER;
 unpin_dst:
-	i915_gem_obj_finish_shmem_access(dst_obj);
+	i915_gem_object_finish_access(dst_obj);
 unpin_src:
-	i915_gem_obj_finish_shmem_access(src_obj);
+	i915_gem_object_finish_access(src_obj);
	return dst;
 }
......
@@ -2814,20 +2814,6 @@ static inline int __sg_page_count(const struct scatterlist *sg)
	return sg->length >> PAGE_SHIFT;
 }
-int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
-				    unsigned int *needs_clflush);
-int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj,
-				     unsigned int *needs_clflush);
-#define CLFLUSH_BEFORE	BIT(0)
-#define CLFLUSH_AFTER	BIT(1)
-#define CLFLUSH_FLAGS	(CLFLUSH_BEFORE | CLFLUSH_AFTER)
-static inline void
-i915_gem_obj_finish_shmem_access(struct drm_i915_gem_object *obj)
-{
-	i915_gem_object_unpin_pages(obj);
-}
 static inline int __must_check
 i915_mutex_lock_interruptible(struct drm_device *dev)
 {
@@ -2890,18 +2876,6 @@ int i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
				  const struct i915_sched_attr *attr);
 #define I915_PRIORITY_DISPLAY I915_USER_PRIORITY(I915_PRIORITY_MAX)
-int __must_check
-i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write);
-int __must_check
-i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write);
-int __must_check
-i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write);
-struct i915_vma * __must_check
-i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
-				     u32 alignment,
-				     const struct i915_ggtt_view *view,
-				     unsigned int flags);
-void i915_gem_object_unpin_from_display_plane(struct i915_vma *vma);
 int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file);
 void i915_gem_release(struct drm_device *dev, struct drm_file *file);
......
@@ -1026,7 +1026,7 @@ static void reloc_cache_reset(struct reloc_cache *cache)
			mb();
		kunmap_atomic(vaddr);
-		i915_gem_obj_finish_shmem_access((struct drm_i915_gem_object *)cache->node.mm);
+		i915_gem_object_finish_access((struct drm_i915_gem_object *)cache->node.mm);
	} else {
		wmb();
		io_mapping_unmap_atomic((void __iomem *)vaddr);
@@ -1058,7 +1058,7 @@ static void *reloc_kmap(struct drm_i915_gem_object *obj,
		unsigned int flushes;
		int err;
-		err = i915_gem_obj_prepare_shmem_write(obj, &flushes);
+		err = i915_gem_object_prepare_write(obj, &flushes);
		if (err)
			return ERR_PTR(err);
......
@@ -84,7 +84,7 @@ static int render_state_setup(struct intel_render_state *so,
	u32 *d;
	int ret;
-	ret = i915_gem_obj_prepare_shmem_write(so->obj, &needs_clflush);
+	ret = i915_gem_object_prepare_write(so->obj, &needs_clflush);
	if (ret)
		return ret;
@@ -166,7 +166,7 @@ static int render_state_setup(struct intel_render_state *so,
	ret = 0;
 out:
-	i915_gem_obj_finish_shmem_access(so->obj);
+	i915_gem_object_finish_access(so->obj);
	return ret;
 err:
......
@@ -1017,7 +1017,7 @@ static int cpu_check(struct drm_i915_gem_object *obj, u32 dword, u32 val)
	unsigned long n;
	int err;
-	err = i915_gem_obj_prepare_shmem_read(obj, &needs_flush);
+	err = i915_gem_object_prepare_read(obj, &needs_flush);
	if (err)
		return err;
@@ -1038,7 +1038,7 @@ static int cpu_check(struct drm_i915_gem_object *obj, u32 dword, u32 val)
		kunmap_atomic(ptr);
	}
-	i915_gem_obj_finish_shmem_access(obj);
+	i915_gem_object_finish_access(obj);
	return err;
 }
......
@@ -37,7 +37,7 @@ static int cpu_set(struct drm_i915_gem_object *obj,
	u32 *cpu;
	int err;
-	err = i915_gem_obj_prepare_shmem_write(obj, &needs_clflush);
+	err = i915_gem_object_prepare_write(obj, &needs_clflush);
	if (err)
		return err;
@@ -54,7 +54,7 @@ static int cpu_set(struct drm_i915_gem_object *obj,
		drm_clflush_virt_range(cpu, sizeof(*cpu));
	kunmap_atomic(map);
-	i915_gem_obj_finish_shmem_access(obj);
+	i915_gem_object_finish_access(obj);
	return 0;
 }
@@ -69,7 +69,7 @@ static int cpu_get(struct drm_i915_gem_object *obj,
	u32 *cpu;
	int err;
-	err = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush);
+	err = i915_gem_object_prepare_read(obj, &needs_clflush);
	if (err)
		return err;
@@ -83,7 +83,7 @@ static int cpu_get(struct drm_i915_gem_object *obj,
	*v = *cpu;
	kunmap_atomic(map);
-	i915_gem_obj_finish_shmem_access(obj);
+	i915_gem_object_finish_access(obj);
	return 0;
 }
......
@@ -354,7 +354,7 @@ static int cpu_fill(struct drm_i915_gem_object *obj, u32 value)
	unsigned int n, m, need_flush;
	int err;
-	err = i915_gem_obj_prepare_shmem_write(obj, &need_flush);
+	err = i915_gem_object_prepare_write(obj, &need_flush);
	if (err)
		return err;
@@ -369,7 +369,7 @@ static int cpu_fill(struct drm_i915_gem_object *obj, u32 value)
		kunmap_atomic(map);
	}
-	i915_gem_obj_finish_shmem_access(obj);
+	i915_gem_object_finish_access(obj);
	obj->read_domains = I915_GEM_DOMAIN_GTT | I915_GEM_DOMAIN_CPU;
	obj->write_domain = 0;
	return 0;
@@ -381,7 +381,7 @@ static noinline int cpu_check(struct drm_i915_gem_object *obj,
	unsigned int n, m, needs_flush;
	int err;
-	err = i915_gem_obj_prepare_shmem_read(obj, &needs_flush);
+	err = i915_gem_object_prepare_read(obj, &needs_flush);
	if (err)
		return err;
@@ -419,7 +419,7 @@ static noinline int cpu_check(struct drm_i915_gem_object *obj,
			break;
	}
-	i915_gem_obj_finish_shmem_access(obj);
+	i915_gem_object_finish_access(obj);
	return err;
 }
......