Commit 0d938863 authored by Thomas Hellström

drm/i915/ttm: Implement a function to copy the contents of two TTM-based objects

When backing up or restoring contents of pinned objects at suspend /
resume time we need to allocate a new object as the backup. Add a function
to facilitate copies between the two. Some data needs to be copied before
the migration context is ready for operation, so make sure we can
disable accelerated copies.

v2:
- Fix a missing return value check (Matthew Auld)
Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210922062527.865433-2-thomas.hellstrom@linux.intel.com
parent 2dfa597d
...@@ -429,6 +429,7 @@ i915_ttm_resource_get_st(struct drm_i915_gem_object *obj, ...@@ -429,6 +429,7 @@ i915_ttm_resource_get_st(struct drm_i915_gem_object *obj,
static int i915_ttm_accel_move(struct ttm_buffer_object *bo, static int i915_ttm_accel_move(struct ttm_buffer_object *bo,
bool clear, bool clear,
struct ttm_resource *dst_mem, struct ttm_resource *dst_mem,
struct ttm_tt *dst_ttm,
struct sg_table *dst_st) struct sg_table *dst_st)
{ {
struct drm_i915_private *i915 = container_of(bo->bdev, typeof(*i915), struct drm_i915_private *i915 = container_of(bo->bdev, typeof(*i915),
...@@ -438,14 +439,14 @@ static int i915_ttm_accel_move(struct ttm_buffer_object *bo, ...@@ -438,14 +439,14 @@ static int i915_ttm_accel_move(struct ttm_buffer_object *bo,
struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo); struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
struct sg_table *src_st; struct sg_table *src_st;
struct i915_request *rq; struct i915_request *rq;
struct ttm_tt *ttm = bo->ttm; struct ttm_tt *src_ttm = bo->ttm;
enum i915_cache_level src_level, dst_level; enum i915_cache_level src_level, dst_level;
int ret; int ret;
if (!i915->gt.migrate.context) if (!i915->gt.migrate.context)
return -EINVAL; return -EINVAL;
dst_level = i915_ttm_cache_level(i915, dst_mem, ttm); dst_level = i915_ttm_cache_level(i915, dst_mem, dst_ttm);
if (clear) { if (clear) {
if (bo->type == ttm_bo_type_kernel) if (bo->type == ttm_bo_type_kernel)
return -EINVAL; return -EINVAL;
...@@ -462,10 +463,10 @@ static int i915_ttm_accel_move(struct ttm_buffer_object *bo, ...@@ -462,10 +463,10 @@ static int i915_ttm_accel_move(struct ttm_buffer_object *bo,
} }
intel_engine_pm_put(i915->gt.migrate.context->engine); intel_engine_pm_put(i915->gt.migrate.context->engine);
} else { } else {
src_st = src_man->use_tt ? i915_ttm_tt_get_st(ttm) : src_st = src_man->use_tt ? i915_ttm_tt_get_st(src_ttm) :
obj->ttm.cached_io_st; obj->ttm.cached_io_st;
src_level = i915_ttm_cache_level(i915, bo->resource, ttm); src_level = i915_ttm_cache_level(i915, bo->resource, src_ttm);
intel_engine_pm_get(i915->gt.migrate.context->engine); intel_engine_pm_get(i915->gt.migrate.context->engine);
ret = intel_context_migrate_copy(i915->gt.migrate.context, ret = intel_context_migrate_copy(i915->gt.migrate.context,
NULL, src_st->sgl, src_level, NULL, src_st->sgl, src_level,
...@@ -485,11 +486,14 @@ static int i915_ttm_accel_move(struct ttm_buffer_object *bo, ...@@ -485,11 +486,14 @@ static int i915_ttm_accel_move(struct ttm_buffer_object *bo,
static void __i915_ttm_move(struct ttm_buffer_object *bo, bool clear, static void __i915_ttm_move(struct ttm_buffer_object *bo, bool clear,
struct ttm_resource *dst_mem, struct ttm_resource *dst_mem,
struct sg_table *dst_st) struct ttm_tt *dst_ttm,
struct sg_table *dst_st,
bool allow_accel)
{ {
int ret; int ret = -EINVAL;
ret = i915_ttm_accel_move(bo, clear, dst_mem, dst_st); if (allow_accel)
ret = i915_ttm_accel_move(bo, clear, dst_mem, dst_ttm, dst_st);
if (ret) { if (ret) {
struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo); struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
struct intel_memory_region *dst_reg, *src_reg; struct intel_memory_region *dst_reg, *src_reg;
...@@ -504,7 +508,7 @@ static void __i915_ttm_move(struct ttm_buffer_object *bo, bool clear, ...@@ -504,7 +508,7 @@ static void __i915_ttm_move(struct ttm_buffer_object *bo, bool clear,
GEM_BUG_ON(!dst_reg || !src_reg); GEM_BUG_ON(!dst_reg || !src_reg);
dst_iter = !cpu_maps_iomem(dst_mem) ? dst_iter = !cpu_maps_iomem(dst_mem) ?
ttm_kmap_iter_tt_init(&_dst_iter.tt, bo->ttm) : ttm_kmap_iter_tt_init(&_dst_iter.tt, dst_ttm) :
ttm_kmap_iter_iomap_init(&_dst_iter.io, &dst_reg->iomap, ttm_kmap_iter_iomap_init(&_dst_iter.io, &dst_reg->iomap,
dst_st, dst_reg->region.start); dst_st, dst_reg->region.start);
...@@ -559,7 +563,7 @@ static int i915_ttm_move(struct ttm_buffer_object *bo, bool evict, ...@@ -559,7 +563,7 @@ static int i915_ttm_move(struct ttm_buffer_object *bo, bool evict,
clear = !cpu_maps_iomem(bo->resource) && (!ttm || !ttm_tt_is_populated(ttm)); clear = !cpu_maps_iomem(bo->resource) && (!ttm || !ttm_tt_is_populated(ttm));
if (!(clear && ttm && !(ttm->page_flags & TTM_PAGE_FLAG_ZERO_ALLOC))) if (!(clear && ttm && !(ttm->page_flags & TTM_PAGE_FLAG_ZERO_ALLOC)))
__i915_ttm_move(bo, clear, dst_mem, dst_st); __i915_ttm_move(bo, clear, dst_mem, bo->ttm, dst_st, true);
ttm_bo_move_sync_cleanup(bo, dst_mem); ttm_bo_move_sync_cleanup(bo, dst_mem);
i915_ttm_adjust_domains_after_move(obj); i915_ttm_adjust_domains_after_move(obj);
...@@ -974,3 +978,50 @@ i915_gem_ttm_system_setup(struct drm_i915_private *i915, ...@@ -974,3 +978,50 @@ i915_gem_ttm_system_setup(struct drm_i915_private *i915,
intel_memory_region_set_name(mr, "system-ttm"); intel_memory_region_set_name(mr, "system-ttm");
return mr; return mr;
} }
/**
 * i915_gem_obj_copy_ttm - Copy the contents of one ttm-based gem object to
 * another
 * @dst: The destination object
 * @src: The source object
 * @allow_accel: Allow using the blitter. Otherwise TTM memcpy is used.
 * @intr: Whether to perform waits interruptibly.
 *
 * Note: The caller is responsible for assuring that the underlying
 * TTM objects are populated if needed and locked.
 *
 * Return: Zero on success. Negative error code on error. If @intr == true,
 * then it may return -ERESTARTSYS or -EINTR.
 */
int i915_gem_obj_copy_ttm(struct drm_i915_gem_object *dst,
struct drm_i915_gem_object *src,
bool allow_accel, bool intr)
{
struct ttm_buffer_object *dst_bo = i915_gem_to_ttm(dst);
struct ttm_buffer_object *src_bo = i915_gem_to_ttm(src);
struct ttm_operation_ctx ctx = {
.interruptible = intr,
};
struct sg_table *dst_st;
int ret;
/* Both objects must already be locked by the caller (see Note above). */
assert_object_held(dst);
assert_object_held(src);
/*
 * Sync for now. This will change with async moves.
 */
ret = ttm_bo_wait_ctx(dst_bo, &ctx);
if (!ret)
ret = ttm_bo_wait_ctx(src_bo, &ctx);
if (ret)
return ret;
/*
 * Destination sg-table: the cached iomap-backed one when the GPU binds
 * the destination as iomem, otherwise the ttm_tt page-backed one.
 */
dst_st = gpu_binds_iomem(dst_bo->resource) ?
dst->ttm.cached_io_st : i915_ttm_tt_get_st(dst_bo->ttm);
/* clear == false: this is a copy of src contents, not a clear of dst. */
__i915_ttm_move(src_bo, false, dst_bo->resource, dst_bo->ttm,
dst_st, allow_accel);
return 0;
}
...@@ -46,4 +46,8 @@ int __i915_gem_ttm_object_init(struct intel_memory_region *mem, ...@@ -46,4 +46,8 @@ int __i915_gem_ttm_object_init(struct intel_memory_region *mem,
resource_size_t size, resource_size_t size,
resource_size_t page_size, resource_size_t page_size,
unsigned int flags); unsigned int flags);
int i915_gem_obj_copy_ttm(struct drm_i915_gem_object *dst,
struct drm_i915_gem_object *src,
bool allow_accel, bool intr);
#endif #endif
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment