Commit 08dea767 authored by Matt Roper, committed by Rodrigo Vivi

drm/xe: Move migration from GT to tile

Migration primarily focuses on the memory associated with a tile, so it
makes more sense to track this at the tile level (especially since the
driver was already skipping migration operations on media GTs).

Note that the blitter engine used to perform the migration always lives
in the tile's primary GT today.  In theory that could change if media
GTs ever gain blitter engines, but we can extend the design if/when
that happens.
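
The sketch below (illustrative only, not part of this commit) shows the
lookup pattern the change converges on: any GT, media or not, reaches
the single per-tile migrate context through its tile.  The helper name
migrate_for_gt() is hypothetical.

	/*
	 * Hypothetical helper, for illustration: resolve a GT's migrate
	 * context via its tile.  A media GT sees the same context as the
	 * tile's primary GT without keeping a per-GT pointer copy.
	 */
	static inline struct xe_migrate *migrate_for_gt(struct xe_gt *gt)
	{
		return gt_to_tile(gt)->migrate;
	}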

v2:
 - Fix kunit test build
 - Kerneldoc parameter name update
v3:
 - Removed leftover prototype for removed function.  (Gustavo)
 - Remove unrelated / unwanted error handling change.  (Gustavo)

Cc: Gustavo Sousa <gustavo.sousa@intel.com>
Reviewed-by: Lucas De Marchi <lucas.demarchi@intel.com>
Acked-by: Gustavo Sousa <gustavo.sousa@intel.com>
Link: https://lore.kernel.org/r/20230601215244.678611-15-matthew.d.roper@intel.com
Signed-off-by: Matt Roper <matthew.d.roper@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
parent 876611c2
@@ -35,7 +35,7 @@ static int ccs_test_migrate(struct xe_gt *gt, struct xe_bo *bo,
 	/* Optionally clear bo *and* CCS data in VRAM. */
 	if (clear) {
-		fence = xe_migrate_clear(gt->migrate, bo, bo->ttm.resource);
+		fence = xe_migrate_clear(gt_to_tile(gt)->migrate, bo, bo->ttm.resource);
 		if (IS_ERR(fence)) {
 			KUNIT_FAIL(test, "Failed to submit bo clear.\n");
 			return PTR_ERR(fence);
@@ -174,7 +174,7 @@ static int evict_test_run_gt(struct xe_device *xe, struct xe_gt *gt, struct kuni
 	struct xe_bo *bo, *external;
 	unsigned int bo_flags = XE_BO_CREATE_USER_BIT |
 		XE_BO_CREATE_VRAM_IF_DGFX(gt_to_tile(gt));
-	struct xe_vm *vm = xe_migrate_get_vm(xe_device_get_root_tile(xe)->primary_gt.migrate);
+	struct xe_vm *vm = xe_migrate_get_vm(xe_device_get_root_tile(xe)->migrate);
 	struct ww_acquire_ctx ww;
 	int err, i;
...
@@ -101,14 +101,14 @@ static const struct xe_migrate_pt_update_ops sanity_ops = {
 static void test_copy(struct xe_migrate *m, struct xe_bo *bo,
 		      struct kunit *test)
 {
-	struct xe_device *xe = gt_to_xe(m->gt);
+	struct xe_device *xe = tile_to_xe(m->tile);
 	u64 retval, expected = 0;
 	bool big = bo->size >= SZ_2M;
 	struct dma_fence *fence;
 	const char *str = big ? "Copying big bo" : "Copying small bo";
 	int err;
-	struct xe_bo *sysmem = xe_bo_create_locked(xe, gt_to_tile(m->gt), NULL,
+	struct xe_bo *sysmem = xe_bo_create_locked(xe, m->tile, NULL,
 						   bo->size,
 						   ttm_bo_type_kernel,
 						   XE_BO_CREATE_SYSTEM_BIT);
@@ -189,7 +189,7 @@ static void test_copy(struct xe_migrate *m, struct xe_bo *bo,
 static void test_pt_update(struct xe_migrate *m, struct xe_bo *pt,
 			   struct kunit *test, bool force_gpu)
 {
-	struct xe_device *xe = gt_to_xe(m->gt);
+	struct xe_device *xe = tile_to_xe(m->tile);
 	struct dma_fence *fence;
 	u64 retval, expected;
 	ktime_t then, now;
@@ -239,16 +239,15 @@ static void test_pt_update(struct xe_migrate *m, struct xe_bo *pt,
 static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test)
 {
-	struct xe_gt *gt = m->gt;
-	struct xe_tile *tile = gt_to_tile(m->gt);
-	struct xe_device *xe = gt_to_xe(gt);
+	struct xe_tile *tile = m->tile;
+	struct xe_device *xe = tile_to_xe(tile);
 	struct xe_bo *pt, *bo = m->pt_bo, *big, *tiny;
 	struct xe_res_cursor src_it;
 	struct dma_fence *fence;
 	u64 retval, expected;
 	struct xe_bb *bb;
 	int err;
-	u8 id = gt->info.id;
+	u8 id = tile->id;

 	err = xe_bo_vmap(bo);
 	if (err) {
@@ -287,7 +286,7 @@ static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test)
 		goto free_pt;
 	}

-	bb = xe_bb_new(gt, 32, xe->info.supports_usm);
+	bb = xe_bb_new(&tile->primary_gt, 32, xe->info.supports_usm);
 	if (IS_ERR(bb)) {
 		KUNIT_FAIL(test, "Failed to create batchbuffer: %li\n",
 			   PTR_ERR(bb));
@@ -324,7 +323,7 @@ static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test)
 	xe_map_wr(xe, &pt->vmap, 0, u32, 0xdeaddead);
 	expected = 0;
-	emit_clear(m->gt, bb, xe_migrate_vm_addr(NUM_KERNEL_PDE - 1, 0), 4, 4,
+	emit_clear(&tile->primary_gt, bb, xe_migrate_vm_addr(NUM_KERNEL_PDE - 1, 0), 4, 4,
 		   IS_DGFX(xe));
 	run_sanity_job(m, xe, bb, 1, "Writing to our newly mapped pagetable",
 		       test);
@@ -391,14 +390,14 @@ static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test)
 static int migrate_test_run_device(struct xe_device *xe)
 {
 	struct kunit *test = xe_cur_kunit();
-	struct xe_gt *gt;
+	struct xe_tile *tile;
 	int id;

-	for_each_gt(gt, xe, id) {
-		struct xe_migrate *m = gt->migrate;
+	for_each_tile(tile, xe, id) {
+		struct xe_migrate *m = tile->migrate;
 		struct ww_acquire_ctx ww;

-		kunit_info(test, "Testing gt id %d.\n", id);
+		kunit_info(test, "Testing tile id %d.\n", id);
 		xe_vm_lock(m->eng->vm, &ww, 0, true);
 		xe_device_mem_access_get(xe);
 		xe_migrate_sanity_test(m, test);
...
@@ -643,7 +643,7 @@ static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict,
 	tile = mem_type_to_tile(xe, old_mem->mem_type);
 	XE_BUG_ON(!tile);
-	XE_BUG_ON(!tile->primary_gt.migrate);
+	XE_BUG_ON(!tile->migrate);

 	trace_xe_bo_move(bo);
 	xe_device_mem_access_get(xe);
@@ -681,9 +681,9 @@ static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict,
 		}
 	} else {
 		if (move_lacks_source)
-			fence = xe_migrate_clear(tile->primary_gt.migrate, bo, new_mem);
+			fence = xe_migrate_clear(tile->migrate, bo, new_mem);
 		else
-			fence = xe_migrate_copy(tile->primary_gt.migrate,
+			fence = xe_migrate_copy(tile->migrate,
 						bo, bo, old_mem, new_mem);
 		if (IS_ERR(fence)) {
 			ret = PTR_ERR(fence);
...
@@ -8,7 +8,7 @@
 #include "xe_bo.h"
 #include "xe_device.h"
 #include "xe_ggtt.h"
-#include "xe_gt.h"
+#include "xe_tile.h"

 /**
  * xe_bo_evict_all - evict all BOs from VRAM
@@ -29,7 +29,7 @@ int xe_bo_evict_all(struct xe_device *xe)
 	struct ttm_device *bdev = &xe->ttm;
 	struct ww_acquire_ctx ww;
 	struct xe_bo *bo;
-	struct xe_gt *gt;
+	struct xe_tile *tile;
 	struct list_head still_in_list;
 	u32 mem_type;
 	u8 id;
@@ -83,8 +83,8 @@ int xe_bo_evict_all(struct xe_device *xe)
 	 * Wait for all user BO to be evicted as those evictions depend on the
 	 * memory moved below.
 	 */
-	for_each_gt(gt, xe, id)
-		xe_gt_migrate_wait(gt);
+	for_each_tile(tile, xe, id)
+		xe_tile_migrate_wait(tile);

 	spin_lock(&xe->pinned.lock);
 	for (;;) {
@@ -186,7 +186,7 @@ int xe_bo_restore_user(struct xe_device *xe)
 {
 	struct ww_acquire_ctx ww;
 	struct xe_bo *bo;
-	struct xe_gt *gt;
+	struct xe_tile *tile;
 	struct list_head still_in_list;
 	u8 id;
 	int ret;
@@ -224,8 +224,8 @@ int xe_bo_restore_user(struct xe_device *xe)
 	spin_unlock(&xe->pinned.lock);

 	/* Wait for validate to complete */
-	for_each_gt(gt, xe, id)
-		xe_gt_migrate_wait(gt);
+	for_each_tile(tile, xe, id)
+		xe_tile_migrate_wait(tile);

 	return 0;
 }
@@ -136,6 +136,9 @@ struct xe_tile {
 		 */
 		struct xe_sa_manager *kernel_bb_pool;
 	} mem;
+
+	/** @migrate: Migration helper for vram blits and clearing */
+	struct xe_migrate *migrate;
 };

 /**
...
@@ -560,7 +560,7 @@ int xe_engine_create_ioctl(struct drm_device *dev, void *data,
 		if (XE_IOCTL_ERR(xe, !hwe))
 			return -EINVAL;

-		migrate_vm = xe_migrate_get_vm(gt->migrate);
+		migrate_vm = xe_migrate_get_vm(gt_to_tile(gt)->migrate);
 		new = xe_engine_create(xe, migrate_vm, logical_mask,
 				       args->width, hwe,
 				       ENGINE_FLAG_PERSISTENT |
...
@@ -43,15 +43,6 @@
 #include "xe_wa.h"
 #include "xe_wopcm.h"

-struct xe_gt *xe_find_full_gt(struct xe_gt *gt)
-{
-	/*
-	 * FIXME: Once the code is prepared for re-enabling, this function will
-	 * be gone. Just return the only possible gt for now.
-	 */
-	return gt;
-}
-
 int xe_gt_alloc(struct xe_device *xe, struct xe_gt *gt)
 {
 	XE_BUG_ON(gt->info.type == XE_GT_TYPE_UNINITIALIZED);
@@ -169,6 +160,7 @@ static int emit_wa_job(struct xe_gt *gt, struct xe_engine *e)
 int xe_gt_record_default_lrcs(struct xe_gt *gt)
 {
 	struct xe_device *xe = gt_to_xe(gt);
+	struct xe_tile *tile = gt_to_tile(gt);
 	struct xe_hw_engine *hwe;
 	enum xe_hw_engine_id id;
 	int err = 0;
@@ -192,7 +184,7 @@ int xe_gt_record_default_lrcs(struct xe_gt *gt)
 		if (!default_lrc)
 			return -ENOMEM;

-		vm = xe_migrate_get_vm(gt->migrate);
+		vm = xe_migrate_get_vm(tile->migrate);
 		e = xe_engine_create(xe, vm, BIT(hwe->logical_instance), 1,
 				     hwe, ENGINE_FLAG_WA);
 		if (IS_ERR(e)) {
@@ -383,13 +375,13 @@ static int all_fw_domain_init(struct xe_gt *gt)
 	}

 	if (!xe_gt_is_media_type(gt)) {
-		gt->migrate = xe_migrate_init(gt);
-		if (IS_ERR(gt->migrate)) {
-			err = PTR_ERR(gt->migrate);
+		struct xe_tile *tile = gt_to_tile(gt);
+
+		tile->migrate = xe_migrate_init(tile);
+		if (IS_ERR(tile->migrate)) {
+			err = PTR_ERR(tile->migrate);
 			goto err_force_wake;
 		}
-	} else {
-		gt->migrate = xe_find_full_gt(gt)->migrate;
 	}

 	err = xe_uc_init_hw(&gt->uc);
@@ -644,11 +636,6 @@ int xe_gt_resume(struct xe_gt *gt)
 	return err;
 }

-void xe_gt_migrate_wait(struct xe_gt *gt)
-{
-	xe_migrate_wait(gt->migrate);
-}
-
 struct xe_hw_engine *xe_gt_hw_engine(struct xe_gt *gt,
 				     enum xe_engine_class class,
 				     u16 instance, bool logical)
...
@@ -24,11 +24,8 @@ void xe_gt_suspend_prepare(struct xe_gt *gt);
 int xe_gt_suspend(struct xe_gt *gt);
 int xe_gt_resume(struct xe_gt *gt);
 void xe_gt_reset_async(struct xe_gt *gt);
-void xe_gt_migrate_wait(struct xe_gt *gt);
 void xe_gt_sanitize(struct xe_gt *gt);
-struct xe_gt *xe_find_full_gt(struct xe_gt *gt);

 /**
  * xe_gt_any_hw_engine_by_reset_domain - scan the list of engines and return the
  * first that matches the same reset domain as @class
...
@@ -208,7 +208,7 @@ static int handle_pagefault(struct xe_gt *gt, struct pagefault *pf)
 	/* Bind VMA only to the GT that has faulted */
 	trace_xe_vma_pf_bind(vma);
-	fence = __xe_pt_bind_vma(tile, vma, xe_gt_migrate_engine(gt), NULL, 0,
+	fence = __xe_pt_bind_vma(tile, vma, xe_tile_migrate_engine(tile), NULL, 0,
 				 vma->tile_present & BIT(tile->id));
 	if (IS_ERR(fence)) {
 		ret = PTR_ERR(fence);
...
@@ -278,9 +278,6 @@ struct xe_gt {
 	/** @hw_engines: hardware engines on the GT */
 	struct xe_hw_engine hw_engines[XE_NUM_HW_ENGINES];

-	/** @migrate: Migration helper for vram blits and clearing */
-	struct xe_migrate *migrate;
-
 	/** @pcode: GT's PCODE */
 	struct {
 		/** @lock: protecting GT's PCODE mailbox data */
...
@@ -36,8 +36,8 @@
 struct xe_migrate {
 	/** @eng: Default engine used for migration */
 	struct xe_engine *eng;
-	/** @gt: Backpointer to the gt this struct xe_migrate belongs to. */
-	struct xe_gt *gt;
+	/** @tile: Backpointer to the tile this struct xe_migrate belongs to. */
+	struct xe_tile *tile;
 	/** @job_mutex: Timeline mutex for @eng. */
 	struct mutex job_mutex;
 	/** @pt_bo: Page-table buffer object. */
@@ -70,17 +70,17 @@ struct xe_migrate {
 #define NUM_PT_PER_BLIT (MAX_PREEMPTDISABLE_TRANSFER / SZ_2M)

 /**
- * xe_gt_migrate_engine() - Get this gt's migrate engine.
- * @gt: The gt.
+ * xe_tile_migrate_engine() - Get this tile's migrate engine.
+ * @tile: The tile.
  *
- * Returns the default migrate engine of this gt.
+ * Returns the default migrate engine of this tile.
  * TODO: Perhaps this function is slightly misplaced, and even unneeded?
  *
  * Return: The default migrate engine
  */
-struct xe_engine *xe_gt_migrate_engine(struct xe_gt *gt)
+struct xe_engine *xe_tile_migrate_engine(struct xe_tile *tile)
 {
-	return gt->migrate->eng;
+	return tile->migrate->eng;
 }

 static void xe_migrate_fini(struct drm_device *dev, void *arg)
@@ -128,8 +128,7 @@ static u64 xe_migrate_vram_ofs(u64 addr)
  */
 static int xe_migrate_create_cleared_bo(struct xe_migrate *m, struct xe_vm *vm)
 {
-	struct xe_gt *gt = m->gt;
-	struct xe_tile *tile = gt_to_tile(gt);
+	struct xe_tile *tile = m->tile;
 	struct xe_device *xe = vm->xe;
 	size_t cleared_size;
 	u64 vram_addr;
@@ -155,14 +154,13 @@ static int xe_migrate_create_cleared_bo(struct xe_migrate *m, struct xe_vm *vm)
 	return 0;
 }

-static int xe_migrate_prepare_vm(struct xe_gt *gt, struct xe_migrate *m,
+static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
 				 struct xe_vm *vm)
 {
-	u8 id = gt->info.id;
+	struct xe_device *xe = tile_to_xe(tile);
+	u8 id = tile->id;
 	u32 num_entries = NUM_PT_SLOTS, num_level = vm->pt_root[id]->level;
 	u32 map_ofs, level, i;
-	struct xe_device *xe = gt_to_xe(m->gt);
-	struct xe_tile *tile = gt_to_tile(m->gt);
 	struct xe_bo *bo, *batch = tile->mem.kernel_bb_pool->bo;
 	u64 entry;
 	int ret;
@@ -231,7 +229,7 @@ static int xe_migrate_prepare_vm(struct xe_gt *gt, struct xe_migrate *m,
 	m->batch_base_ofs = xe_migrate_vram_ofs(batch_addr);

 	if (xe->info.supports_usm) {
-		batch = gt->usm.bb_pool->bo;
+		batch = tile->primary_gt.usm.bb_pool->bo;
 		batch_addr = xe_bo_addr(batch, 0, XE_PAGE_SIZE,
 					&is_vram);
 		m->usm_batch_base_ofs = xe_migrate_vram_ofs(batch_addr);
@@ -308,34 +306,33 @@ static int xe_migrate_prepare_vm(struct xe_gt *gt, struct xe_migrate *m,
 /**
  * xe_migrate_init() - Initialize a migrate context
- * @gt: Back-pointer to the gt we're initializing for.
+ * @tile: Back-pointer to the tile we're initializing for.
  *
  * Return: Pointer to a migrate context on success. Error pointer on error.
  */
-struct xe_migrate *xe_migrate_init(struct xe_gt *gt)
+struct xe_migrate *xe_migrate_init(struct xe_tile *tile)
 {
-	struct xe_device *xe = gt_to_xe(gt);
+	struct xe_device *xe = tile_to_xe(tile);
+	struct xe_gt *primary_gt = &tile->primary_gt;
 	struct xe_migrate *m;
 	struct xe_vm *vm;
 	struct ww_acquire_ctx ww;
 	int err;

-	XE_BUG_ON(xe_gt_is_media_type(gt));
-
 	m = drmm_kzalloc(&xe->drm, sizeof(*m), GFP_KERNEL);
 	if (!m)
 		return ERR_PTR(-ENOMEM);

-	m->gt = gt;
+	m->tile = tile;

 	/* Special layout, prepared below.. */
 	vm = xe_vm_create(xe, XE_VM_FLAG_MIGRATION |
-			  XE_VM_FLAG_SET_GT_ID(gt));
+			  XE_VM_FLAG_SET_TILE_ID(tile));
 	if (IS_ERR(vm))
 		return ERR_CAST(vm);

 	xe_vm_lock(vm, &ww, 0, false);
-	err = xe_migrate_prepare_vm(gt, m, vm);
+	err = xe_migrate_prepare_vm(tile, m, vm);
 	xe_vm_unlock(vm, &ww);
 	if (err) {
 		xe_vm_close_and_put(vm);
@@ -343,9 +340,9 @@ struct xe_migrate *xe_migrate_init(struct xe_gt *gt)
 	}

 	if (xe->info.supports_usm) {
-		struct xe_hw_engine *hwe = xe_gt_hw_engine(gt,
+		struct xe_hw_engine *hwe = xe_gt_hw_engine(primary_gt,
 							   XE_ENGINE_CLASS_COPY,
-							   gt->usm.reserved_bcs_instance,
+							   primary_gt->usm.reserved_bcs_instance,
 							   false);
 		if (!hwe)
 			return ERR_PTR(-EINVAL);
@@ -354,7 +351,7 @@ struct xe_migrate *xe_migrate_init(struct xe_gt *gt)
 					   BIT(hwe->logical_instance), 1,
 					   hwe, ENGINE_FLAG_KERNEL);
 	} else {
-		m->eng = xe_engine_create_class(xe, gt, vm,
+		m->eng = xe_engine_create_class(xe, primary_gt, vm,
 						XE_ENGINE_CLASS_COPY,
 						ENGINE_FLAG_KERNEL);
 	}
@@ -549,7 +546,7 @@ static u32 xe_migrate_ccs_copy(struct xe_migrate *m,
 			       u64 dst_ofs, bool dst_is_vram, u32 dst_size,
 			       u64 ccs_ofs, bool copy_ccs)
 {
-	struct xe_gt *gt = m->gt;
+	struct xe_gt *gt = &m->tile->primary_gt;
 	u32 flush_flags = 0;

 	if (xe_device_has_flat_ccs(gt_to_xe(gt)) && !copy_ccs && dst_is_vram) {
@@ -613,7 +610,7 @@ struct dma_fence *xe_migrate_copy(struct xe_migrate *m,
 				  struct ttm_resource *src,
 				  struct ttm_resource *dst)
 {
-	struct xe_gt *gt = m->gt;
+	struct xe_gt *gt = &m->tile->primary_gt;
 	struct xe_device *xe = gt_to_xe(gt);
 	struct dma_fence *fence = NULL;
 	u64 size = src_bo->size;
@@ -876,7 +873,7 @@ struct dma_fence *xe_migrate_clear(struct xe_migrate *m,
 				   struct ttm_resource *dst)
 {
 	bool clear_vram = mem_type_is_vram(dst->mem_type);
-	struct xe_gt *gt = m->gt;
+	struct xe_gt *gt = &m->tile->primary_gt;
 	struct xe_device *xe = gt_to_xe(gt);
 	struct dma_fence *fence = NULL;
 	u64 size = bo->size;
@@ -1083,7 +1080,7 @@ xe_migrate_update_pgtables_cpu(struct xe_migrate *m,
 	for (i = 0; i < num_updates; i++) {
 		const struct xe_vm_pgtable_update *update = &updates[i];

-		ops->populate(pt_update, gt_to_tile(m->gt), &update->pt_bo->vmap, NULL,
+		ops->populate(pt_update, m->tile, &update->pt_bo->vmap, NULL,
 			      update->ofs, update->qwords, update);
 	}

@@ -1150,9 +1147,9 @@ xe_migrate_update_pgtables(struct xe_migrate *m,
 			   struct xe_migrate_pt_update *pt_update)
 {
 	const struct xe_migrate_pt_update_ops *ops = pt_update->ops;
-	struct xe_gt *gt = m->gt;
-	struct xe_tile *tile = gt_to_tile(m->gt);
-	struct xe_device *xe = gt_to_xe(gt);
+	struct xe_tile *tile = m->tile;
+	struct xe_gt *gt = &tile->primary_gt;
+	struct xe_device *xe = tile_to_xe(tile);
 	struct xe_sched_job *job;
 	struct dma_fence *fence;
 	struct drm_suballoc *sa_bo = NULL;
...
@@ -71,7 +71,7 @@ struct xe_migrate_pt_update {
 	struct xe_vma *vma;
 };

-struct xe_migrate *xe_migrate_init(struct xe_gt *gt);
+struct xe_migrate *xe_migrate_init(struct xe_tile *tile);

 struct dma_fence *xe_migrate_copy(struct xe_migrate *m,
 				  struct xe_bo *src_bo,
@@ -97,5 +97,5 @@ xe_migrate_update_pgtables(struct xe_migrate *m,
 void xe_migrate_wait(struct xe_migrate *m);

-struct xe_engine *xe_gt_migrate_engine(struct xe_gt *gt);
+struct xe_engine *xe_tile_migrate_engine(struct xe_tile *tile);

 #endif
@@ -1303,7 +1303,7 @@ __xe_pt_bind_vma(struct xe_tile *tile, struct xe_vma *vma, struct xe_engine *e,
 		return ERR_PTR(-ENOMEM);
 	}

-	fence = xe_migrate_update_pgtables(tile->primary_gt.migrate,
+	fence = xe_migrate_update_pgtables(tile->migrate,
 					   vm, vma->bo,
 					   e ? e : vm->eng[tile->id],
 					   entries, num_entries,
@@ -1624,7 +1624,7 @@ __xe_pt_unbind_vma(struct xe_tile *tile, struct xe_vma *vma, struct xe_engine *e
 	 * clear again here. The eviction may have updated pagetables at a
 	 * lower level, because it needs to be more conservative.
 	 */
-	fence = xe_migrate_update_pgtables(tile->primary_gt.migrate,
+	fence = xe_migrate_update_pgtables(tile->migrate,
 					   vm, NULL, e ? e :
 					   vm->eng[tile->id],
 					   entries, num_entries,
...
@@ -7,6 +7,7 @@
 #include "xe_device.h"
 #include "xe_ggtt.h"
+#include "xe_migrate.h"
 #include "xe_sa.h"
 #include "xe_tile.h"
 #include "xe_ttm_vram_mgr.h"
@@ -88,3 +89,8 @@ int xe_tile_init_noalloc(struct xe_tile *tile)
 	xe_device_mem_access_put(tile_to_xe(tile));
 	return err;
 }
+
+void xe_tile_migrate_wait(struct xe_tile *tile)
+{
+	xe_migrate_wait(tile->migrate);
+}
@@ -11,4 +11,6 @@ struct xe_tile;
 int xe_tile_alloc(struct xe_tile *tile);
 int xe_tile_init_noalloc(struct xe_tile *tile);

+void xe_tile_migrate_wait(struct xe_tile *tile);
+
 #endif
@@ -1267,7 +1267,7 @@ struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags)
 		if (!vm->pt_root[id])
 			continue;

-		migrate_vm = xe_migrate_get_vm(gt->migrate);
+		migrate_vm = xe_migrate_get_vm(tile->migrate);
 		eng = xe_engine_create_class(xe, gt, migrate_vm,
 					     XE_ENGINE_CLASS_COPY,
 					     ENGINE_FLAG_VM);
...
@@ -179,7 +179,7 @@ struct xe_vm {
 #define XE_VM_FLAG_SCRATCH_PAGE	BIT(4)
 #define XE_VM_FLAG_FAULT_MODE	BIT(5)
 #define XE_VM_FLAG_GT_ID(flags)	(((flags) >> 6) & 0x3)
-#define XE_VM_FLAG_SET_GT_ID(gt)	((gt)->info.id << 6)
+#define XE_VM_FLAG_SET_TILE_ID(tile)	((tile)->id << 6)

 	unsigned long flags;

 	/** @composite_fence_ctx: context composite fence */
...