Commit 21ed3327 authored by Matthew Brost, committed by Rodrigo Vivi

drm/xe: Add helpers to hide struct xe_vma internals

This will help with the GPUVA port as the internals of struct xe_vma
will change.

v2: Update comment around helpers
Reviewed-by: Thomas Hellström <thomas.hellstrom@linux.kernel.org>
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
parent 9d858b69
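The helpers added to xe_vm.h in this patch expose a VMA's start, size and exclusive end while the struct itself still stores an inclusive end field, so callers stop open-coding vma->end - vma->start + 1. The following is a minimal, self-contained sketch of the same accessor pattern in plain C; demo_vma and the demo_vma_* names are hypothetical stand-ins, not the driver's types.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for struct xe_vma: 'end' is inclusive, matching the
 * current field layout that the new accessors are meant to hide. */
struct demo_vma {
        uint64_t start;
        uint64_t end;   /* last byte covered by the mapping */
};

/* Mirrors of xe_vma_start()/xe_vma_size()/xe_vma_end(): callers only ever see
 * a half-open [start, end) range and a size, never the raw fields. */
static inline uint64_t demo_vma_start(const struct demo_vma *vma)
{
        return vma->start;
}

static inline uint64_t demo_vma_size(const struct demo_vma *vma)
{
        return vma->end - vma->start + 1;
}

static inline uint64_t demo_vma_end(const struct demo_vma *vma)
{
        return demo_vma_start(vma) + demo_vma_size(vma);        /* exclusive */
}

int main(void)
{
        struct demo_vma vma = { .start = 0x1000, .end = 0x1fff };

        /* Prints start=0x1000 size=0x1000 end=0x2000. */
        printf("start=%#llx size=%#llx end=%#llx\n",
               (unsigned long long)demo_vma_start(&vma),
               (unsigned long long)demo_vma_size(&vma),
               (unsigned long long)demo_vma_end(&vma));
        return 0;
}

When the GPUVA port changes how the range is stored, only the accessors need updating; the callers converted below keep compiling unchanged.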
@@ -426,7 +426,7 @@ static int xe_bo_trigger_rebind(struct xe_device *xe, struct xe_bo *bo,
         }
         list_for_each_entry(vma, &bo->vmas, bo_link) {
-                struct xe_vm *vm = vma->vm;
+                struct xe_vm *vm = xe_vma_vm(vma);
                 trace_xe_vma_evict(vma);
@@ -454,7 +454,7 @@ static int xe_bo_trigger_rebind(struct xe_device *xe, struct xe_bo *bo,
                 } else {
                         bool vm_resv_locked = false;
-                        struct xe_vm *vm = vma->vm;
+                        struct xe_vm *vm = xe_vma_vm(vma);
                         /*
                          * We need to put the vma on the vm's rebind_list,
@@ -126,7 +126,7 @@ static int xe_exec_begin(struct xe_engine *e, struct ww_acquire_ctx *ww,
                 if (xe_vma_is_userptr(vma))
                         continue;
-                err = xe_bo_validate(vma->bo, vm, false);
+                err = xe_bo_validate(xe_vma_bo(vma), vm, false);
                 if (err) {
                         xe_vm_unlock_dma_resv(vm, tv_onstack, *tv, ww, objs);
                         *tv = NULL;
@@ -77,7 +77,8 @@ static bool vma_is_valid(struct xe_tile *tile, struct xe_vma *vma)
 static bool vma_matches(struct xe_vma *vma, struct xe_vma *lookup)
 {
-        if (lookup->start > vma->end || lookup->end < vma->start)
+        if (xe_vma_start(lookup) > xe_vma_end(vma) - 1 ||
+            xe_vma_end(lookup) - 1 < xe_vma_start(vma))
                 return false;
         return true;
@@ -171,7 +172,7 @@ static int handle_pagefault(struct xe_gt *gt, struct pagefault *pf)
         }
         /* Lock VM and BOs dma-resv */
-        bo = vma->bo;
+        bo = xe_vma_bo(vma);
         if (only_needs_bo_lock(bo)) {
                 /* This path ensures the BO's LRU is updated */
                 ret = xe_bo_lock(bo, &ww, xe->info.tile_count, false);
@@ -538,7 +539,7 @@ static int handle_acc(struct xe_gt *gt, struct acc *acc)
                 goto unlock_vm;
         /* Lock VM and BOs dma-resv */
-        bo = vma->bo;
+        bo = xe_vma_bo(vma);
         if (only_needs_bo_lock(bo)) {
                 /* This path ensures the BO's LRU is updated */
                 ret = xe_bo_lock(bo, &ww, xe->info.tile_count, false);
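A note on the vma_matches() hunk above: the old check compared inclusive last-byte addresses, while xe_vma_end() returns one past the last byte, so the new form subtracts 1 to keep the overlap semantics identical. A small standalone sketch of that equivalence, with hypothetical names:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* Overlap test on inclusive ranges [a_start, a_last] and [b_start, b_last],
 * the form the old vma_matches() used. */
static bool overlaps_inclusive(uint64_t a_start, uint64_t a_last,
                               uint64_t b_start, uint64_t b_last)
{
        return !(b_start > a_last || b_last < a_start);
}

/* The same test with half-open ends (one past the last byte), the convention
 * the new xe_vma_end()-style helpers use; subtracting 1 recovers the last
 * byte, so both forms accept exactly the same pairs of non-empty ranges. */
static bool overlaps_half_open(uint64_t a_start, uint64_t a_end,
                               uint64_t b_start, uint64_t b_end)
{
        return !(b_start > a_end - 1 || b_end - 1 < a_start);
}

int main(void)
{
        /* [0x1000, 0x1fff] vs [0x1800, 0x2fff]: overlap in both forms. */
        assert(overlaps_inclusive(0x1000, 0x1fff, 0x1800, 0x2fff));
        assert(overlaps_half_open(0x1000, 0x2000, 0x1800, 0x3000));

        /* [0x1000, 0x1fff] vs [0x2000, 0x2fff]: adjacent, no overlap. */
        assert(!overlaps_inclusive(0x1000, 0x1fff, 0x2000, 0x2fff));
        assert(!overlaps_half_open(0x1000, 0x2000, 0x2000, 0x3000));
        return 0;
}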
@@ -203,8 +203,8 @@ int xe_gt_tlb_invalidation_vma(struct xe_gt *gt,
         if (!xe->info.has_range_tlb_invalidation) {
                 action[len++] = MAKE_INVAL_OP(XE_GUC_TLB_INVAL_FULL);
         } else {
-                u64 start = vma->start;
-                u64 length = vma->end - vma->start + 1;
+                u64 start = xe_vma_start(vma);
+                u64 length = xe_vma_size(vma);
                 u64 align, end;
                 if (length < SZ_4K)
@@ -217,12 +217,12 @@ int xe_gt_tlb_invalidation_vma(struct xe_gt *gt,
                  * address mask covering the required range.
                  */
                 align = roundup_pow_of_two(length);
-                start = ALIGN_DOWN(vma->start, align);
-                end = ALIGN(vma->start + length, align);
+                start = ALIGN_DOWN(xe_vma_start(vma), align);
+                end = ALIGN(xe_vma_end(vma), align);
                 length = align;
                 while (start + length < end) {
                         length <<= 1;
-                        start = ALIGN_DOWN(vma->start, length);
+                        start = ALIGN_DOWN(xe_vma_start(vma), length);
                 }
                 /*
@@ -231,7 +231,7 @@ int xe_gt_tlb_invalidation_vma(struct xe_gt *gt,
                  */
                 if (length >= SZ_2M) {
                         length = max_t(u64, SZ_16M, length);
-                        start = ALIGN_DOWN(vma->start, length);
+                        start = ALIGN_DOWN(xe_vma_start(vma), length);
                 }
                 XE_BUG_ON(length < SZ_4K);
@@ -240,7 +240,7 @@ int xe_gt_tlb_invalidation_vma(struct xe_gt *gt,
                 XE_BUG_ON(!IS_ALIGNED(start, length));
                 action[len++] = MAKE_INVAL_OP(XE_GUC_TLB_INVAL_PAGE_SELECTIVE);
-                action[len++] = vma->vm->usm.asid;
+                action[len++] = xe_vma_vm(vma)->usm.asid;
                 action[len++] = lower_32_bits(start);
                 action[len++] = upper_32_bits(start);
                 action[len++] = ilog2(length) - ilog2(SZ_4K);
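The selective invalidation path above has to hand the GuC a block described by a start address and a power-of-two length (sent as ilog2 of the length in 4 KiB units), so it rounds the VMA out to the smallest self-aligned power-of-two block that covers it. Below is a standalone sketch of just that rounding loop, omitting the SZ_4K floor and the 2M/16M special case shown in the hunk; align_down, align_up, roundup_pow_of_two and invalidation_range are local stand-ins for the kernel macros, not the driver's code.

#include <stdint.h>
#include <stdio.h>

/* Local stand-ins for ALIGN_DOWN/ALIGN/roundup_pow_of_two; 'a' must be a
 * power of two. */
static uint64_t align_down(uint64_t x, uint64_t a) { return x & ~(a - 1); }
static uint64_t align_up(uint64_t x, uint64_t a)   { return (x + a - 1) & ~(a - 1); }

static uint64_t roundup_pow_of_two(uint64_t x)
{
        uint64_t r = 1;

        while (r < x)
                r <<= 1;
        return r;
}

/* Mirrors the loop in xe_gt_tlb_invalidation_vma(): find the smallest
 * power-of-two length, aligned to itself, whose block covers the whole
 * half-open range [vma_start, vma_end). */
static void invalidation_range(uint64_t vma_start, uint64_t vma_end,
                               uint64_t *out_start, uint64_t *out_length)
{
        uint64_t length = roundup_pow_of_two(vma_end - vma_start);
        uint64_t start = align_down(vma_start, length);
        uint64_t end = align_up(vma_end, length);

        /* If the aligned block still ends before 'end', double it. */
        while (start + length < end) {
                length <<= 1;
                start = align_down(vma_start, length);
        }

        *out_start = start;
        *out_length = length;
}

int main(void)
{
        uint64_t start, length;

        /* A 12 KiB VMA at 0x5000 is covered by the 16 KiB block at 0x4000. */
        invalidation_range(0x5000, 0x8000, &start, &length);
        printf("start=%#llx length=%#llx\n",
               (unsigned long long)start, (unsigned long long)length);
        return 0;
}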
@@ -96,7 +96,7 @@ static dma_addr_t vma_addr(struct xe_vma *vma, u64 offset,
                                &cur);
                 return xe_res_dma(&cur) + offset;
         } else {
-                return xe_bo_addr(vma->bo, offset, page_size, is_vram);
+                return xe_bo_addr(xe_vma_bo(vma), offset, page_size, is_vram);
         }
 }
@@ -749,7 +749,7 @@ static int
 xe_pt_stage_bind(struct xe_tile *tile, struct xe_vma *vma,
                  struct xe_vm_pgtable_update *entries, u32 *num_entries)
 {
-        struct xe_bo *bo = vma->bo;
+        struct xe_bo *bo = xe_vma_bo(vma);
         bool is_vram = !xe_vma_is_userptr(vma) && bo && xe_bo_is_vram(bo);
         struct xe_res_cursor curs;
         struct xe_pt_stage_bind_walk xe_walk = {
@@ -758,15 +758,15 @@ xe_pt_stage_bind(struct xe_tile *tile, struct xe_vma *vma,
                         .shifts = xe_normal_pt_shifts,
                         .max_level = XE_PT_HIGHEST_LEVEL,
                 },
-                .vm = vma->vm,
+                .vm = xe_vma_vm(vma),
                 .tile = tile,
                 .curs = &curs,
-                .va_curs_start = vma->start,
+                .va_curs_start = xe_vma_start(vma),
                 .pte_flags = vma->pte_flags,
                 .wupd.entries = entries,
-                .needs_64K = (vma->vm->flags & XE_VM_FLAGS_64K) && is_vram,
+                .needs_64K = (xe_vma_vm(vma)->flags & XE_VM_FLAGS_64K) && is_vram,
         };
-        struct xe_pt *pt = vma->vm->pt_root[tile->id];
+        struct xe_pt *pt = xe_vma_vm(vma)->pt_root[tile->id];
         int ret;
         if (is_vram) {
@@ -788,20 +788,20 @@ xe_pt_stage_bind(struct xe_tile *tile, struct xe_vma *vma,
         if (!xe_vma_is_null(vma)) {
                 if (xe_vma_is_userptr(vma))
-                        xe_res_first_sg(vma->userptr.sg, 0,
-                                        vma->end - vma->start + 1, &curs);
+                        xe_res_first_sg(vma->userptr.sg, 0, xe_vma_size(vma),
+                                        &curs);
                 else if (xe_bo_is_vram(bo) || xe_bo_is_stolen(bo))
-                        xe_res_first(bo->ttm.resource, vma->bo_offset,
-                                     vma->end - vma->start + 1, &curs);
+                        xe_res_first(bo->ttm.resource, xe_vma_bo_offset(vma),
+                                     xe_vma_size(vma), &curs);
                 else
-                        xe_res_first_sg(xe_bo_get_sg(bo), vma->bo_offset,
-                                        vma->end - vma->start + 1, &curs);
+                        xe_res_first_sg(xe_bo_get_sg(bo), xe_vma_bo_offset(vma),
+                                        xe_vma_size(vma), &curs);
         } else {
-                curs.size = vma->end - vma->start + 1;
+                curs.size = xe_vma_size(vma);
         }
-        ret = xe_pt_walk_range(&pt->base, pt->level, vma->start, vma->end + 1,
-                               &xe_walk.base);
+        ret = xe_pt_walk_range(&pt->base, pt->level, xe_vma_start(vma),
+                               xe_vma_end(vma), &xe_walk.base);
         *num_entries = xe_walk.wupd.num_used_entries;
         return ret;
@@ -933,13 +933,13 @@ bool xe_pt_zap_ptes(struct xe_tile *tile, struct xe_vma *vma)
                 },
                 .tile = tile,
         };
-        struct xe_pt *pt = vma->vm->pt_root[tile->id];
+        struct xe_pt *pt = xe_vma_vm(vma)->pt_root[tile->id];
         if (!(vma->tile_present & BIT(tile->id)))
                 return false;
-        (void)xe_pt_walk_shared(&pt->base, pt->level, vma->start, vma->end + 1,
-                                &xe_walk.base);
+        (void)xe_pt_walk_shared(&pt->base, pt->level, xe_vma_start(vma),
+                                xe_vma_end(vma), &xe_walk.base);
         return xe_walk.needs_invalidate;
 }
@@ -974,21 +974,21 @@ static void xe_pt_abort_bind(struct xe_vma *vma,
                         continue;
                 for (j = 0; j < entries[i].qwords; j++)
-                        xe_pt_destroy(entries[i].pt_entries[j].pt, vma->vm->flags, NULL);
+                        xe_pt_destroy(entries[i].pt_entries[j].pt, xe_vma_vm(vma)->flags, NULL);
                 kfree(entries[i].pt_entries);
         }
 }
 static void xe_pt_commit_locks_assert(struct xe_vma *vma)
 {
-        struct xe_vm *vm = vma->vm;
+        struct xe_vm *vm = xe_vma_vm(vma);
         lockdep_assert_held(&vm->lock);
         if (xe_vma_is_userptr(vma))
                 lockdep_assert_held_read(&vm->userptr.notifier_lock);
         else if (!xe_vma_is_null(vma))
-                dma_resv_assert_held(vma->bo->ttm.base.resv);
+                dma_resv_assert_held(xe_vma_bo(vma)->ttm.base.resv);
         dma_resv_assert_held(&vm->resv);
 }
@@ -1021,7 +1021,7 @@ static void xe_pt_commit_bind(struct xe_vma *vma,
                                 if (xe_pt_entry(pt_dir, j_))
                                         xe_pt_destroy(xe_pt_entry(pt_dir, j_),
-                                                      vma->vm->flags, deferred);
+                                                      xe_vma_vm(vma)->flags, deferred);
                                 pt_dir->dir.entries[j_] = &newpte->base;
                         }
@@ -1082,7 +1082,7 @@ static int xe_pt_userptr_inject_eagain(struct xe_vma *vma)
         static u32 count;
         if (count++ % divisor == divisor - 1) {
-                struct xe_vm *vm = vma->vm;
+                struct xe_vm *vm = xe_vma_vm(vma);
                 vma->userptr.divisor = divisor << 1;
                 spin_lock(&vm->userptr.invalidated_lock);
@@ -1125,7 +1125,7 @@ static int xe_pt_userptr_pre_commit(struct xe_migrate_pt_update *pt_update)
                 container_of(pt_update, typeof(*userptr_update), base);
         struct xe_vma *vma = pt_update->vma;
         unsigned long notifier_seq = vma->userptr.notifier_seq;
-        struct xe_vm *vm = vma->vm;
+        struct xe_vm *vm = xe_vma_vm(vma);
         userptr_update->locked = false;
@@ -1296,19 +1296,19 @@ __xe_pt_bind_vma(struct xe_tile *tile, struct xe_vma *vma, struct xe_engine *e,
                 },
                 .bind = true,
         };
-        struct xe_vm *vm = vma->vm;
+        struct xe_vm *vm = xe_vma_vm(vma);
         u32 num_entries;
         struct dma_fence *fence;
         struct invalidation_fence *ifence = NULL;
         int err;
         bind_pt_update.locked = false;
-        xe_bo_assert_held(vma->bo);
+        xe_bo_assert_held(xe_vma_bo(vma));
         xe_vm_assert_held(vm);
-        vm_dbg(&vma->vm->xe->drm,
+        vm_dbg(&xe_vma_vm(vma)->xe->drm,
                "Preparing bind, with range [%llx...%llx) engine %p.\n",
-               vma->start, vma->end, e);
+               xe_vma_start(vma), xe_vma_end(vma) - 1, e);
         err = xe_pt_prepare_bind(tile, vma, entries, &num_entries, rebind);
         if (err)
@@ -1337,7 +1337,7 @@ __xe_pt_bind_vma(struct xe_tile *tile, struct xe_vma *vma, struct xe_engine *e,
         }
         fence = xe_migrate_update_pgtables(tile->migrate,
-                                           vm, vma->bo,
+                                           vm, xe_vma_bo(vma),
                                            e ? e : vm->eng[tile->id],
                                            entries, num_entries,
                                            syncs, num_syncs,
@@ -1363,8 +1363,8 @@ __xe_pt_bind_vma(struct xe_tile *tile, struct xe_vma *vma, struct xe_engine *e,
                                    DMA_RESV_USAGE_KERNEL :
                                    DMA_RESV_USAGE_BOOKKEEP);
-                if (!xe_vma_has_no_bo(vma) && !vma->bo->vm)
-                        dma_resv_add_fence(vma->bo->ttm.base.resv, fence,
+                if (!xe_vma_has_no_bo(vma) && !xe_vma_bo(vma)->vm)
+                        dma_resv_add_fence(xe_vma_bo(vma)->ttm.base.resv, fence,
                                            DMA_RESV_USAGE_BOOKKEEP);
                 xe_pt_commit_bind(vma, entries, num_entries, rebind,
                                   bind_pt_update.locked ? &deferred : NULL);
@@ -1526,14 +1526,14 @@ static unsigned int xe_pt_stage_unbind(struct xe_tile *tile, struct xe_vma *vma,
                         .max_level = XE_PT_HIGHEST_LEVEL,
                 },
                 .tile = tile,
-                .modified_start = vma->start,
-                .modified_end = vma->end + 1,
+                .modified_start = xe_vma_start(vma),
+                .modified_end = xe_vma_end(vma),
                 .wupd.entries = entries,
         };
-        struct xe_pt *pt = vma->vm->pt_root[tile->id];
+        struct xe_pt *pt = xe_vma_vm(vma)->pt_root[tile->id];
-        (void)xe_pt_walk_shared(&pt->base, pt->level, vma->start, vma->end + 1,
-                                &xe_walk.base);
+        (void)xe_pt_walk_shared(&pt->base, pt->level, xe_vma_start(vma),
+                                xe_vma_end(vma), &xe_walk.base);
         return xe_walk.wupd.num_used_entries;
 }
@@ -1545,7 +1545,7 @@ xe_migrate_clear_pgtable_callback(struct xe_migrate_pt_update *pt_update,
                                   const struct xe_vm_pgtable_update *update)
 {
         struct xe_vma *vma = pt_update->vma;
-        u64 empty = __xe_pt_empty_pte(tile, vma->vm, update->pt->level);
+        u64 empty = __xe_pt_empty_pte(tile, xe_vma_vm(vma), update->pt->level);
         int i;
         if (map && map->is_iomem)
@@ -1581,7 +1581,7 @@ xe_pt_commit_unbind(struct xe_vma *vma,
                      i++) {
                         if (xe_pt_entry(pt_dir, i))
                                 xe_pt_destroy(xe_pt_entry(pt_dir, i),
-                                              vma->vm->flags, deferred);
+                                              xe_vma_vm(vma)->flags, deferred);
                         pt_dir->dir.entries[i] = NULL;
                 }
@@ -1630,18 +1630,18 @@ __xe_pt_unbind_vma(struct xe_tile *tile, struct xe_vma *vma, struct xe_engine *e
                         .vma = vma,
                 },
         };
-        struct xe_vm *vm = vma->vm;
+        struct xe_vm *vm = xe_vma_vm(vma);
         u32 num_entries;
         struct dma_fence *fence = NULL;
         struct invalidation_fence *ifence;
         LLIST_HEAD(deferred);
-        xe_bo_assert_held(vma->bo);
+        xe_bo_assert_held(xe_vma_bo(vma));
         xe_vm_assert_held(vm);
-        vm_dbg(&vma->vm->xe->drm,
+        vm_dbg(&xe_vma_vm(vma)->xe->drm,
                "Preparing unbind, with range [%llx...%llx) engine %p.\n",
-               vma->start, vma->end, e);
+               xe_vma_start(vma), xe_vma_end(vma) - 1, e);
         num_entries = xe_pt_stage_unbind(tile, vma, entries);
         XE_BUG_ON(num_entries > ARRAY_SIZE(entries));
@@ -1680,8 +1680,8 @@ __xe_pt_unbind_vma(struct xe_tile *tile, struct xe_vma *vma, struct xe_engine *e
                                    DMA_RESV_USAGE_BOOKKEEP);
                 /* This fence will be installed by caller when doing eviction */
-                if (!xe_vma_has_no_bo(vma) && !vma->bo->vm)
-                        dma_resv_add_fence(vma->bo->ttm.base.resv, fence,
+                if (!xe_vma_has_no_bo(vma) && !xe_vma_bo(vma)->vm)
+                        dma_resv_add_fence(xe_vma_bo(vma)->ttm.base.resv, fence,
                                            DMA_RESV_USAGE_BOOKKEEP);
                 xe_pt_commit_unbind(vma, entries, num_entries,
                                     unbind_pt_update.locked ? &deferred : NULL);
@@ -19,7 +19,7 @@
 #include "xe_gt_types.h"
 #include "xe_guc_engine_types.h"
 #include "xe_sched_job.h"
-#include "xe_vm_types.h"
+#include "xe_vm.h"
 DECLARE_EVENT_CLASS(xe_gt_tlb_invalidation_fence,
                     TP_PROTO(struct xe_gt_tlb_invalidation_fence *fence),
@@ -374,10 +374,10 @@ DECLARE_EVENT_CLASS(xe_vma,
                     TP_fast_assign(
                            __entry->vma = (unsigned long)vma;
-                           __entry->asid = vma->vm->usm.asid;
-                           __entry->start = vma->start;
-                           __entry->end = vma->end;
-                           __entry->ptr = (u64)vma->userptr.ptr;
+                           __entry->asid = xe_vma_vm(vma)->usm.asid;
+                           __entry->start = xe_vma_start(vma);
+                           __entry->end = xe_vma_end(vma) - 1;
+                           __entry->ptr = xe_vma_userptr(vma);
                            ),
                     TP_printk("vma=0x%016llx, asid=0x%05x, start=0x%012llx, end=0x%012llx, ptr=0x%012llx,",
@@ -61,7 +61,66 @@ static inline bool xe_vm_is_closed_or_banned(struct xe_vm *vm)
 }
 struct xe_vma *
-xe_vm_find_overlapping_vma(struct xe_vm *vm, const struct xe_vma *vma);
+xe_vm_find_overlapping_vma(struct xe_vm *vm, struct xe_vma *vma);
+
+/**
+ * DOC: Provide accessors for vma members to facilitate easy change of
+ * implementation.
+ */
+static inline u64 xe_vma_start(struct xe_vma *vma)
+{
+        return vma->start;
+}
+
+static inline u64 xe_vma_size(struct xe_vma *vma)
+{
+        return vma->end - vma->start + 1;
+}
+
+static inline u64 xe_vma_end(struct xe_vma *vma)
+{
+        return xe_vma_start(vma) + xe_vma_size(vma);
+}
+
+static inline u64 xe_vma_bo_offset(struct xe_vma *vma)
+{
+        return vma->bo_offset;
+}
+
+static inline struct xe_bo *xe_vma_bo(struct xe_vma *vma)
+{
+        return vma->bo;
+}
+
+static inline struct xe_vm *xe_vma_vm(struct xe_vma *vma)
+{
+        return vma->vm;
+}
+
+static inline bool xe_vma_read_only(struct xe_vma *vma)
+{
+        return vma->pte_flags & XE_PTE_FLAG_READ_ONLY;
+}
+
+static inline u64 xe_vma_userptr(struct xe_vma *vma)
+{
+        return vma->userptr.ptr;
+}
+
+static inline bool xe_vma_is_null(struct xe_vma *vma)
+{
+        return vma->pte_flags & XE_PTE_FLAG_NULL;
+}
+
+static inline bool xe_vma_has_no_bo(struct xe_vma *vma)
+{
+        return !xe_vma_bo(vma);
+}
+
+static inline bool xe_vma_is_userptr(struct xe_vma *vma)
+{
+        return xe_vma_has_no_bo(vma) && !xe_vma_is_null(vma);
+}
 #define xe_vm_assert_held(vm) dma_resv_assert_held(&(vm)->resv)
@@ -126,21 +185,6 @@ static inline void xe_vm_reactivate_rebind(struct xe_vm *vm)
         }
 }
-static inline bool xe_vma_is_null(struct xe_vma *vma)
-{
-        return vma->pte_flags & XE_PTE_FLAG_NULL;
-}
-
-static inline bool xe_vma_has_no_bo(struct xe_vma *vma)
-{
-        return !vma->bo;
-}
-
-static inline bool xe_vma_is_userptr(struct xe_vma *vma)
-{
-        return xe_vma_has_no_bo(vma) && !xe_vma_is_null(vma);
-}
 int xe_vma_userptr_pin_pages(struct xe_vma *vma);
 int xe_vma_userptr_check_repin(struct xe_vma *vma);
@@ -30,7 +30,7 @@ static int madvise_preferred_mem_class(struct xe_device *xe, struct xe_vm *vm,
                 struct xe_bo *bo;
                 struct ww_acquire_ctx ww;
-                bo = vmas[i]->bo;
+                bo = xe_vma_bo(vmas[i]);
                 err = xe_bo_lock(bo, &ww, 0, true);
                 if (err)
@@ -55,7 +55,7 @@ static int madvise_preferred_gt(struct xe_device *xe, struct xe_vm *vm,
                 struct xe_bo *bo;
                 struct ww_acquire_ctx ww;
-                bo = vmas[i]->bo;
+                bo = xe_vma_bo(vmas[i]);
                 err = xe_bo_lock(bo, &ww, 0, true);
                 if (err)
@@ -91,7 +91,7 @@ static int madvise_preferred_mem_class_gt(struct xe_device *xe,
                 struct xe_bo *bo;
                 struct ww_acquire_ctx ww;
-                bo = vmas[i]->bo;
+                bo = xe_vma_bo(vmas[i]);
                 err = xe_bo_lock(bo, &ww, 0, true);
                 if (err)
@@ -114,7 +114,7 @@ static int madvise_cpu_atomic(struct xe_device *xe, struct xe_vm *vm,
                 struct xe_bo *bo;
                 struct ww_acquire_ctx ww;
-                bo = vmas[i]->bo;
+                bo = xe_vma_bo(vmas[i]);
                 if (XE_IOCTL_ERR(xe, !(bo->flags & XE_BO_CREATE_SYSTEM_BIT)))
                         return -EINVAL;
@@ -145,7 +145,7 @@ static int madvise_device_atomic(struct xe_device *xe, struct xe_vm *vm,
                 struct xe_bo *bo;
                 struct ww_acquire_ctx ww;
-                bo = vmas[i]->bo;
+                bo = xe_vma_bo(vmas[i]);
                 if (XE_IOCTL_ERR(xe, !(bo->flags & XE_BO_CREATE_VRAM0_BIT) &&
                                  !(bo->flags & XE_BO_CREATE_VRAM1_BIT)))
                         return -EINVAL;
@@ -176,7 +176,7 @@ static int madvise_priority(struct xe_device *xe, struct xe_vm *vm,
                 struct xe_bo *bo;
                 struct ww_acquire_ctx ww;
-                bo = vmas[i]->bo;
+                bo = xe_vma_bo(vmas[i]);
                 err = xe_bo_lock(bo, &ww, 0, true);
                 if (err)