Commit 5f677a9b authored by Matthew Brost

drm/xe: Use xe_vma_ops to implement xe_vm_rebind

All page table updates are moving to an xe_vma_ops interface in order to
implement one job per VM bind IOCTL. Convert xe_vm_rebind to use an
xe_vma_ops based interface.
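
For context, the conversion follows a simple shape: accumulate one xe_vma_op
per VMA on a list, execute the whole list once, then free the ops on both the
success and error paths. Below is a minimal standalone sketch of that shape;
the toy_* names are invented for illustration and are not the driver's API.

/* Standalone illustration of the batch-then-execute pattern this patch
 * applies to xe_vm_rebind(). Toy types only -- not the xe driver's API. */
#include <stdio.h>
#include <stdlib.h>

struct toy_op {
	unsigned long addr;		/* address to (re)bind */
	struct toy_op *next;
};

struct toy_ops {
	struct toy_op *head, **tail;	/* singly linked list of pending ops */
};

static void toy_ops_init(struct toy_ops *ops)
{
	ops->head = NULL;
	ops->tail = &ops->head;
}

/* Analogue of xe_vm_ops_add_rebind(): allocate one op and queue it. */
static int toy_ops_add(struct toy_ops *ops, unsigned long addr)
{
	struct toy_op *op = calloc(1, sizeof(*op));

	if (!op)
		return -1;
	op->addr = addr;
	*ops->tail = op;
	ops->tail = &op->next;
	return 0;
}

/* Analogue of ops_execute(): one pass over the whole batch. */
static int toy_ops_execute(struct toy_ops *ops)
{
	struct toy_op *op;

	for (op = ops->head; op; op = op->next)
		printf("binding 0x%lx\n", op->addr);
	return 0;
}

int main(void)
{
	struct toy_ops ops;
	struct toy_op *op, *next;
	int err = 0;
	int i;

	toy_ops_init(&ops);

	/* Build the whole batch first; nothing is submitted yet. */
	for (i = 0; i < 4; i++) {
		err = toy_ops_add(&ops, 0x1000ul * i);
		if (err)
			goto free_ops;
	}

	/* Single submission for the whole batch. */
	err = toy_ops_execute(&ops);

free_ops:
	/* Ops are freed on both the success and the error path. */
	for (op = ops.head; op; op = next) {
		next = op->next;
		free(op);
	}
	return err ? 1 : 0;
}

Batching everything behind a single execute call is what the series builds on
to reach one job per VM bind IOCTL.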
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Oak Zeng <oak.zeng@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240425045513.1913039-6-matthew.brost@intel.com
parent 701109f2
@@ -712,37 +712,87 @@ int xe_vm_userptr_check_repin(struct xe_vm *vm)
 		list_empty_careful(&vm->userptr.invalidated)) ? 0 : -EAGAIN;
 }
 
-static struct dma_fence *
-xe_vm_bind_vma(struct xe_vma *vma, struct xe_exec_queue *q,
-	       struct xe_sync_entry *syncs, u32 num_syncs,
-	       bool first_op, bool last_op);
+static void xe_vm_populate_rebind(struct xe_vma_op *op, struct xe_vma *vma,
+				  u8 tile_mask)
+{
+	INIT_LIST_HEAD(&op->link);
+	op->base.op = DRM_GPUVA_OP_MAP;
+	op->base.map.va.addr = vma->gpuva.va.addr;
+	op->base.map.va.range = vma->gpuva.va.range;
+	op->base.map.gem.obj = vma->gpuva.gem.obj;
+	op->base.map.gem.offset = vma->gpuva.gem.offset;
+	op->map.vma = vma;
+	op->map.immediate = true;
+	op->map.dumpable = vma->gpuva.flags & XE_VMA_DUMPABLE;
+	op->map.is_null = xe_vma_is_null(vma);
+}
+
+static int xe_vm_ops_add_rebind(struct xe_vma_ops *vops, struct xe_vma *vma,
+				u8 tile_mask)
+{
+	struct xe_vma_op *op;
+
+	op = kzalloc(sizeof(*op), GFP_KERNEL);
+	if (!op)
+		return -ENOMEM;
+
+	xe_vm_populate_rebind(op, vma, tile_mask);
+	list_add_tail(&op->link, &vops->list);
+
+	return 0;
+}
+
+static struct dma_fence *ops_execute(struct xe_vm *vm,
+				     struct xe_vma_ops *vops,
+				     bool cleanup);
+static void xe_vma_ops_init(struct xe_vma_ops *vops);
 
 int xe_vm_rebind(struct xe_vm *vm, bool rebind_worker)
 {
 	struct dma_fence *fence;
 	struct xe_vma *vma, *next;
+	struct xe_vma_ops vops;
+	struct xe_vma_op *op, *next_op;
+	int err;
 
 	lockdep_assert_held(&vm->lock);
-	if (xe_vm_in_lr_mode(vm) && !rebind_worker)
+	if ((xe_vm_in_lr_mode(vm) && !rebind_worker) ||
+	    list_empty(&vm->rebind_list))
 		return 0;
 
+	xe_vma_ops_init(&vops);
+
 	xe_vm_assert_held(vm);
-	list_for_each_entry_safe(vma, next, &vm->rebind_list,
-				 combined_links.rebind) {
+	list_for_each_entry(vma, &vm->rebind_list, combined_links.rebind) {
 		xe_assert(vm->xe, vma->tile_present);
 
-		list_del_init(&vma->combined_links.rebind);
 		if (rebind_worker)
 			trace_xe_vma_rebind_worker(vma);
 		else
 			trace_xe_vma_rebind_exec(vma);
-		fence = xe_vm_bind_vma(vma, NULL, NULL, 0, false, false);
-		if (IS_ERR(fence))
-			return PTR_ERR(fence);
+
+		err = xe_vm_ops_add_rebind(&vops, vma,
+					   vma->tile_present);
+		if (err)
+			goto free_ops;
+	}
+
+	fence = ops_execute(vm, &vops, false);
+	if (IS_ERR(fence)) {
+		err = PTR_ERR(fence);
+	} else {
 		dma_fence_put(fence);
+		list_for_each_entry_safe(vma, next, &vm->rebind_list,
+					 combined_links.rebind)
+			list_del_init(&vma->combined_links.rebind);
 	}
 
-	return 0;
+free_ops:
+	list_for_each_entry_safe(op, next_op, &vops.list, link) {
+		list_del(&op->link);
+		kfree(op);
+	}
+
+	return err;
 }
 
 static void xe_vma_free(struct xe_vma *vma)
@@ -2414,7 +2464,7 @@ static struct dma_fence *op_execute(struct xe_vm *vm, struct xe_vma *vma,
 {
 	struct dma_fence *fence = NULL;
 
-	lockdep_assert_held_write(&vm->lock);
+	lockdep_assert_held(&vm->lock);
 
 	xe_vm_assert_held(vm);
 	xe_bo_assert_held(xe_vma_bo(vma));
@@ -2533,7 +2583,7 @@ xe_vma_op_execute(struct xe_vm *vm, struct xe_vma_op *op)
 {
 	struct dma_fence *fence = ERR_PTR(-ENOMEM);
 
-	lockdep_assert_held_write(&vm->lock);
+	lockdep_assert_held(&vm->lock);
 
 	switch (op->base.op) {
 	case DRM_GPUVA_OP_MAP: