Commit 60c16201 authored by Dave Airlie

Merge tag 'drm-xe-fixes-2024-02-08' of https://gitlab.freedesktop.org/drm/xe/kernel into drm-fixes

Driver Changes:
- Fix a loop in an error path
- Fix a missing dma-fence reference
- Fix a retry path on userptr REMAP
- Work around a false-positive gcc warning
- Fix a missing mapping of the USM batch buffer in the migrate VM
- Fix a memory leak
- Fix a bad assumption about the page size used
- Fix hitting a BUG() due to zero pages to map
- Remove some leftover async bind queue relics
Signed-off-by: Dave Airlie <airlied@redhat.com>

From: Thomas Hellstrom <thomas.hellstrom@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/ZcS2LllawGifubsk@fedora
parents 6c2bf9ca bf4c27b8
@@ -134,8 +134,6 @@ static void xe_display_fini_nommio(struct drm_device *dev, void *dummy)
int xe_display_init_nommio(struct xe_device *xe)
{
int err;
if (!xe->info.enable_display)
return 0;
@@ -145,10 +143,6 @@ int xe_display_init_nommio(struct xe_device *xe)
/* This must be called before any calls to HAS_PCH_* */
intel_detect_pch(xe);
err = intel_power_domains_init(xe);
if (err)
return err;
return drmm_add_action_or_reset(&xe->drm, xe_display_fini_nommio, xe);
}
@@ -926,20 +926,24 @@ void xe_exec_queue_last_fence_put_unlocked(struct xe_exec_queue *q)
* @q: The exec queue
* @vm: The VM the engine does a bind or exec for
*
* Get last fence, does not take a ref
* Get last fence, takes a ref
*
* Returns: last fence if not signaled, dma fence stub if signaled
*/
struct dma_fence *xe_exec_queue_last_fence_get(struct xe_exec_queue *q,
struct xe_vm *vm)
{
struct dma_fence *fence;
xe_exec_queue_last_fence_lockdep_assert(q, vm);
if (q->last_fence &&
test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &q->last_fence->flags))
xe_exec_queue_last_fence_put(q, vm);
return q->last_fence ? q->last_fence : dma_fence_get_stub();
fence = q->last_fence ? q->last_fence : dma_fence_get_stub();
dma_fence_get(fence);
return fence;
}
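
The kernel-doc change in this hunk flips the reference semantics of xe_exec_queue_last_fence_get(): the caller now owns a reference and must drop it. A minimal caller-side sketch of that contract, assuming a hypothetical wrapper wait_exec_queue_last_fence() that is not part of this series:

/* Hypothetical caller: the getter now takes a reference, so every call
 * is paired with a dma_fence_put() on all return paths. */
static int wait_exec_queue_last_fence(struct xe_exec_queue *q, struct xe_vm *vm)
{
	struct dma_fence *fence;
	int err;

	fence = xe_exec_queue_last_fence_get(q, vm);	/* reference taken here */
	err = dma_fence_wait(fence, true);
	dma_fence_put(fence);				/* caller drops it */

	return err;
}

This is also why the hunks below drop the explicit dma_fence_get() calls after xe_exec_queue_last_fence_get() in xe_sched_job_last_fence_add_dep() and xe_sync_in_fence_get(), and why no_in_syncs() now puts the fence on both branches.
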
/**
@@ -437,7 +437,10 @@ static int all_fw_domain_init(struct xe_gt *gt)
* USM has its only SA pool to non-block behind user operations
*/
if (gt_to_xe(gt)->info.has_usm) {
gt->usm.bb_pool = xe_sa_bo_manager_init(gt_to_tile(gt), SZ_1M, 16);
struct xe_device *xe = gt_to_xe(gt);
gt->usm.bb_pool = xe_sa_bo_manager_init(gt_to_tile(gt),
IS_DGFX(xe) ? SZ_1M : SZ_512K, 16);
if (IS_ERR(gt->usm.bb_pool)) {
err = PTR_ERR(gt->usm.bb_pool);
goto err_force_wake;
@@ -335,7 +335,7 @@ int xe_guc_pagefault_handler(struct xe_guc *guc, u32 *msg, u32 len)
return -EPROTO;
asid = FIELD_GET(PFD_ASID, msg[1]);
pf_queue = &gt->usm.pf_queue[asid % NUM_PF_QUEUE];
pf_queue = gt->usm.pf_queue + (asid % NUM_PF_QUEUE);
spin_lock_irqsave(&pf_queue->lock, flags);
full = pf_queue_full(pf_queue);
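
The one-line change above is the workaround for the false gcc warning noted in the changelog: &gt->usm.pf_queue[asid % NUM_PF_QUEUE] and gt->usm.pf_queue + (asid % NUM_PF_QUEUE) denote the same element, but the second spelling avoids the spurious diagnostic some gcc versions emit for the first. A standalone illustration of the equivalence (array and index are illustrative, not driver state):

#include <assert.h>

int main(void)
{
	int pf_queue[4] = { 0 };	/* stand-in for gt->usm.pf_queue */
	unsigned int asid = 6;

	/* Two spellings of the same address. */
	assert(&pf_queue[asid % 4] == pf_queue + (asid % 4));
	return 0;
}
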
@@ -170,11 +170,6 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
if (!IS_DGFX(xe)) {
/* Write out batch too */
m->batch_base_ofs = NUM_PT_SLOTS * XE_PAGE_SIZE;
if (xe->info.has_usm) {
batch = tile->primary_gt->usm.bb_pool->bo;
m->usm_batch_base_ofs = m->batch_base_ofs;
}
for (i = 0; i < batch->size;
i += vm->flags & XE_VM_FLAG_64K ? XE_64K_PAGE_SIZE :
XE_PAGE_SIZE) {
@@ -185,6 +180,24 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
entry);
level++;
}
if (xe->info.has_usm) {
xe_tile_assert(tile, batch->size == SZ_1M);
batch = tile->primary_gt->usm.bb_pool->bo;
m->usm_batch_base_ofs = m->batch_base_ofs + SZ_1M;
xe_tile_assert(tile, batch->size == SZ_512K);
for (i = 0; i < batch->size;
i += vm->flags & XE_VM_FLAG_64K ? XE_64K_PAGE_SIZE :
XE_PAGE_SIZE) {
entry = vm->pt_ops->pte_encode_bo(batch, i,
pat_index, 0);
xe_map_wr(xe, &bo->vmap, map_ofs + level * 8, u64,
entry);
level++;
}
}
} else {
u64 batch_addr = xe_bo_addr(batch, 0, XE_PAGE_SIZE);
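
The new has_usm block in the hunk above maps the USM batch buffer into the migrate VM right behind the default batch: the default batch (asserted to be SZ_1M here) sits at NUM_PT_SLOTS * XE_PAGE_SIZE, and the SZ_512K USM batch follows at an offset of SZ_1M beyond that base. A standalone sketch of the layout arithmetic; the NUM_PT_SLOTS and XE_PAGE_SIZE values are illustrative assumptions, not taken from the xe headers:

#include <stdio.h>

#define SZ_512K		(512UL * 1024)
#define SZ_1M		(1024UL * 1024)
#define XE_PAGE_SIZE	4096UL		/* assumed 4 KiB pages */
#define NUM_PT_SLOTS	32UL		/* illustrative value only */

int main(void)
{
	unsigned long batch_base_ofs = NUM_PT_SLOTS * XE_PAGE_SIZE;
	unsigned long usm_batch_base_ofs = batch_base_ofs + SZ_1M;

	/* Default batch occupies [base, base + 1M); the USM batch follows. */
	printf("default batch: 0x%lx - 0x%lx\n",
	       batch_base_ofs, batch_base_ofs + SZ_1M);
	printf("usm batch:     0x%lx - 0x%lx\n",
	       usm_batch_base_ofs, usm_batch_base_ofs + SZ_512K);
	return 0;
}
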
@@ -1204,8 +1217,11 @@ static bool no_in_syncs(struct xe_vm *vm, struct xe_exec_queue *q,
}
if (q) {
fence = xe_exec_queue_last_fence_get(q, vm);
if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
dma_fence_put(fence);
return false;
}
dma_fence_put(fence);
}
return true;
@@ -274,7 +274,6 @@ int xe_sched_job_last_fence_add_dep(struct xe_sched_job *job, struct xe_vm *vm)
struct dma_fence *fence;
fence = xe_exec_queue_last_fence_get(job->q, vm);
dma_fence_get(fence);
return drm_sched_job_add_dependency(&job->drm, fence);
}
@@ -307,7 +307,6 @@ xe_sync_in_fence_get(struct xe_sync_entry *sync, int num_sync,
/* Easy case... */
if (!num_in_fence) {
fence = xe_exec_queue_last_fence_get(q, vm);
dma_fence_get(fence);
return fence;
}
@@ -322,7 +321,6 @@ xe_sync_in_fence_get(struct xe_sync_entry *sync, int num_sync,
}
}
fences[current_fence++] = xe_exec_queue_last_fence_get(q, vm);
dma_fence_get(fences[current_fence - 1]);
cf = dma_fence_array_create(num_in_fence, fences,
vm->composite_fence_ctx,
vm->composite_fence_seqno++,
@@ -37,8 +37,6 @@
#include "generated/xe_wa_oob.h"
#include "xe_wa.h"
#define TEST_VM_ASYNC_OPS_ERROR
static struct drm_gem_object *xe_vm_obj(struct xe_vm *vm)
{
return vm->gpuvm.r_obj;
@@ -114,11 +112,8 @@ int xe_vma_userptr_pin_pages(struct xe_userptr_vma *uvma)
num_pages - pinned,
read_only ? 0 : FOLL_WRITE,
&pages[pinned]);
if (ret < 0) {
if (in_kthread)
ret = 0;
if (ret < 0)
break;
}
pinned += ret;
ret = 0;
@@ -1984,6 +1979,7 @@ static int xe_vm_prefetch(struct xe_vm *vm, struct xe_vma *vma,
xe_exec_queue_last_fence_get(wait_exec_queue, vm);
xe_sync_entry_signal(&syncs[i], NULL, fence);
dma_fence_put(fence);
}
}
@@ -2064,7 +2060,6 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo,
struct drm_gem_object *obj = bo ? &bo->ttm.base : NULL;
struct drm_gpuva_ops *ops;
struct drm_gpuva_op *__op;
struct xe_vma_op *op;
struct drm_gpuvm_bo *vm_bo;
int err;
@@ -2111,15 +2106,6 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo,
if (IS_ERR(ops))
return ops;
#ifdef TEST_VM_ASYNC_OPS_ERROR
if (operation & FORCE_ASYNC_OP_ERROR) {
op = list_first_entry_or_null(&ops->list, struct xe_vma_op,
base.entry);
if (op)
op->inject_error = true;
}
#endif
drm_gpuva_for_each_op(__op, ops) {
struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
@@ -2199,8 +2185,10 @@ static u64 xe_vma_max_pte_size(struct xe_vma *vma)
return SZ_1G;
else if (vma->gpuva.flags & XE_VMA_PTE_2M)
return SZ_2M;
else if (vma->gpuva.flags & XE_VMA_PTE_4K)
return SZ_4K;
return SZ_4K;
return SZ_1G; /* Uninitialized, used max size */
}
static u64 xe_vma_set_pte_size(struct xe_vma *vma, u64 size)
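
The xe_vma_max_pte_size() hunk above makes the 4K case explicit and changes the fall-through, hit when a VMA has no PTE-size flags set yet (not yet bound), to report the maximum size instead of 4K. A standalone sketch of that precedence; the flag bit values are illustrative stand-ins for XE_VMA_PTE_*:

#include <stdio.h>

#define PTE_1G	(1u << 0)	/* illustrative stand-ins, not the driver's bits */
#define PTE_2M	(1u << 1)
#define PTE_4K	(1u << 2)

static unsigned long max_pte_size(unsigned int flags)
{
	if (flags & PTE_1G)
		return 1024UL * 1024 * 1024;
	if (flags & PTE_2M)
		return 2UL * 1024 * 1024;
	if (flags & PTE_4K)
		return 4096UL;

	return 1024UL * 1024 * 1024;	/* no flags yet: assume the max size */
}

int main(void)
{
	printf("%lu\n", max_pte_size(0));	/* unbound VMA now reports 1G */
	return 0;
}
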
@@ -2530,13 +2518,25 @@ static int __xe_vma_op_execute(struct xe_vm *vm, struct xe_vma *vma,
}
drm_exec_fini(&exec);
if (err == -EAGAIN && xe_vma_is_userptr(vma)) {
if (err == -EAGAIN) {
lockdep_assert_held_write(&vm->lock);
err = xe_vma_userptr_pin_pages(to_userptr_vma(vma));
if (!err)
goto retry_userptr;
trace_xe_vma_fail(vma);
if (op->base.op == DRM_GPUVA_OP_REMAP) {
if (!op->remap.unmap_done)
vma = gpuva_to_vma(op->base.remap.unmap->va);
else if (op->remap.prev)
vma = op->remap.prev;
else
vma = op->remap.next;
}
if (xe_vma_is_userptr(vma)) {
err = xe_vma_userptr_pin_pages(to_userptr_vma(vma));
if (!err)
goto retry_userptr;
trace_xe_vma_fail(vma);
}
}
return err;
@@ -2548,13 +2548,6 @@ static int xe_vma_op_execute(struct xe_vm *vm, struct xe_vma_op *op)
lockdep_assert_held_write(&vm->lock);
#ifdef TEST_VM_ASYNC_OPS_ERROR
if (op->inject_error) {
op->inject_error = false;
return -ENOMEM;
}
#endif
switch (op->base.op) {
case DRM_GPUVA_OP_MAP:
ret = __xe_vma_op_execute(vm, op->map.vma, op);
@@ -2669,7 +2662,7 @@ static void vm_bind_ioctl_ops_unwind(struct xe_vm *vm,
{
int i;
for (i = num_ops_list - 1; i; ++i) {
for (i = num_ops_list - 1; i >= 0; --i) {
struct drm_gpuva_ops *__ops = ops[i];
struct drm_gpuva_op *__op;
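
The loop fix above replaces the old for (i = num_ops_list - 1; i; ++i), which walks the wrong way: it never unwinds index 0 and, with more than one ops list, increments past the end of the array. The corrected form walks in reverse and includes index 0. A standalone illustration of the corrected loop shape (array contents are illustrative):

#include <stdio.h>

int main(void)
{
	const char *ops[] = { "ops_list0", "ops_list1", "ops_list2" };
	int num_ops_list = 3;
	int i;

	/* Unwind in reverse creation order, index 0 included. */
	for (i = num_ops_list - 1; i >= 0; --i)
		printf("unwind %s\n", ops[i]);
	return 0;
}
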
@@ -2714,16 +2707,9 @@ static int vm_bind_ioctl_ops_execute(struct xe_vm *vm,
return 0;
}
#ifdef TEST_VM_ASYNC_OPS_ERROR
#define SUPPORTED_FLAGS \
(FORCE_ASYNC_OP_ERROR | DRM_XE_VM_BIND_FLAG_READONLY | \
DRM_XE_VM_BIND_FLAG_IMMEDIATE | DRM_XE_VM_BIND_FLAG_NULL | 0xffff)
#else
#define SUPPORTED_FLAGS \
(DRM_XE_VM_BIND_FLAG_READONLY | \
DRM_XE_VM_BIND_FLAG_IMMEDIATE | DRM_XE_VM_BIND_FLAG_NULL | \
0xffff)
#endif
DRM_XE_VM_BIND_FLAG_IMMEDIATE | DRM_XE_VM_BIND_FLAG_NULL)
#define XE_64K_PAGE_MASK 0xffffull
#define ALL_DRM_XE_SYNCS_FLAGS (DRM_XE_SYNCS_FLAG_WAIT_FOR_OP)
@@ -21,9 +21,6 @@ struct xe_bo;
struct xe_sync_entry;
struct xe_vm;
#define TEST_VM_ASYNC_OPS_ERROR
#define FORCE_ASYNC_OP_ERROR BIT(31)
#define XE_VMA_READ_ONLY DRM_GPUVA_USERBITS
#define XE_VMA_DESTROYED (DRM_GPUVA_USERBITS << 1)
#define XE_VMA_ATOMIC_PTE_BIT (DRM_GPUVA_USERBITS << 2)
@@ -360,11 +357,6 @@ struct xe_vma_op {
/** @flags: operation flags */
enum xe_vma_op_flags flags;
#ifdef TEST_VM_ASYNC_OPS_ERROR
/** @inject_error: inject error to test async op error handling */
bool inject_error;
#endif
union {
/** @map: VMA map operation specific data */
struct xe_vma_op_map map;