Commit 3e3eb55e authored by Dave Airlie


Merge tag 'drm-misc-next-fixes-2024-05-16' of https://gitlab.freedesktop.org/drm/misc/kernel into drm-next

drm-misc-next-fixes for v6.10-rc1:
- VM_BIND fix for nouveau.
- Lots of panthor fixes:
  * Fixes for panthor's heap logical block.
  * Reset on unrecoverable fault
  * Fix VM references.
  * Reset fix.
- xlnx compile and doc fixes.
Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/54d2c8b9-8b04-45fc-b483-200ffac9d344@linux.intel.com
parents 5a5a10d9 959314c4
@@ -272,6 +272,9 @@ nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS)
                 getparam->value = (u64)ttm_resource_manager_usage(vram_mgr);
                 break;
         }
+        case NOUVEAU_GETPARAM_HAS_VMA_TILEMODE:
+                getparam->value = 1;
+                break;
         default:
                 NV_PRINTK(dbg, cli, "unknown parameter %lld\n", getparam->param);
                 return -EINVAL;

@@ -241,28 +241,28 @@ nouveau_bo_alloc(struct nouveau_cli *cli, u64 *size, int *align, u32 domain,
         }
 
         nvbo->contig = !(tile_flags & NOUVEAU_GEM_TILE_NONCONTIG);
-        if (!nouveau_cli_uvmm(cli) || internal) {
-                /* for BO noVM allocs, don't assign kinds */
-                if (cli->device.info.family >= NV_DEVICE_INFO_V0_FERMI) {
-                        nvbo->kind = (tile_flags & 0x0000ff00) >> 8;
-                        if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) {
-                                kfree(nvbo);
-                                return ERR_PTR(-EINVAL);
-                        }
-
-                        nvbo->comp = mmu->kind[nvbo->kind] != nvbo->kind;
-                } else if (cli->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
-                        nvbo->kind = (tile_flags & 0x00007f00) >> 8;
-                        nvbo->comp = (tile_flags & 0x00030000) >> 16;
-                        if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) {
-                                kfree(nvbo);
-                                return ERR_PTR(-EINVAL);
-                        }
-                } else {
-                        nvbo->zeta = (tile_flags & 0x00000007);
-                }
-                nvbo->mode = tile_mode;
 
+        if (cli->device.info.family >= NV_DEVICE_INFO_V0_FERMI) {
+                nvbo->kind = (tile_flags & 0x0000ff00) >> 8;
+                if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) {
+                        kfree(nvbo);
+                        return ERR_PTR(-EINVAL);
+                }
+
+                nvbo->comp = mmu->kind[nvbo->kind] != nvbo->kind;
+        } else if (cli->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
+                nvbo->kind = (tile_flags & 0x00007f00) >> 8;
+                nvbo->comp = (tile_flags & 0x00030000) >> 16;
+                if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) {
+                        kfree(nvbo);
+                        return ERR_PTR(-EINVAL);
+                }
+        } else {
+                nvbo->zeta = (tile_flags & 0x00000007);
+        }
+        nvbo->mode = tile_mode;
+
+        if (!nouveau_cli_uvmm(cli) || internal) {
                 /* Determine the desirable target GPU page size for the buffer. */
                 for (i = 0; i < vmm->page_nr; i++) {
                         /* Because we cannot currently allow VMM maps to fail
@@ -304,12 +304,6 @@ nouveau_bo_alloc(struct nouveau_cli *cli, u64 *size, int *align, u32 domain,
                 }
                 nvbo->page = vmm->page[pi].shift;
         } else {
-                /* reject other tile flags when in VM mode. */
-                if (tile_mode)
-                        return ERR_PTR(-EINVAL);
-                if (tile_flags & ~NOUVEAU_GEM_TILE_NONCONTIG)
-                        return ERR_PTR(-EINVAL);
-
                 /* Determine the desirable target GPU page size for the buffer. */
                 for (i = 0; i < vmm->page_nr; i++) {
                         /* Because we cannot currently allow VMM maps to fail

@@ -129,13 +129,8 @@ static void panthor_device_reset_work(struct work_struct *work)
         panthor_gpu_l2_power_on(ptdev);
         panthor_mmu_post_reset(ptdev);
         ret = panthor_fw_post_reset(ptdev);
-        if (ret)
-                goto out_dev_exit;
-
         atomic_set(&ptdev->reset.pending, 0);
-        panthor_sched_post_reset(ptdev);
-
-out_dev_exit:
+        panthor_sched_post_reset(ptdev, ret != 0);
         drm_dev_exit(cookie);
 
         if (ret) {
@@ -293,6 +288,7 @@ static const struct panthor_exception_info panthor_exception_infos[] = {
         PANTHOR_EXCEPTION(ACTIVE),
         PANTHOR_EXCEPTION(CS_RES_TERM),
         PANTHOR_EXCEPTION(CS_CONFIG_FAULT),
+        PANTHOR_EXCEPTION(CS_UNRECOVERABLE),
         PANTHOR_EXCEPTION(CS_ENDPOINT_FAULT),
         PANTHOR_EXCEPTION(CS_BUS_FAULT),
         PANTHOR_EXCEPTION(CS_INSTR_INVALID),

@@ -216,6 +216,7 @@ enum drm_panthor_exception_type {
         DRM_PANTHOR_EXCEPTION_CS_RES_TERM = 0x0f,
         DRM_PANTHOR_EXCEPTION_MAX_NON_FAULT = 0x3f,
         DRM_PANTHOR_EXCEPTION_CS_CONFIG_FAULT = 0x40,
+        DRM_PANTHOR_EXCEPTION_CS_UNRECOVERABLE = 0x41,
         DRM_PANTHOR_EXCEPTION_CS_ENDPOINT_FAULT = 0x44,
         DRM_PANTHOR_EXCEPTION_CS_BUS_FAULT = 0x48,
         DRM_PANTHOR_EXCEPTION_CS_INSTR_INVALID = 0x49,

@@ -453,7 +453,7 @@ panthor_fw_alloc_queue_iface_mem(struct panthor_device *ptdev,
 
         ret = panthor_kernel_bo_vmap(mem);
         if (ret) {
-                panthor_kernel_bo_destroy(panthor_fw_vm(ptdev), mem);
+                panthor_kernel_bo_destroy(mem);
                 return ERR_PTR(ret);
         }
 
@@ -1134,7 +1134,7 @@ void panthor_fw_unplug(struct panthor_device *ptdev)
         panthor_fw_stop(ptdev);
 
         list_for_each_entry(section, &ptdev->fw->sections, node)
-                panthor_kernel_bo_destroy(panthor_fw_vm(ptdev), section->mem);
+                panthor_kernel_bo_destroy(section->mem);
 
         /* We intentionally don't call panthor_vm_idle() and let
          * panthor_mmu_unplug() release the AS we acquired with
@@ -1142,6 +1142,7 @@ void panthor_fw_unplug(struct panthor_device *ptdev)
          * state to keep the active_refcnt balanced.
          */
         panthor_vm_put(ptdev->fw->vm);
+        ptdev->fw->vm = NULL;
 
         panthor_gpu_power_off(ptdev, L2, ptdev->gpu_info.l2_present, 20000);
 }

@@ -26,18 +26,18 @@ static void panthor_gem_free_object(struct drm_gem_object *obj)
 
 /**
  * panthor_kernel_bo_destroy() - Destroy a kernel buffer object
- * @vm: The VM this BO was mapped to.
  * @bo: Kernel buffer object to destroy. If NULL or an ERR_PTR(), the destruction
  * is skipped.
  */
-void panthor_kernel_bo_destroy(struct panthor_vm *vm,
-                               struct panthor_kernel_bo *bo)
+void panthor_kernel_bo_destroy(struct panthor_kernel_bo *bo)
 {
+        struct panthor_vm *vm;
         int ret;
 
         if (IS_ERR_OR_NULL(bo))
                 return;
 
+        vm = bo->vm;
         panthor_kernel_bo_vunmap(bo);
 
         if (drm_WARN_ON(bo->obj->dev,
@@ -53,6 +53,7 @@ void panthor_kernel_bo_destroy(struct panthor_vm *vm,
         drm_gem_object_put(bo->obj);
 
 out_free_bo:
+        panthor_vm_put(vm);
         kfree(bo);
 }
 
@@ -106,6 +107,7 @@ panthor_kernel_bo_create(struct panthor_device *ptdev, struct panthor_vm *vm,
         if (ret)
                 goto err_free_va;
 
+        kbo->vm = panthor_vm_get(vm);
         bo->exclusive_vm_root_gem = panthor_vm_root_gem(vm);
         drm_gem_object_get(bo->exclusive_vm_root_gem);
         bo->base.base.resv = bo->exclusive_vm_root_gem->resv;

@@ -61,6 +61,11 @@ struct panthor_kernel_bo {
          */
         struct drm_gem_object *obj;
 
+        /**
+         * @vm: VM this private buffer is attached to.
+         */
+        struct panthor_vm *vm;
+
         /**
          * @va_node: VA space allocated to this GEM.
          */
@@ -136,7 +141,6 @@ panthor_kernel_bo_create(struct panthor_device *ptdev, struct panthor_vm *vm,
                          size_t size, u32 bo_flags, u32 vm_map_flags,
                          u64 gpu_va);
 
-void panthor_kernel_bo_destroy(struct panthor_vm *vm,
-                               struct panthor_kernel_bo *bo);
+void panthor_kernel_bo_destroy(struct panthor_kernel_bo *bo);
 
 #endif /* __PANTHOR_GEM_H__ */

@@ -127,7 +127,7 @@ static void panthor_free_heap_chunk(struct panthor_vm *vm,
         heap->chunk_count--;
         mutex_unlock(&heap->lock);
 
-        panthor_kernel_bo_destroy(vm, chunk->bo);
+        panthor_kernel_bo_destroy(chunk->bo);
         kfree(chunk);
 }
 
@@ -183,7 +183,7 @@ static int panthor_alloc_heap_chunk(struct panthor_device *ptdev,
         return 0;
 
 err_destroy_bo:
-        panthor_kernel_bo_destroy(vm, chunk->bo);
+        panthor_kernel_bo_destroy(chunk->bo);
 
 err_free_chunk:
         kfree(chunk);
 
@@ -253,8 +253,8 @@ int panthor_heap_destroy(struct panthor_heap_pool *pool, u32 handle)
  * @pool: Pool to instantiate the heap context from.
  * @initial_chunk_count: Number of chunk allocated at initialization time.
  * Must be at least 1.
- * @chunk_size: The size of each chunk. Must be a power of two between 256k
- * and 2M.
+ * @chunk_size: The size of each chunk. Must be page-aligned and lie in the
+ * [128k:8M] range.
  * @max_chunks: Maximum number of chunks that can be allocated.
  * @target_in_flight: Maximum number of in-flight render passes.
  * @heap_ctx_gpu_va: Pointer holding the GPU address of the allocated heap
@@ -281,8 +281,11 @@ int panthor_heap_create(struct panthor_heap_pool *pool,
         if (initial_chunk_count == 0)
                 return -EINVAL;
 
-        if (hweight32(chunk_size) != 1 ||
-            chunk_size < SZ_256K || chunk_size > SZ_2M)
+        if (initial_chunk_count > max_chunks)
+                return -EINVAL;
+
+        if (!IS_ALIGNED(chunk_size, PAGE_SIZE) ||
+            chunk_size < SZ_128K || chunk_size > SZ_8M)
                 return -EINVAL;
 
         down_read(&pool->lock);
@@ -320,7 +323,8 @@ int panthor_heap_create(struct panthor_heap_pool *pool,
         if (!pool->vm) {
                 ret = -EINVAL;
         } else {
-                ret = xa_alloc(&pool->xa, &id, heap, XA_LIMIT(1, MAX_HEAPS_PER_POOL), GFP_KERNEL);
+                ret = xa_alloc(&pool->xa, &id, heap,
+                               XA_LIMIT(0, MAX_HEAPS_PER_POOL - 1), GFP_KERNEL);
                 if (!ret) {
                         void *gpu_ctx = panthor_get_heap_ctx(pool, id);
@@ -391,7 +395,7 @@ int panthor_heap_return_chunk(struct panthor_heap_pool *pool,
         mutex_unlock(&heap->lock);
 
         if (removed) {
-                panthor_kernel_bo_destroy(pool->vm, chunk->bo);
+                panthor_kernel_bo_destroy(chunk->bo);
                 kfree(chunk);
                 ret = 0;
         } else {
@@ -410,6 +414,13 @@ int panthor_heap_return_chunk(struct panthor_heap_pool *pool,
  * @renderpasses_in_flight: Number of render passes currently in-flight.
  * @pending_frag_count: Number of fragment jobs waiting for execution/completion.
  * @new_chunk_gpu_va: Pointer used to return the chunk VA.
+ *
+ * Return:
+ * - 0 if a new heap was allocated
+ * - -ENOMEM if the tiler context reached the maximum number of chunks
+ *   or if too many render passes are in-flight
+ *   or if the allocation failed
+ * - -EINVAL if any of the arguments passed to panthor_heap_grow() is invalid
  */
 int panthor_heap_grow(struct panthor_heap_pool *pool,
                       u64 heap_gpu_va,
@@ -439,10 +450,7 @@ int panthor_heap_grow(struct panthor_heap_pool *pool,
          * handler provided by the userspace driver, if any).
          */
         if (renderpasses_in_flight > heap->target_in_flight ||
-            (pending_frag_count > 0 && heap->chunk_count >= heap->max_chunks)) {
-                ret = -EBUSY;
-                goto out_unlock;
-        } else if (heap->chunk_count >= heap->max_chunks) {
+            heap->chunk_count >= heap->max_chunks) {
                 ret = -ENOMEM;
                 goto out_unlock;
         }
@@ -536,7 +544,7 @@ panthor_heap_pool_create(struct panthor_device *ptdev, struct panthor_vm *vm)
         pool->vm = vm;
         pool->ptdev = ptdev;
         init_rwsem(&pool->lock);
-        xa_init_flags(&pool->xa, XA_FLAGS_ALLOC1);
+        xa_init_flags(&pool->xa, XA_FLAGS_ALLOC);
         kref_init(&pool->refcount);
         pool->gpu_contexts = panthor_kernel_bo_create(ptdev, vm, bosize,
@@ -587,7 +595,7 @@ void panthor_heap_pool_destroy(struct panthor_heap_pool *pool)
                 drm_WARN_ON(&pool->ptdev->base, panthor_heap_destroy_locked(pool, i));
 
         if (!IS_ERR_OR_NULL(pool->gpu_contexts))
-                panthor_kernel_bo_destroy(pool->vm, pool->gpu_contexts);
+                panthor_kernel_bo_destroy(pool->gpu_contexts);
 
         /* Reflects the fact the pool has been destroyed. */
         pool->vm = NULL;

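For readers unfamiliar with the xarray allocation flags touched in the panthor_heap_pool_create() hunk above: XA_FLAGS_ALLOC1 hands out IDs starting at 1, while XA_FLAGS_ALLOC starts at 0, which is what the XA_LIMIT(0, MAX_HEAPS_PER_POOL - 1) range now relies on. A minimal, self-contained sketch of that generic kernel API follows (illustrative only, not panthor code; the names are made up):

#include <linux/gfp.h>
#include <linux/xarray.h>

#define MY_MAX_IDS 128

/* XA_FLAGS_ALLOC: xa_alloc() hands out IDs starting at 0.
 * DEFINE_XARRAY_ALLOC1()/XA_FLAGS_ALLOC1 would start at 1 instead. */
static DEFINE_XARRAY_ALLOC(my_xa);

static int my_track_entry(void *entry, u32 *out_id)
{
        /* Allocate the lowest free ID in [0, MY_MAX_IDS - 1], mirroring the
         * XA_LIMIT(0, MAX_HEAPS_PER_POOL - 1) range used in the hunk above. */
        return xa_alloc(&my_xa, out_id, entry,
                        XA_LIMIT(0, MY_MAX_IDS - 1), GFP_KERNEL);
}
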
@@ -826,8 +826,8 @@ static void group_free_queue(struct panthor_group *group, struct panthor_queue *queue)
 
         panthor_queue_put_syncwait_obj(queue);
 
-        panthor_kernel_bo_destroy(group->vm, queue->ringbuf);
-        panthor_kernel_bo_destroy(panthor_fw_vm(group->ptdev), queue->iface.mem);
+        panthor_kernel_bo_destroy(queue->ringbuf);
+        panthor_kernel_bo_destroy(queue->iface.mem);
 
         kfree(queue);
 }
@@ -837,15 +837,14 @@ static void group_release_work(struct work_struct *work)
         struct panthor_group *group = container_of(work,
                                                    struct panthor_group,
                                                    release_work);
-        struct panthor_device *ptdev = group->ptdev;
         u32 i;
 
         for (i = 0; i < group->queue_count; i++)
                 group_free_queue(group, group->queues[i]);
 
-        panthor_kernel_bo_destroy(panthor_fw_vm(ptdev), group->suspend_buf);
-        panthor_kernel_bo_destroy(panthor_fw_vm(ptdev), group->protm_suspend_buf);
-        panthor_kernel_bo_destroy(group->vm, group->syncobjs);
+        panthor_kernel_bo_destroy(group->suspend_buf);
+        panthor_kernel_bo_destroy(group->protm_suspend_buf);
+        panthor_kernel_bo_destroy(group->syncobjs);
 
         panthor_vm_put(group->vm);
         kfree(group);
@@ -1281,7 +1280,16 @@ cs_slot_process_fatal_event_locked(struct panthor_device *ptdev,
         if (group)
                 group->fatal_queues |= BIT(cs_id);
 
-        sched_queue_delayed_work(sched, tick, 0);
+        if (CS_EXCEPTION_TYPE(fatal) == DRM_PANTHOR_EXCEPTION_CS_UNRECOVERABLE) {
+                /* If this exception is unrecoverable, queue a reset, and make
+                 * sure we stop scheduling groups until the reset has happened.
+                 */
+                panthor_device_schedule_reset(ptdev);
+                cancel_delayed_work(&sched->tick_work);
+        } else {
+                sched_queue_delayed_work(sched, tick, 0);
+        }
+
         drm_warn(&ptdev->base,
                  "CSG slot %d CS slot: %d\n"
                  "CS_FATAL.EXCEPTION_TYPE: 0x%x (%s)\n"
@@ -1385,7 +1393,12 @@ static int group_process_tiler_oom(struct panthor_group *group, u32 cs_id)
                                         pending_frag_count, &new_chunk_va);
         }
 
-        if (ret && ret != -EBUSY) {
+        /* If the heap context doesn't have memory for us, we want to let the
+         * FW try to reclaim memory by waiting for fragment jobs to land or by
+         * executing the tiler OOM exception handler, which is supposed to
+         * implement incremental rendering.
+         */
+        if (ret && ret != -ENOMEM) {
                 drm_warn(&ptdev->base, "Failed to extend the tiler heap\n");
                 group->fatal_queues |= BIT(cs_id);
                 sched_queue_delayed_work(sched, tick, 0);
@@ -2720,15 +2733,22 @@ void panthor_sched_pre_reset(struct panthor_device *ptdev)
         mutex_unlock(&sched->reset.lock);
 }
 
-void panthor_sched_post_reset(struct panthor_device *ptdev)
+void panthor_sched_post_reset(struct panthor_device *ptdev, bool reset_failed)
 {
         struct panthor_scheduler *sched = ptdev->scheduler;
         struct panthor_group *group, *group_tmp;
 
         mutex_lock(&sched->reset.lock);
 
-        list_for_each_entry_safe(group, group_tmp, &sched->reset.stopped_groups, run_node)
+        list_for_each_entry_safe(group, group_tmp, &sched->reset.stopped_groups, run_node) {
+                /* Consider all previously running group as terminated if the
+                 * reset failed.
+                 */
+                if (reset_failed)
+                        group->state = PANTHOR_CS_GROUP_TERMINATED;
+
                 panthor_group_start(group);
+        }
 
         /* We're done resetting the GPU, clear the reset.in_progress bit so we can
          * kick the scheduler.
@@ -2736,9 +2756,11 @@ void panthor_sched_post_reset(struct panthor_device *ptdev)
         atomic_set(&sched->reset.in_progress, false);
         mutex_unlock(&sched->reset.lock);
 
-        sched_queue_delayed_work(sched, tick, 0);
-        sched_queue_work(sched, sync_upd);
+        /* No need to queue a tick and update syncs if the reset failed. */
+        if (!reset_failed) {
+                sched_queue_delayed_work(sched, tick, 0);
+                sched_queue_work(sched, sync_upd);
+        }
 }
 
 static void group_sync_upd_work(struct work_struct *work)

@@ -40,7 +40,7 @@ void panthor_group_pool_destroy(struct panthor_file *pfile);
 int panthor_sched_init(struct panthor_device *ptdev);
 void panthor_sched_unplug(struct panthor_device *ptdev);
 void panthor_sched_pre_reset(struct panthor_device *ptdev);
-void panthor_sched_post_reset(struct panthor_device *ptdev);
+void panthor_sched_post_reset(struct panthor_device *ptdev, bool reset_failed);
 void panthor_sched_suspend(struct panthor_device *ptdev);
 void panthor_sched_resume(struct panthor_device *ptdev);

@@ -940,7 +940,7 @@ zynqmp_disp_layer_find_format(struct zynqmp_disp_layer *layer,
  * zynqmp_disp_layer_find_live_format - Find format information for given
  * media bus format
  * @layer: The layer
- * @drm_fmt: Media bus format to search
+ * @media_bus_format: Media bus format to search
  *
  * Search display subsystem format information corresponding to the given media
  * bus format @media_bus_format for the @layer, and return a pointer to the
@@ -981,7 +981,7 @@ u32 *zynqmp_disp_layer_drm_formats(struct zynqmp_disp_layer *layer,
         unsigned int i;
         u32 *formats;
 
-        if (WARN_ON(!layer->mode == ZYNQMP_DPSUB_LAYER_NONLIVE)) {
+        if (WARN_ON(layer->mode != ZYNQMP_DPSUB_LAYER_NONLIVE)) {
                 *num_formats = 0;
                 return NULL;
         }
@@ -1117,7 +1117,7 @@ void zynqmp_disp_layer_set_format(struct zynqmp_disp_layer *layer,
 /**
  * zynqmp_disp_layer_set_live_format - Set the live video layer format
  * @layer: The layer
- * @info: The format info
+ * @media_bus_format: Media bus format to set
  *
  * NOTE: This function should not be used to set format for non-live video
  * layer. Use zynqmp_disp_layer_set_format() instead.

@@ -68,6 +68,13 @@ extern "C" {
  */
 #define NOUVEAU_GETPARAM_VRAM_USED 19
 
+/*
+ * NOUVEAU_GETPARAM_HAS_VMA_TILEMODE
+ *
+ * Query whether tile mode and PTE kind are accepted with VM allocs or not.
+ */
+#define NOUVEAU_GETPARAM_HAS_VMA_TILEMODE 20
+
 struct drm_nouveau_getparam {
         __u64 param;
         __u64 value;

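For context, userspace can probe this new parameter before relying on tile mode/PTE kind with VM_BIND allocations; on older kernels the getparam ioctl simply fails with -EINVAL, as the nouveau_abi16 hunk above shows. A minimal sketch under those assumptions (the helper name and exact header path are illustrative, not part of this series):

#include <errno.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/nouveau_drm.h>    /* uapi header patched above */

/* Illustrative helper: returns 1 if tile mode/PTE kind work with VM allocs,
 * 0 if not, and a negative errno if the parameter is unknown (older kernel). */
static int nouveau_has_vma_tilemode(int fd)
{
        struct drm_nouveau_getparam gp;

        memset(&gp, 0, sizeof(gp));
        gp.param = NOUVEAU_GETPARAM_HAS_VMA_TILEMODE;

        if (ioctl(fd, DRM_IOCTL_NOUVEAU_GETPARAM, &gp) < 0)
                return -errno;

        return gp.value != 0;
}
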
@@ -895,13 +895,21 @@ struct drm_panthor_tiler_heap_create {
         /** @vm_id: VM ID the tiler heap should be mapped to */
         __u32 vm_id;
 
-        /** @initial_chunk_count: Initial number of chunks to allocate. */
+        /** @initial_chunk_count: Initial number of chunks to allocate. Must be at least one. */
         __u32 initial_chunk_count;
 
-        /** @chunk_size: Chunk size. Must be a power of two at least 256KB large. */
+        /**
+         * @chunk_size: Chunk size.
+         *
+         * Must be page-aligned and lie in the [128k:8M] range.
+         */
         __u32 chunk_size;
 
-        /** @max_chunks: Maximum number of chunks that can be allocated. */
+        /**
+         * @max_chunks: Maximum number of chunks that can be allocated.
+         *
+         * Must be at least @initial_chunk_count.
+         */
         __u32 max_chunks;
 
         /**
@@ -931,7 +939,11 @@ struct drm_panthor_tiler_heap_create {
  * struct drm_panthor_tiler_heap_destroy - Arguments passed to DRM_IOCTL_PANTHOR_TILER_HEAP_DESTROY
  */
 struct drm_panthor_tiler_heap_destroy {
-        /** @handle: Handle of the tiler heap to destroy */
+        /**
+         * @handle: Handle of the tiler heap to destroy.
+         *
+         * Must be a valid heap handle returned by DRM_IOCTL_PANTHOR_TILER_HEAP_CREATE.
+         */
         __u32 handle;
 
         /** @pad: Padding field, MBZ. */

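To make the documented constraints concrete, here is a hedged userspace sketch that fills drm_panthor_tiler_heap_create with values satisfying them (chunk_size page-aligned and inside [128k:8M], max_chunks >= initial_chunk_count >= 1). The target_in_flight and handle fields and the ioctl wrapper are taken from the panthor uapi as I recall it and should be treated as assumptions, not part of this diff:

#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/panthor_drm.h>

/* Illustrative only: create a tiler heap on VM `vm_id` with parameters that
 * respect the constraints documented above. */
static int create_tiler_heap(int fd, uint32_t vm_id, uint32_t *handle)
{
        struct drm_panthor_tiler_heap_create arg;

        memset(&arg, 0, sizeof(arg));
        arg.vm_id = vm_id;
        arg.initial_chunk_count = 4;            /* >= 1 */
        arg.chunk_size = 2 * 1024 * 1024;       /* page-aligned, within [128k:8M] */
        arg.max_chunks = 64;                    /* >= initial_chunk_count */
        arg.target_in_flight = 65535;           /* assumed field, see lead-in */

        if (ioctl(fd, DRM_IOCTL_PANTHOR_TILER_HEAP_CREATE, &arg) < 0)
                return -errno;

        *handle = arg.handle;                   /* assumed output field */
        return 0;
}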