Commit 266f7618 authored by Danilo Krummrich

drm/nouveau: separately allocate struct nouveau_uvmm

Allocate struct nouveau_uvmm separately in preparation for subsequent
commits introducing reference counting for struct drm_gpuvm.

While at it, get rid of nouveau_uvmm_init() as indirection of
nouveau_uvmm_ioctl_vm_init() and perform some minor cleanups.
Reviewed-by: Dave Airlie <airlied@redhat.com>
Signed-off-by: Danilo Krummrich <dakr@redhat.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20231108001259.15123-9-dakr@redhat.com
parent 809ef191
......@@ -190,6 +190,8 @@ nouveau_cli_work_queue(struct nouveau_cli *cli, struct dma_fence *fence,
static void
nouveau_cli_fini(struct nouveau_cli *cli)
{
struct nouveau_uvmm *uvmm = nouveau_cli_uvmm_locked(cli);
/* All our channels are dead now, which means all the fences they
* own are signalled, and all callback functions have been called.
*
......@@ -199,7 +201,8 @@ nouveau_cli_fini(struct nouveau_cli *cli)
WARN_ON(!list_empty(&cli->worker));
usif_client_fini(cli);
nouveau_uvmm_fini(&cli->uvmm);
if (uvmm)
nouveau_uvmm_fini(uvmm);
nouveau_sched_entity_fini(&cli->sched_entity);
nouveau_vmm_fini(&cli->svm);
nouveau_vmm_fini(&cli->vmm);
......
......@@ -93,7 +93,10 @@ struct nouveau_cli {
struct nvif_mmu mmu;
struct nouveau_vmm vmm;
struct nouveau_vmm svm;
struct nouveau_uvmm uvmm;
struct {
struct nouveau_uvmm *ptr;
bool disabled;
} uvmm;
struct nouveau_sched_entity sched_entity;
......@@ -121,10 +124,7 @@ struct nouveau_cli_work {
/*
 * NOTE(review): this hunk is a scraped diff with +/- markers stripped, so the
 * pre-patch body (the NULL-check on uvmm.vmm.cli returning &cli->uvmm) and the
 * post-patch body (the single ternary returning the separately allocated
 * cli->uvmm.ptr) appear superimposed below. Only one of the two return paths
 * exists in any real tree — consult the original patch to disambiguate.
 */
/* Return the client's uvmm instance, or NULL if absent / cli is NULL. */
static inline struct nouveau_uvmm *
nouveau_cli_uvmm(struct nouveau_cli *cli)
{
if (!cli || !cli->uvmm.vmm.cli)
return NULL;
return &cli->uvmm;
return cli ? cli->uvmm.ptr : NULL;
}
static inline struct nouveau_uvmm *
......
......@@ -1636,18 +1636,6 @@ nouveau_uvmm_bind_job_init(struct nouveau_uvmm_bind_job **pjob,
return ret;
}
/*
 * Pre-patch VM_INIT ioctl handler, REMOVED by this commit: it was a thin
 * wrapper that forwarded to nouveau_uvmm_init() on the uvmm embedded inside
 * struct nouveau_cli. The commit folds the init logic directly into a new
 * nouveau_uvmm_ioctl_vm_init() (later hunk) that kzalloc()s the uvmm instead.
 *
 * @dev:       DRM device (unused here; required by the ioctl signature)
 * @data:      userspace payload, a struct drm_nouveau_vm_init giving the
 *             kernel-managed VA range
 * @file_priv: DRM file handle, used to recover the nouveau client
 *
 * Returns 0 on success or a negative errno from nouveau_uvmm_init().
 */
int
nouveau_uvmm_ioctl_vm_init(struct drm_device *dev,
void *data,
struct drm_file *file_priv)
{
struct nouveau_cli *cli = nouveau_cli(file_priv);
struct drm_nouveau_vm_init *init = data;
return nouveau_uvmm_init(&cli->uvmm, cli, init->kernel_managed_addr,
init->kernel_managed_size);
}
static int
nouveau_uvmm_vm_bind(struct nouveau_uvmm_bind_job_args *args)
{
......@@ -1793,17 +1781,25 @@ nouveau_uvmm_bo_unmap_all(struct nouveau_bo *nvbo)
}
int
nouveau_uvmm_init(struct nouveau_uvmm *uvmm, struct nouveau_cli *cli,
u64 kernel_managed_addr, u64 kernel_managed_size)
nouveau_uvmm_ioctl_vm_init(struct drm_device *dev,
void *data,
struct drm_file *file_priv)
{
struct nouveau_uvmm *uvmm;
struct nouveau_cli *cli = nouveau_cli(file_priv);
struct drm_device *drm = cli->drm->dev;
struct drm_gem_object *r_obj;
u64 kernel_managed_end = kernel_managed_addr + kernel_managed_size;
struct drm_nouveau_vm_init *init = data;
u64 kernel_managed_end;
int ret;
mutex_init(&uvmm->mutex);
mt_init_flags(&uvmm->region_mt, MT_FLAGS_LOCK_EXTERN);
mt_set_external_lock(&uvmm->region_mt, &uvmm->mutex);
if (check_add_overflow(init->kernel_managed_addr,
init->kernel_managed_size,
&kernel_managed_end))
return -EINVAL;
if (kernel_managed_end > NOUVEAU_VA_SPACE_END)
return -EINVAL;
mutex_lock(&cli->mutex);
......@@ -1812,44 +1808,49 @@ nouveau_uvmm_init(struct nouveau_uvmm *uvmm, struct nouveau_cli *cli,
goto out_unlock;
}
if (kernel_managed_end <= kernel_managed_addr) {
ret = -EINVAL;
goto out_unlock;
}
if (kernel_managed_end > NOUVEAU_VA_SPACE_END) {
ret = -EINVAL;
uvmm = kzalloc(sizeof(*uvmm), GFP_KERNEL);
if (!uvmm) {
ret = -ENOMEM;
goto out_unlock;
}
r_obj = drm_gpuvm_resv_object_alloc(drm);
if (!r_obj) {
kfree(uvmm);
ret = -ENOMEM;
goto out_unlock;
}
mutex_init(&uvmm->mutex);
mt_init_flags(&uvmm->region_mt, MT_FLAGS_LOCK_EXTERN);
mt_set_external_lock(&uvmm->region_mt, &uvmm->mutex);
drm_gpuvm_init(&uvmm->base, cli->name, 0, drm, r_obj,
NOUVEAU_VA_SPACE_START,
NOUVEAU_VA_SPACE_END,
kernel_managed_addr, kernel_managed_size,
init->kernel_managed_addr,
init->kernel_managed_size,
NULL);
/* GPUVM takes care from here on. */
drm_gem_object_put(r_obj);
ret = nvif_vmm_ctor(&cli->mmu, "uvmm",
cli->vmm.vmm.object.oclass, RAW,
kernel_managed_addr, kernel_managed_size,
NULL, 0, &cli->uvmm.vmm.vmm);
init->kernel_managed_addr,
init->kernel_managed_size,
NULL, 0, &uvmm->vmm.vmm);
if (ret)
goto out_gpuvm_fini;
cli->uvmm.vmm.cli = cli;
uvmm->vmm.cli = cli;
cli->uvmm.ptr = uvmm;
mutex_unlock(&cli->mutex);
return 0;
out_gpuvm_fini:
drm_gpuvm_destroy(&uvmm->base);
kfree(uvmm);
out_unlock:
mutex_unlock(&cli->mutex);
return ret;
......@@ -1864,9 +1865,6 @@ nouveau_uvmm_fini(struct nouveau_uvmm *uvmm)
struct nouveau_sched_entity *entity = &cli->sched_entity;
struct drm_gpuva *va, *next;
if (!cli)
return;
rmb(); /* for list_empty to work without lock */
wait_event(entity->job.wq, list_empty(&entity->job.list.head));
......@@ -1905,5 +1903,6 @@ nouveau_uvmm_fini(struct nouveau_uvmm *uvmm)
mutex_lock(&cli->mutex);
nouveau_vmm_fini(&uvmm->vmm);
drm_gpuvm_destroy(&uvmm->base);
kfree(uvmm);
mutex_unlock(&cli->mutex);
}
......@@ -12,8 +12,6 @@ struct nouveau_uvmm {
struct nouveau_vmm vmm;
struct maple_tree region_mt;
struct mutex mutex;
bool disabled;
};
struct nouveau_uvma_region {
......@@ -78,8 +76,6 @@ struct nouveau_uvmm_bind_job_args {
#define to_uvmm_bind_job(job) container_of((job), struct nouveau_uvmm_bind_job, base)
int nouveau_uvmm_init(struct nouveau_uvmm *uvmm, struct nouveau_cli *cli,
u64 kernel_managed_addr, u64 kernel_managed_size);
void nouveau_uvmm_fini(struct nouveau_uvmm *uvmm);
void nouveau_uvmm_bo_map_all(struct nouveau_bo *nvbov, struct nouveau_mem *mem);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment