Commit f9463a4b authored by Ben Skeggs

drm/nouveau/mmu: implement new vmm frontend

These are the new privileged interfaces to the VMM backends, and they expose
some functionality that wasn't previously available.

It's now possible to allocate a chunk of address-space (even all of it)
without causing page tables to be allocated up-front, and to then map into
it at arbitrary locations.  This is the basic primitive used to support
features such as sparse mapping, userspace control over its own
address-space, and HMM (where the GPU driver isn't in control of the
address-space layout).
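
As a rough illustration (not part of this patch), an in-kernel user might
drive the new frontend with the nvkm_vmm_new()/nvkm_vmm_get()/nvkm_vmm_put()
interfaces declared in the headers below.  The function name, the "example"
VMM name, the 16MiB size and the 4KiB page shift are placeholders, and error
handling is reduced to the minimum:

    /* Hypothetical sketch only; assumes <subdev/mmu.h>. */
    static int
    example_reserve(struct nvkm_device *device)
    {
            struct nvkm_vmm *vmm;
            struct nvkm_vma *vma;
            int ret;

            /* addr == 0, size == 0 covers the device's full address-space,
             * mirroring the nvkm_mmu_oneinit() change below ("gart" VMM). */
            ret = nvkm_vmm_new(device, 0, 0, NULL, 0, NULL, "example", &vmm);
            if (ret)
                    return ret;

            /* Reserve 16MiB of address-space.  No page tables are
             * referenced yet; that only happens when something is mapped. */
            ret = nvkm_vmm_get(vmm, 12 /* assumed 4KiB page shift */,
                               16 << 20, &vma);
            if (ret == 0)
                    nvkm_vmm_put(vmm, &vma);        /* release the region */

            nvkm_vmm_unref(&vmm);
            return ret;
    }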

Rather than being tied to a subtle combination of memory object and VMA
properties, arguments that control map flags (ro, kind, etc) are passed
explicitly at map time.
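
For example (again a hypothetical sketch, not code from this patch), the
map-time arguments travel through argv/argc and struct nvkm_vmm_map rather
than being derived from the memory object or the VMA.  NULL/0 is assumed
here to select the backend defaults; a GPU-specific argument struct carrying
ro/kind/etc would be passed instead when needed:

    /* Hypothetical sketch only; "vmm", "vma" and "memory" come from the
     * caller, and the .offset member is an assumption for illustration. */
    static int
    example_map(struct nvkm_vmm *vmm, struct nvkm_vma *vma,
                struct nvkm_memory *memory)
    {
            struct nvkm_vmm_map map = {
                    .memory = memory,       /* backing memory object */
                    .offset = 0,            /* start offset within it */
            };
            int ret = nvkm_vmm_map(vmm, vma, NULL, 0, &map);
            if (ret)
                    return ret;
            /* ...use the mapping... */
            nvkm_vmm_unmap(vmm, vma);
            return 0;
    }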

The compatibility hacks that implemented the old frontend on top of the new
driver backends have been replaced with a similar layer that implements the
old frontend's interfaces on top of the new frontend.
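
Condensed from the hunks below (not complete functions), the shim pattern
looks like this: the legacy struct nvkm_vma keeps a ->node pointer to a
new-style VMA, and the old entry points defer to the new frontend:

    /* nvkm_vm_get(): allocate through the new frontend. */
    ret = nvkm_vmm_get_locked(vm, true, false, false, page_shift, 0,
                              size, &vma->node);

    /* nvkm_vm_unmap() / nvkm_vm_put(): defer to the new frontend. */
    nvkm_vmm_unmap(vma->vm, vma->node);
    nvkm_vmm_put(vma->vm, &vma->node);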
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
parent 26880e76
 #ifndef __NVKM_FB_H__
 #define __NVKM_FB_H__
 #include <core/subdev.h>
-#include <core/memory.h>
-#include <subdev/mmu.h>
+#include <core/mm.h>
 /* memory type/access flags, do not match hardware values */
 #define NV_MEM_ACCESS_RO 1
......
 #ifndef __NVKM_MMU_H__
 #define __NVKM_MMU_H__
 #include <core/subdev.h>
-#include <core/memory.h>
-#include <core/mm.h>
-struct nvkm_gpuobj;
 struct nvkm_mem;
 struct nvkm_vm_pgt {
@@ -12,14 +9,25 @@ struct nvkm_vm_pgt {
 };
 struct nvkm_vma {
-        struct nvkm_memory *memory;
-        struct nvkm_tags *tags;
+        struct list_head head;
+        struct rb_node tree;
+        u64 addr;
+        u64 size:50;
+        bool mapref:1; /* PTs (de)referenced on (un)map (vs pre-allocated). */
+        bool sparse:1; /* Unmapped PDEs/PTEs will not trigger MMU faults. */
+#define NVKM_VMA_PAGE_NONE 7
+        u8 page:3; /* Requested page type (index, or NONE for automatic). */
+        u8 refd:3; /* Current page type (index, or NONE for unreferenced). */
+        bool used:1; /* Region allocated. */
+        bool part:1; /* Region was split from an allocated region by map(). */
+        bool user:1; /* Region user-allocated. */
+        bool busy:1; /* Region busy (for temporarily preventing user access). */
+        struct nvkm_memory *memory; /* Memory currently mapped into VMA. */
+        struct nvkm_tags *tags; /* Compression tag reference. */
+        struct nvkm_vma *node;
         struct nvkm_vm *vm;
-        struct nvkm_mm_node *node;
-        union {
-                u64 offset;
-                u64 addr;
-        };
+        u64 offset;
         u32 access;
 };
@@ -37,8 +45,9 @@ struct nvkm_vm {
         struct nvkm_vmm_pt *pd;
         struct list_head join;
-        struct nvkm_mm mm;
-        struct kref refcount;
+        struct list_head list;
+        struct rb_root free;
+        struct rb_root root;
         bool bootstrapped;
         atomic_t engref[NVKM_SUBDEV_NR];
@@ -57,9 +66,16 @@ void nvkm_vm_put(struct nvkm_vma *);
 void nvkm_vm_map(struct nvkm_vma *, struct nvkm_mem *);
 void nvkm_vm_map_at(struct nvkm_vma *, u64 offset, struct nvkm_mem *);
 void nvkm_vm_unmap(struct nvkm_vma *);
-void nvkm_vm_unmap_at(struct nvkm_vma *, u64 offset, u64 length);
+int nvkm_vmm_new(struct nvkm_device *, u64 addr, u64 size, void *argv, u32 argc,
+                 struct lock_class_key *, const char *name, struct nvkm_vmm **);
+struct nvkm_vmm *nvkm_vmm_ref(struct nvkm_vmm *);
+void nvkm_vmm_unref(struct nvkm_vmm **);
 int nvkm_vmm_boot(struct nvkm_vmm *);
+int nvkm_vmm_join(struct nvkm_vmm *, struct nvkm_memory *inst);
+void nvkm_vmm_part(struct nvkm_vmm *, struct nvkm_memory *inst);
+int nvkm_vmm_get(struct nvkm_vmm *, u8 page, u64 size, struct nvkm_vma **);
+void nvkm_vmm_put(struct nvkm_vmm *, struct nvkm_vma **);
 struct nvkm_vmm_map {
         struct nvkm_memory *memory;
@@ -78,6 +94,12 @@ struct nvkm_vmm_map {
         u64 ctag;
 };
+int nvkm_vmm_map(struct nvkm_vmm *, struct nvkm_vma *, void *argv, u32 argc,
+                 struct nvkm_vmm_map *);
+void nvkm_vmm_unmap(struct nvkm_vmm *, struct nvkm_vma *);
+struct nvkm_vmm *nvkm_uvmm_search(struct nvkm_client *, u64 handle);
 struct nvkm_mmu {
         const struct nvkm_mmu_func *func;
         struct nvkm_subdev subdev;
......
 #ifndef __NOUVEAU_MEM_H__
 #define __NOUVEAU_MEM_H__
+#include <core/memory.h>
 #include <subdev/fb.h>
+#include <subdev/mmu.h>
 #include <drm/ttm/ttm_bo_api.h>
 struct ttm_dma_tt;
......
@@ -48,6 +48,7 @@
 #include <core/tegra.h>
 #include <subdev/fb.h>
 #include <subdev/ltc.h>
+#include <subdev/mmu.h>
 struct gk20a_instobj {
         struct nvkm_memory memory;
......
@@ -266,14 +266,14 @@ nvkm_vm_map_(const struct nvkm_vmm_page *page, struct nvkm_vma *vma, u64 delta,
         }
         mutex_lock(&vmm->mutex);
-        nvkm_vmm_ptes_map(vmm, page, ((u64)vma->node->offset << 12) + delta,
-                          (u64)vma->node->length << 12, map, fn);
+        nvkm_vmm_ptes_map(vmm, page, vma->node->addr + delta,
+                          vma->node->size, map, fn);
         mutex_unlock(&vmm->mutex);
-        nvkm_memory_tags_put(vma->memory, vmm->mmu->subdev.device, &vma->tags);
-        nvkm_memory_unref(&vma->memory);
-        vma->memory = nvkm_memory_ref(map->memory);
-        vma->tags = map->tags;
+        nvkm_memory_tags_put(vma->node->memory, vmm->mmu->subdev.device, &vma->node->tags);
+        nvkm_memory_unref(&vma->node->memory);
+        vma->node->memory = nvkm_memory_ref(map->memory);
+        vma->node->tags = map->tags;
 }
 void
@@ -314,11 +314,9 @@ nvkm_mmu_ptc_init(struct nvkm_mmu *mmu)
 void
 nvkm_vm_map_at(struct nvkm_vma *vma, u64 delta, struct nvkm_mem *node)
 {
-        const struct nvkm_vmm_page *page = vma->vm->func->page;
+        const struct nvkm_vmm_page *page = &vma->vm->func->page[vma->node->page];
         if (page->desc->func->unmap) {
                 struct nvkm_vmm_map map = { .mem = node->mem };
-                while (page->shift != vma->node->type)
-                        page++;
                 nvkm_vm_map_(page, vma, delta, node, page->desc->func->mem, &map);
                 return;
         }
@@ -328,11 +326,9 @@ static void
 nvkm_vm_map_sg_table(struct nvkm_vma *vma, u64 delta, u64 length,
                      struct nvkm_mem *mem)
 {
-        const struct nvkm_vmm_page *page = vma->vm->func->page;
+        const struct nvkm_vmm_page *page = &vma->vm->func->page[vma->node->page];
         if (page->desc->func->unmap) {
                 struct nvkm_vmm_map map = { .sgl = mem->sg->sgl };
-                while (page->shift != vma->node->type)
-                        page++;
                 nvkm_vm_map_(page, vma, delta, mem, page->desc->func->sgl, &map);
                 return;
         }
@@ -342,11 +338,9 @@ static void
 nvkm_vm_map_sg(struct nvkm_vma *vma, u64 delta, u64 length,
                struct nvkm_mem *mem)
 {
-        const struct nvkm_vmm_page *page = vma->vm->func->page;
+        const struct nvkm_vmm_page *page = &vma->vm->func->page[vma->node->page];
         if (page->desc->func->unmap) {
                 struct nvkm_vmm_map map = { .dma = mem->pages };
-                while (page->shift != vma->node->type)
-                        page++;
                 nvkm_vm_map_(page, vma, delta, mem, page->desc->func->dma, &map);
                 return;
         }
@@ -364,67 +358,30 @@ nvkm_vm_map(struct nvkm_vma *vma, struct nvkm_mem *node)
         nvkm_vm_map_at(vma, 0, node);
 }
-void
-nvkm_vm_unmap_at(struct nvkm_vma *vma, u64 delta, u64 length)
-{
-        struct nvkm_vm *vm = vma->vm;
-        if (vm->func->page->desc->func->unmap) {
-                const struct nvkm_vmm_page *page = vm->func->page;
-                while (page->shift != vma->node->type)
-                        page++;
-                mutex_lock(&vm->mutex);
-                nvkm_vmm_ptes_unmap(vm, page, (vma->node->offset << 12) + delta,
-                                    vma->node->length << 12, false);
-                mutex_unlock(&vm->mutex);
-                return;
-        }
-}
 void
 nvkm_vm_unmap(struct nvkm_vma *vma)
 {
-        nvkm_vm_unmap_at(vma, 0, (u64)vma->node->length << 12);
-        nvkm_memory_tags_put(vma->memory, vma->vm->mmu->subdev.device, &vma->tags);
-        nvkm_memory_unref(&vma->memory);
+        nvkm_vmm_unmap(vma->vm, vma->node);
 }
 int
 nvkm_vm_get(struct nvkm_vm *vm, u64 size, u32 page_shift, u32 access,
             struct nvkm_vma *vma)
 {
-        u32 align = (1 << page_shift) >> 12;
-        u32 msize = size >> 12;
         int ret;
         mutex_lock(&vm->mutex);
-        ret = nvkm_mm_head(&vm->mm, 0, page_shift, msize, msize, align,
-                           &vma->node);
-        if (unlikely(ret != 0)) {
-                mutex_unlock(&vm->mutex);
-                return ret;
-        }
-        if (vm->func->page->desc->func->unmap) {
-                const struct nvkm_vmm_page *page = vm->func->page;
-                while (page->shift != page_shift)
-                        page++;
-                ret = nvkm_vmm_ptes_get(vm, page, vma->node->offset << 12,
-                                        vma->node->length << 12);
-                if (ret) {
-                        nvkm_mm_free(&vm->mm, &vma->node);
-                        mutex_unlock(&vm->mutex);
-                        return ret;
-                }
-        }
+        ret = nvkm_vmm_get_locked(vm, true, false, false, page_shift, 0,
+                                  size, &vma->node);
         mutex_unlock(&vm->mutex);
+        if (ret)
+                return ret;
         vma->memory = NULL;
         vma->tags = NULL;
         vma->vm = NULL;
         nvkm_vm_ref(vm, &vma->vm, NULL);
-        vma->offset = (u64)vma->node->offset << 12;
+        vma->offset = vma->addr = vma->node->addr;
         vma->access = access;
         return 0;
 }
@@ -432,30 +389,7 @@ nvkm_vm_get(struct nvkm_vm *vm, u64 size, u32 page_shift, u32 access,
 void
 nvkm_vm_put(struct nvkm_vma *vma)
 {
-        struct nvkm_mmu *mmu;
-        struct nvkm_vm *vm;
-        if (unlikely(vma->node == NULL))
-                return;
-        vm = vma->vm;
-        mmu = vm->mmu;
-        nvkm_memory_tags_put(vma->memory, mmu->subdev.device, &vma->tags);
-        nvkm_memory_unref(&vma->memory);
-        mutex_lock(&vm->mutex);
-        if (vm->func->page->desc->func->unmap) {
-                const struct nvkm_vmm_page *page = vm->func->page;
-                while (page->shift != vma->node->type)
-                        page++;
-                nvkm_vmm_ptes_put(vm, page, vma->node->offset << 12,
-                                  vma->node->length << 12);
-        }
-        nvkm_mm_free(&vm->mm, &vma->node);
-        mutex_unlock(&vm->mutex);
+        nvkm_vmm_put(vma->vm, &vma->node);
         nvkm_vm_ref(NULL, &vma->vm, NULL);
 }
@@ -465,26 +399,6 @@ nvkm_vm_boot(struct nvkm_vm *vm, u64 size)
         return nvkm_vmm_boot(vm);
 }
-static int
-nvkm_vm_legacy(struct nvkm_mmu *mmu, u64 offset, u64 length, u64 mm_offset,
-               u32 block, struct nvkm_vm *vm)
-{
-        u64 mm_length = (offset + length) - mm_offset;
-        int ret;
-        kref_init(&vm->refcount);
-        if (block > length)
-                block = length;
-        ret = nvkm_mm_init(&vm->mm, 0, mm_offset >> 12, mm_length >> 12,
-                           block >> 12);
-        if (ret)
-                return ret;
-        return 0;
-}
 int
 nvkm_vm_new(struct nvkm_device *device, u64 offset, u64 length, u64 mm_offset,
             struct lock_class_key *key, struct nvkm_vm **pvm)
@@ -501,46 +415,28 @@ nvkm_vm_new(struct nvkm_device *device, u64 offset, u64 length, u64 mm_offset,
                         return ret;
                 }
-                ret = nvkm_vm_legacy(mmu, offset, length, mm_offset,
-                                     (*pvm)->func->page_block ?
-                                     (*pvm)->func->page_block : 4096, *pvm);
-                if (ret)
-                        nvkm_vm_ref(NULL, pvm, NULL);
                 return ret;
         }
         return -EINVAL;
 }
-static void
-nvkm_vm_del(struct kref *kref)
-{
-        struct nvkm_vm *vm = container_of(kref, typeof(*vm), refcount);
-        nvkm_mm_fini(&vm->mm);
-        if (vm->func)
-                nvkm_vmm_dtor(vm);
-        kfree(vm);
-}
 int
 nvkm_vm_ref(struct nvkm_vm *ref, struct nvkm_vm **ptr, struct nvkm_memory *inst)
 {
         if (ref) {
-                if (ref->func->join && inst) {
-                        int ret = ref->func->join(ref, inst);
+                if (inst) {
+                        int ret = nvkm_vmm_join(ref, inst);
                         if (ret)
                                 return ret;
                 }
-                kref_get(&ref->refcount);
+                nvkm_vmm_ref(ref);
         }
         if (*ptr) {
-                if ((*ptr)->func->part && inst)
-                        (*ptr)->func->part(*ptr, inst);
-                kref_put(&(*ptr)->refcount, nvkm_vm_del);
+                nvkm_vmm_part(*ptr, inst);
+                nvkm_vmm_unref(ptr);
         }
         *ptr = ref;
@@ -553,8 +449,8 @@ nvkm_mmu_oneinit(struct nvkm_subdev *subdev)
         struct nvkm_mmu *mmu = nvkm_mmu(subdev);
         if (mmu->func->vmm.global) {
-                int ret = nvkm_vm_new(subdev->device, 0, mmu->limit, 0,
-                                      NULL, &mmu->vmm);
+                int ret = nvkm_vmm_new(subdev->device, 0, 0, NULL, 0, NULL,
+                                       "gart", &mmu->vmm);
                 if (ret)
                         return ret;
         }
@@ -576,7 +472,7 @@ nvkm_mmu_dtor(struct nvkm_subdev *subdev)
 {
         struct nvkm_mmu *mmu = nvkm_mmu(subdev);
-        nvkm_vm_ref(NULL, &mmu->vmm, NULL);
+        nvkm_vmm_unref(&mmu->vmm);
         nvkm_mmu_ptc_fini(mmu);
         return mmu;
......
@@ -153,16 +153,19 @@ int nvkm_vmm_new_(const struct nvkm_vmm_func *, struct nvkm_mmu *,
 int nvkm_vmm_ctor(const struct nvkm_vmm_func *, struct nvkm_mmu *,
                   u32 pd_header, u64 addr, u64 size, struct lock_class_key *,
                   const char *name, struct nvkm_vmm *);
-void nvkm_vmm_dtor(struct nvkm_vmm *);
-void nvkm_vmm_ptes_put(struct nvkm_vmm *, const struct nvkm_vmm_page *,
-                       u64 addr, u64 size);
-int nvkm_vmm_ptes_get(struct nvkm_vmm *, const struct nvkm_vmm_page *,
-                      u64 addr, u64 size);
+struct nvkm_vma *nvkm_vmm_node_search(struct nvkm_vmm *, u64 addr);
+int nvkm_vmm_get_locked(struct nvkm_vmm *, bool getref, bool mapref,
+                        bool sparse, u8 page, u8 align, u64 size,
+                        struct nvkm_vma **pvma);
+void nvkm_vmm_put_locked(struct nvkm_vmm *, struct nvkm_vma *);
+void nvkm_vmm_unmap_locked(struct nvkm_vmm *, struct nvkm_vma *);
+void nvkm_vmm_unmap_region(struct nvkm_vmm *vmm, struct nvkm_vma *vma);
 void nvkm_vmm_ptes_map(struct nvkm_vmm *, const struct nvkm_vmm_page *,
                        u64 addr, u64 size, struct nvkm_vmm_map *,
                        nvkm_vmm_pte_func);
-void nvkm_vmm_ptes_unmap(struct nvkm_vmm *, const struct nvkm_vmm_page *,
-                         u64 addr, u64 size, bool sparse);
+struct nvkm_vma *nvkm_vma_tail(struct nvkm_vma *, u64 tail);
+void nvkm_vmm_node_insert(struct nvkm_vmm *, struct nvkm_vma *);
 int nv04_vmm_new_(const struct nvkm_vmm_func *, struct nvkm_mmu *, u32,
                   u64, u64, void *, u32, struct lock_class_key *,
......
@@ -25,6 +25,7 @@
 #include <subdev/secboot.h>
 #include <subdev/mmu.h>
+struct nvkm_gpuobj;
 struct nvkm_secboot_func {
         int (*oneinit)(struct nvkm_secboot *);
......