Commit 6b252cf4 authored by Danilo Krummrich

drm/nouveau: nvkm/vmm: implement raw ops to manage uvmm

The new VM_BIND UAPI uses the DRM GPU VA manager to manage the VA space.
Hence, we need a way to manipulate the MMU's page tables without going
through the internal range allocator implemented by nvkm/vmm.

This patch adds a raw interface for nvkm/vmm to pass the responsibility
for managing the address space and the corresponding map/unmap/sparse
operations to the upper layers.
Reviewed-by: Dave Airlie <airlied@redhat.com>
Signed-off-by: Danilo Krummrich <dakr@redhat.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20230804182406.5222-11-dakr@redhat.com
parent 7576c4ca
...@@ -3,7 +3,10 @@ ...@@ -3,7 +3,10 @@
struct nvif_vmm_v0 { struct nvif_vmm_v0 {
__u8 version; __u8 version;
__u8 page_nr; __u8 page_nr;
__u8 managed; #define NVIF_VMM_V0_TYPE_UNMANAGED 0x00
#define NVIF_VMM_V0_TYPE_MANAGED 0x01
#define NVIF_VMM_V0_TYPE_RAW 0x02
__u8 type;
__u8 pad03[5]; __u8 pad03[5];
__u64 addr; __u64 addr;
__u64 size; __u64 size;
...@@ -17,6 +20,7 @@ struct nvif_vmm_v0 { ...@@ -17,6 +20,7 @@ struct nvif_vmm_v0 {
#define NVIF_VMM_V0_UNMAP 0x04 #define NVIF_VMM_V0_UNMAP 0x04
#define NVIF_VMM_V0_PFNMAP 0x05 #define NVIF_VMM_V0_PFNMAP 0x05
#define NVIF_VMM_V0_PFNCLR 0x06 #define NVIF_VMM_V0_PFNCLR 0x06
#define NVIF_VMM_V0_RAW 0x07
#define NVIF_VMM_V0_MTHD(i) ((i) + 0x80) #define NVIF_VMM_V0_MTHD(i) ((i) + 0x80)
struct nvif_vmm_page_v0 { struct nvif_vmm_page_v0 {
...@@ -66,6 +70,26 @@ struct nvif_vmm_unmap_v0 { ...@@ -66,6 +70,26 @@ struct nvif_vmm_unmap_v0 {
__u64 addr; __u64 addr;
}; };
struct nvif_vmm_raw_v0 {
__u8 version;
#define NVIF_VMM_RAW_V0_GET 0x0
#define NVIF_VMM_RAW_V0_PUT 0x1
#define NVIF_VMM_RAW_V0_MAP 0x2
#define NVIF_VMM_RAW_V0_UNMAP 0x3
#define NVIF_VMM_RAW_V0_SPARSE 0x4
__u8 op;
__u8 sparse;
__u8 ref;
__u8 shift;
__u32 argc;
__u8 pad01[7];
__u64 addr;
__u64 size;
__u64 offset;
__u64 memory;
__u64 argv;
};
struct nvif_vmm_pfnmap_v0 { struct nvif_vmm_pfnmap_v0 {
__u8 version; __u8 version;
__u8 page; __u8 page;
......
...@@ -4,6 +4,12 @@ ...@@ -4,6 +4,12 @@
struct nvif_mem; struct nvif_mem;
struct nvif_mmu; struct nvif_mmu;
enum nvif_vmm_type {
UNMANAGED,
MANAGED,
RAW,
};
enum nvif_vmm_get { enum nvif_vmm_get {
ADDR, ADDR,
PTES, PTES,
...@@ -30,8 +36,9 @@ struct nvif_vmm { ...@@ -30,8 +36,9 @@ struct nvif_vmm {
int page_nr; int page_nr;
}; };
int nvif_vmm_ctor(struct nvif_mmu *, const char *name, s32 oclass, bool managed, int nvif_vmm_ctor(struct nvif_mmu *, const char *name, s32 oclass,
u64 addr, u64 size, void *argv, u32 argc, struct nvif_vmm *); enum nvif_vmm_type, u64 addr, u64 size, void *argv, u32 argc,
struct nvif_vmm *);
void nvif_vmm_dtor(struct nvif_vmm *); void nvif_vmm_dtor(struct nvif_vmm *);
int nvif_vmm_get(struct nvif_vmm *, enum nvif_vmm_get, bool sparse, int nvif_vmm_get(struct nvif_vmm *, enum nvif_vmm_get, bool sparse,
u8 page, u8 align, u64 size, struct nvif_vma *); u8 page, u8 align, u64 size, struct nvif_vma *);
...@@ -39,4 +46,12 @@ void nvif_vmm_put(struct nvif_vmm *, struct nvif_vma *); ...@@ -39,4 +46,12 @@ void nvif_vmm_put(struct nvif_vmm *, struct nvif_vma *);
int nvif_vmm_map(struct nvif_vmm *, u64 addr, u64 size, void *argv, u32 argc, int nvif_vmm_map(struct nvif_vmm *, u64 addr, u64 size, void *argv, u32 argc,
struct nvif_mem *, u64 offset); struct nvif_mem *, u64 offset);
int nvif_vmm_unmap(struct nvif_vmm *, u64); int nvif_vmm_unmap(struct nvif_vmm *, u64);
int nvif_vmm_raw_get(struct nvif_vmm *vmm, u64 addr, u64 size, u8 shift);
int nvif_vmm_raw_put(struct nvif_vmm *vmm, u64 addr, u64 size, u8 shift);
int nvif_vmm_raw_map(struct nvif_vmm *vmm, u64 addr, u64 size, u8 shift,
void *argv, u32 argc, struct nvif_mem *mem, u64 offset);
int nvif_vmm_raw_unmap(struct nvif_vmm *vmm, u64 addr, u64 size,
u8 shift, bool sparse);
int nvif_vmm_raw_sparse(struct nvif_vmm *vmm, u64 addr, u64 size, bool ref);
#endif #endif
...@@ -17,6 +17,7 @@ struct nvkm_vma { ...@@ -17,6 +17,7 @@ struct nvkm_vma {
bool part:1; /* Region was split from an allocated region by map(). */ bool part:1; /* Region was split from an allocated region by map(). */
bool busy:1; /* Region busy (for temporarily preventing user access). */ bool busy:1; /* Region busy (for temporarily preventing user access). */
bool mapped:1; /* Region contains valid pages. */ bool mapped:1; /* Region contains valid pages. */
bool no_comp:1; /* Force no memory compression. */
struct nvkm_memory *memory; /* Memory currently mapped into VMA. */ struct nvkm_memory *memory; /* Memory currently mapped into VMA. */
struct nvkm_tags *tags; /* Compression tag reference. */ struct nvkm_tags *tags; /* Compression tag reference. */
}; };
...@@ -27,10 +28,26 @@ struct nvkm_vmm { ...@@ -27,10 +28,26 @@ struct nvkm_vmm {
const char *name; const char *name;
u32 debug; u32 debug;
struct kref kref; struct kref kref;
struct mutex mutex;
struct {
struct mutex vmm;
struct mutex ref;
struct mutex map;
} mutex;
u64 start; u64 start;
u64 limit; u64 limit;
struct {
struct {
u64 addr;
u64 size;
} p;
struct {
u64 addr;
u64 size;
} n;
bool raw;
} managed;
struct nvkm_vmm_pt *pd; struct nvkm_vmm_pt *pd;
struct list_head join; struct list_head join;
...@@ -70,6 +87,7 @@ struct nvkm_vmm_map { ...@@ -70,6 +87,7 @@ struct nvkm_vmm_map {
const struct nvkm_vmm_page *page; const struct nvkm_vmm_page *page;
bool no_comp;
struct nvkm_tags *tags; struct nvkm_tags *tags;
u64 next; u64 next;
u64 type; u64 type;
......
...@@ -350,7 +350,7 @@ nouveau_svmm_init(struct drm_device *dev, void *data, ...@@ -350,7 +350,7 @@ nouveau_svmm_init(struct drm_device *dev, void *data,
* VMM instead of the standard one. * VMM instead of the standard one.
*/ */
ret = nvif_vmm_ctor(&cli->mmu, "svmVmm", ret = nvif_vmm_ctor(&cli->mmu, "svmVmm",
cli->vmm.vmm.object.oclass, true, cli->vmm.vmm.object.oclass, MANAGED,
args->unmanaged_addr, args->unmanaged_size, args->unmanaged_addr, args->unmanaged_size,
&(struct gp100_vmm_v0) { &(struct gp100_vmm_v0) {
.fault_replay = true, .fault_replay = true,
......
...@@ -128,8 +128,8 @@ nouveau_vmm_fini(struct nouveau_vmm *vmm) ...@@ -128,8 +128,8 @@ nouveau_vmm_fini(struct nouveau_vmm *vmm)
int int
nouveau_vmm_init(struct nouveau_cli *cli, s32 oclass, struct nouveau_vmm *vmm) nouveau_vmm_init(struct nouveau_cli *cli, s32 oclass, struct nouveau_vmm *vmm)
{ {
int ret = nvif_vmm_ctor(&cli->mmu, "drmVmm", oclass, false, PAGE_SIZE, int ret = nvif_vmm_ctor(&cli->mmu, "drmVmm", oclass, UNMANAGED,
0, NULL, 0, &vmm->vmm); PAGE_SIZE, 0, NULL, 0, &vmm->vmm);
if (ret) if (ret)
return ret; return ret;
......
...@@ -104,6 +104,90 @@ nvif_vmm_get(struct nvif_vmm *vmm, enum nvif_vmm_get type, bool sparse, ...@@ -104,6 +104,90 @@ nvif_vmm_get(struct nvif_vmm *vmm, enum nvif_vmm_get type, bool sparse,
return ret; return ret;
} }
int
nvif_vmm_raw_get(struct nvif_vmm *vmm, u64 addr, u64 size,
u8 shift)
{
struct nvif_vmm_raw_v0 args = {
.version = 0,
.op = NVIF_VMM_RAW_V0_GET,
.addr = addr,
.size = size,
.shift = shift,
};
return nvif_object_mthd(&vmm->object, NVIF_VMM_V0_RAW,
&args, sizeof(args));
}
int
nvif_vmm_raw_put(struct nvif_vmm *vmm, u64 addr, u64 size, u8 shift)
{
struct nvif_vmm_raw_v0 args = {
.version = 0,
.op = NVIF_VMM_RAW_V0_PUT,
.addr = addr,
.size = size,
.shift = shift,
};
return nvif_object_mthd(&vmm->object, NVIF_VMM_V0_RAW,
&args, sizeof(args));
}
int
nvif_vmm_raw_map(struct nvif_vmm *vmm, u64 addr, u64 size, u8 shift,
void *argv, u32 argc, struct nvif_mem *mem, u64 offset)
{
struct nvif_vmm_raw_v0 args = {
.version = 0,
.op = NVIF_VMM_RAW_V0_MAP,
.addr = addr,
.size = size,
.shift = shift,
.memory = nvif_handle(&mem->object),
.offset = offset,
.argv = (u64)(uintptr_t)argv,
.argc = argc,
};
return nvif_object_mthd(&vmm->object, NVIF_VMM_V0_RAW,
&args, sizeof(args));
}
int
nvif_vmm_raw_unmap(struct nvif_vmm *vmm, u64 addr, u64 size,
u8 shift, bool sparse)
{
struct nvif_vmm_raw_v0 args = {
.version = 0,
.op = NVIF_VMM_RAW_V0_UNMAP,
.addr = addr,
.size = size,
.shift = shift,
.sparse = sparse,
};
return nvif_object_mthd(&vmm->object, NVIF_VMM_V0_RAW,
&args, sizeof(args));
}
int
nvif_vmm_raw_sparse(struct nvif_vmm *vmm, u64 addr, u64 size, bool ref)
{
struct nvif_vmm_raw_v0 args = {
.version = 0,
.op = NVIF_VMM_RAW_V0_SPARSE,
.addr = addr,
.size = size,
.ref = ref,
};
return nvif_object_mthd(&vmm->object, NVIF_VMM_V0_RAW,
&args, sizeof(args));
}
void void
nvif_vmm_dtor(struct nvif_vmm *vmm) nvif_vmm_dtor(struct nvif_vmm *vmm)
{ {
...@@ -112,8 +196,9 @@ nvif_vmm_dtor(struct nvif_vmm *vmm) ...@@ -112,8 +196,9 @@ nvif_vmm_dtor(struct nvif_vmm *vmm)
} }
int int
nvif_vmm_ctor(struct nvif_mmu *mmu, const char *name, s32 oclass, bool managed, nvif_vmm_ctor(struct nvif_mmu *mmu, const char *name, s32 oclass,
u64 addr, u64 size, void *argv, u32 argc, struct nvif_vmm *vmm) enum nvif_vmm_type type, u64 addr, u64 size, void *argv, u32 argc,
struct nvif_vmm *vmm)
{ {
struct nvif_vmm_v0 *args; struct nvif_vmm_v0 *args;
u32 argn = sizeof(*args) + argc; u32 argn = sizeof(*args) + argc;
...@@ -125,9 +210,18 @@ nvif_vmm_ctor(struct nvif_mmu *mmu, const char *name, s32 oclass, bool managed, ...@@ -125,9 +210,18 @@ nvif_vmm_ctor(struct nvif_mmu *mmu, const char *name, s32 oclass, bool managed,
if (!(args = kmalloc(argn, GFP_KERNEL))) if (!(args = kmalloc(argn, GFP_KERNEL)))
return -ENOMEM; return -ENOMEM;
args->version = 0; args->version = 0;
args->managed = managed;
args->addr = addr; args->addr = addr;
args->size = size; args->size = size;
switch (type) {
case UNMANAGED: args->type = NVIF_VMM_V0_TYPE_UNMANAGED; break;
case MANAGED: args->type = NVIF_VMM_V0_TYPE_MANAGED; break;
case RAW: args->type = NVIF_VMM_V0_TYPE_RAW; break;
default:
WARN_ON(1);
return -EINVAL;
}
memcpy(args->data, argv, argc); memcpy(args->data, argv, argc);
ret = nvif_object_ctor(&mmu->object, name ? name : "nvifVmm", 0, ret = nvif_object_ctor(&mmu->object, name ? name : "nvifVmm", 0,
......
...@@ -58,10 +58,13 @@ nvkm_uvmm_mthd_pfnclr(struct nvkm_uvmm *uvmm, void *argv, u32 argc) ...@@ -58,10 +58,13 @@ nvkm_uvmm_mthd_pfnclr(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
} else } else
return ret; return ret;
if (nvkm_vmm_in_managed_range(vmm, addr, size) && vmm->managed.raw)
return -EINVAL;
if (size) { if (size) {
mutex_lock(&vmm->mutex); mutex_lock(&vmm->mutex.vmm);
ret = nvkm_vmm_pfn_unmap(vmm, addr, size); ret = nvkm_vmm_pfn_unmap(vmm, addr, size);
mutex_unlock(&vmm->mutex); mutex_unlock(&vmm->mutex.vmm);
} }
return ret; return ret;
...@@ -88,10 +91,13 @@ nvkm_uvmm_mthd_pfnmap(struct nvkm_uvmm *uvmm, void *argv, u32 argc) ...@@ -88,10 +91,13 @@ nvkm_uvmm_mthd_pfnmap(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
} else } else
return ret; return ret;
if (nvkm_vmm_in_managed_range(vmm, addr, size) && vmm->managed.raw)
return -EINVAL;
if (size) { if (size) {
mutex_lock(&vmm->mutex); mutex_lock(&vmm->mutex.vmm);
ret = nvkm_vmm_pfn_map(vmm, page, addr, size, phys); ret = nvkm_vmm_pfn_map(vmm, page, addr, size, phys);
mutex_unlock(&vmm->mutex); mutex_unlock(&vmm->mutex.vmm);
} }
return ret; return ret;
...@@ -113,7 +119,10 @@ nvkm_uvmm_mthd_unmap(struct nvkm_uvmm *uvmm, void *argv, u32 argc) ...@@ -113,7 +119,10 @@ nvkm_uvmm_mthd_unmap(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
} else } else
return ret; return ret;
mutex_lock(&vmm->mutex); if (nvkm_vmm_in_managed_range(vmm, addr, 0) && vmm->managed.raw)
return -EINVAL;
mutex_lock(&vmm->mutex.vmm);
vma = nvkm_vmm_node_search(vmm, addr); vma = nvkm_vmm_node_search(vmm, addr);
if (ret = -ENOENT, !vma || vma->addr != addr) { if (ret = -ENOENT, !vma || vma->addr != addr) {
VMM_DEBUG(vmm, "lookup %016llx: %016llx", VMM_DEBUG(vmm, "lookup %016llx: %016llx",
...@@ -134,7 +143,7 @@ nvkm_uvmm_mthd_unmap(struct nvkm_uvmm *uvmm, void *argv, u32 argc) ...@@ -134,7 +143,7 @@ nvkm_uvmm_mthd_unmap(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
nvkm_vmm_unmap_locked(vmm, vma, false); nvkm_vmm_unmap_locked(vmm, vma, false);
ret = 0; ret = 0;
done: done:
mutex_unlock(&vmm->mutex); mutex_unlock(&vmm->mutex.vmm);
return ret; return ret;
} }
...@@ -159,13 +168,16 @@ nvkm_uvmm_mthd_map(struct nvkm_uvmm *uvmm, void *argv, u32 argc) ...@@ -159,13 +168,16 @@ nvkm_uvmm_mthd_map(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
} else } else
return ret; return ret;
if (nvkm_vmm_in_managed_range(vmm, addr, size) && vmm->managed.raw)
return -EINVAL;
memory = nvkm_umem_search(client, handle); memory = nvkm_umem_search(client, handle);
if (IS_ERR(memory)) { if (IS_ERR(memory)) {
VMM_DEBUG(vmm, "memory %016llx %ld\n", handle, PTR_ERR(memory)); VMM_DEBUG(vmm, "memory %016llx %ld\n", handle, PTR_ERR(memory));
return PTR_ERR(memory); return PTR_ERR(memory);
} }
mutex_lock(&vmm->mutex); mutex_lock(&vmm->mutex.vmm);
if (ret = -ENOENT, !(vma = nvkm_vmm_node_search(vmm, addr))) { if (ret = -ENOENT, !(vma = nvkm_vmm_node_search(vmm, addr))) {
VMM_DEBUG(vmm, "lookup %016llx", addr); VMM_DEBUG(vmm, "lookup %016llx", addr);
goto fail; goto fail;
...@@ -198,7 +210,7 @@ nvkm_uvmm_mthd_map(struct nvkm_uvmm *uvmm, void *argv, u32 argc) ...@@ -198,7 +210,7 @@ nvkm_uvmm_mthd_map(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
} }
} }
vma->busy = true; vma->busy = true;
mutex_unlock(&vmm->mutex); mutex_unlock(&vmm->mutex.vmm);
ret = nvkm_memory_map(memory, offset, vmm, vma, argv, argc); ret = nvkm_memory_map(memory, offset, vmm, vma, argv, argc);
if (ret == 0) { if (ret == 0) {
...@@ -207,11 +219,11 @@ nvkm_uvmm_mthd_map(struct nvkm_uvmm *uvmm, void *argv, u32 argc) ...@@ -207,11 +219,11 @@ nvkm_uvmm_mthd_map(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
return 0; return 0;
} }
mutex_lock(&vmm->mutex); mutex_lock(&vmm->mutex.vmm);
vma->busy = false; vma->busy = false;
nvkm_vmm_unmap_region(vmm, vma); nvkm_vmm_unmap_region(vmm, vma);
fail: fail:
mutex_unlock(&vmm->mutex); mutex_unlock(&vmm->mutex.vmm);
nvkm_memory_unref(&memory); nvkm_memory_unref(&memory);
return ret; return ret;
} }
...@@ -232,7 +244,7 @@ nvkm_uvmm_mthd_put(struct nvkm_uvmm *uvmm, void *argv, u32 argc) ...@@ -232,7 +244,7 @@ nvkm_uvmm_mthd_put(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
} else } else
return ret; return ret;
mutex_lock(&vmm->mutex); mutex_lock(&vmm->mutex.vmm);
vma = nvkm_vmm_node_search(vmm, args->v0.addr); vma = nvkm_vmm_node_search(vmm, args->v0.addr);
if (ret = -ENOENT, !vma || vma->addr != addr || vma->part) { if (ret = -ENOENT, !vma || vma->addr != addr || vma->part) {
VMM_DEBUG(vmm, "lookup %016llx: %016llx %d", addr, VMM_DEBUG(vmm, "lookup %016llx: %016llx %d", addr,
...@@ -248,7 +260,7 @@ nvkm_uvmm_mthd_put(struct nvkm_uvmm *uvmm, void *argv, u32 argc) ...@@ -248,7 +260,7 @@ nvkm_uvmm_mthd_put(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
nvkm_vmm_put_locked(vmm, vma); nvkm_vmm_put_locked(vmm, vma);
ret = 0; ret = 0;
done: done:
mutex_unlock(&vmm->mutex); mutex_unlock(&vmm->mutex.vmm);
return ret; return ret;
} }
...@@ -275,10 +287,10 @@ nvkm_uvmm_mthd_get(struct nvkm_uvmm *uvmm, void *argv, u32 argc) ...@@ -275,10 +287,10 @@ nvkm_uvmm_mthd_get(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
} else } else
return ret; return ret;
mutex_lock(&vmm->mutex); mutex_lock(&vmm->mutex.vmm);
ret = nvkm_vmm_get_locked(vmm, getref, mapref, sparse, ret = nvkm_vmm_get_locked(vmm, getref, mapref, sparse,
page, align, size, &vma); page, align, size, &vma);
mutex_unlock(&vmm->mutex); mutex_unlock(&vmm->mutex.vmm);
if (ret) if (ret)
return ret; return ret;
...@@ -314,6 +326,167 @@ nvkm_uvmm_mthd_page(struct nvkm_uvmm *uvmm, void *argv, u32 argc) ...@@ -314,6 +326,167 @@ nvkm_uvmm_mthd_page(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
return 0; return 0;
} }
static inline int
nvkm_uvmm_page_index(struct nvkm_uvmm *uvmm, u64 size, u8 shift, u8 *refd)
{
struct nvkm_vmm *vmm = uvmm->vmm;
const struct nvkm_vmm_page *page;
if (likely(shift)) {
for (page = vmm->func->page; page->shift; page++) {
if (shift == page->shift)
break;
}
if (!page->shift || !IS_ALIGNED(size, 1ULL << page->shift)) {
VMM_DEBUG(vmm, "page %d %016llx", shift, size);
return -EINVAL;
}
} else {
return -EINVAL;
}
*refd = page - vmm->func->page;
return 0;
}
static int
nvkm_uvmm_mthd_raw_get(struct nvkm_uvmm *uvmm, struct nvif_vmm_raw_v0 *args)
{
struct nvkm_vmm *vmm = uvmm->vmm;
u8 refd;
int ret;
if (!nvkm_vmm_in_managed_range(vmm, args->addr, args->size))
return -EINVAL;
ret = nvkm_uvmm_page_index(uvmm, args->size, args->shift, &refd);
if (ret)
return ret;
return nvkm_vmm_raw_get(vmm, args->addr, args->size, refd);
}
static int
nvkm_uvmm_mthd_raw_put(struct nvkm_uvmm *uvmm, struct nvif_vmm_raw_v0 *args)
{
struct nvkm_vmm *vmm = uvmm->vmm;
u8 refd;
int ret;
if (!nvkm_vmm_in_managed_range(vmm, args->addr, args->size))
return -EINVAL;
ret = nvkm_uvmm_page_index(uvmm, args->size, args->shift, &refd);
if (ret)
return ret;
nvkm_vmm_raw_put(vmm, args->addr, args->size, refd);
return 0;
}
static int
nvkm_uvmm_mthd_raw_map(struct nvkm_uvmm *uvmm, struct nvif_vmm_raw_v0 *args)
{
struct nvkm_client *client = uvmm->object.client;
struct nvkm_vmm *vmm = uvmm->vmm;
struct nvkm_vma vma = {
.addr = args->addr,
.size = args->size,
.used = true,
.mapref = false,
.no_comp = true,
};
struct nvkm_memory *memory;
u64 handle = args->memory;
u8 refd;
int ret;
if (!nvkm_vmm_in_managed_range(vmm, args->addr, args->size))
return -EINVAL;
ret = nvkm_uvmm_page_index(uvmm, args->size, args->shift, &refd);
if (ret)
return ret;
vma.page = vma.refd = refd;
memory = nvkm_umem_search(client, args->memory);
if (IS_ERR(memory)) {
VMM_DEBUG(vmm, "memory %016llx %ld\n", handle, PTR_ERR(memory));
return PTR_ERR(memory);
}
ret = nvkm_memory_map(memory, args->offset, vmm, &vma,
(void *)args->argv, args->argc);
nvkm_memory_unref(&vma.memory);
nvkm_memory_unref(&memory);
return ret;
}
static int
nvkm_uvmm_mthd_raw_unmap(struct nvkm_uvmm *uvmm, struct nvif_vmm_raw_v0 *args)
{
struct nvkm_vmm *vmm = uvmm->vmm;
u8 refd;
int ret;
if (!nvkm_vmm_in_managed_range(vmm, args->addr, args->size))
return -EINVAL;
ret = nvkm_uvmm_page_index(uvmm, args->size, args->shift, &refd);
if (ret)
return ret;
nvkm_vmm_raw_unmap(vmm, args->addr, args->size,
args->sparse, refd);
return 0;
}
static int
nvkm_uvmm_mthd_raw_sparse(struct nvkm_uvmm *uvmm, struct nvif_vmm_raw_v0 *args)
{
struct nvkm_vmm *vmm = uvmm->vmm;
if (!nvkm_vmm_in_managed_range(vmm, args->addr, args->size))
return -EINVAL;
return nvkm_vmm_raw_sparse(vmm, args->addr, args->size, args->ref);
}
static int
nvkm_uvmm_mthd_raw(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
{
union {
struct nvif_vmm_raw_v0 v0;
} *args = argv;
int ret = -ENOSYS;
if (!uvmm->vmm->managed.raw)
return -EINVAL;
if ((ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, true)))
return ret;
switch (args->v0.op) {
case NVIF_VMM_RAW_V0_GET:
return nvkm_uvmm_mthd_raw_get(uvmm, &args->v0);
case NVIF_VMM_RAW_V0_PUT:
return nvkm_uvmm_mthd_raw_put(uvmm, &args->v0);
case NVIF_VMM_RAW_V0_MAP:
return nvkm_uvmm_mthd_raw_map(uvmm, &args->v0);
case NVIF_VMM_RAW_V0_UNMAP:
return nvkm_uvmm_mthd_raw_unmap(uvmm, &args->v0);
case NVIF_VMM_RAW_V0_SPARSE:
return nvkm_uvmm_mthd_raw_sparse(uvmm, &args->v0);
default:
return -EINVAL;
};
}
static int static int
nvkm_uvmm_mthd(struct nvkm_object *object, u32 mthd, void *argv, u32 argc) nvkm_uvmm_mthd(struct nvkm_object *object, u32 mthd, void *argv, u32 argc)
{ {
...@@ -326,6 +499,7 @@ nvkm_uvmm_mthd(struct nvkm_object *object, u32 mthd, void *argv, u32 argc) ...@@ -326,6 +499,7 @@ nvkm_uvmm_mthd(struct nvkm_object *object, u32 mthd, void *argv, u32 argc)
case NVIF_VMM_V0_UNMAP : return nvkm_uvmm_mthd_unmap (uvmm, argv, argc); case NVIF_VMM_V0_UNMAP : return nvkm_uvmm_mthd_unmap (uvmm, argv, argc);
case NVIF_VMM_V0_PFNMAP: return nvkm_uvmm_mthd_pfnmap(uvmm, argv, argc); case NVIF_VMM_V0_PFNMAP: return nvkm_uvmm_mthd_pfnmap(uvmm, argv, argc);
case NVIF_VMM_V0_PFNCLR: return nvkm_uvmm_mthd_pfnclr(uvmm, argv, argc); case NVIF_VMM_V0_PFNCLR: return nvkm_uvmm_mthd_pfnclr(uvmm, argv, argc);
case NVIF_VMM_V0_RAW : return nvkm_uvmm_mthd_raw (uvmm, argv, argc);
case NVIF_VMM_V0_MTHD(0x00) ... NVIF_VMM_V0_MTHD(0x7f): case NVIF_VMM_V0_MTHD(0x00) ... NVIF_VMM_V0_MTHD(0x7f):
if (uvmm->vmm->func->mthd) { if (uvmm->vmm->func->mthd) {
return uvmm->vmm->func->mthd(uvmm->vmm, return uvmm->vmm->func->mthd(uvmm->vmm,
...@@ -366,10 +540,11 @@ nvkm_uvmm_new(const struct nvkm_oclass *oclass, void *argv, u32 argc, ...@@ -366,10 +540,11 @@ nvkm_uvmm_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
struct nvkm_uvmm *uvmm; struct nvkm_uvmm *uvmm;
int ret = -ENOSYS; int ret = -ENOSYS;
u64 addr, size; u64 addr, size;
bool managed; bool managed, raw;
if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, more))) { if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, more))) {
managed = args->v0.managed != 0; managed = args->v0.type == NVIF_VMM_V0_TYPE_MANAGED;
raw = args->v0.type == NVIF_VMM_V0_TYPE_RAW;
addr = args->v0.addr; addr = args->v0.addr;
size = args->v0.size; size = args->v0.size;
} else } else
...@@ -377,12 +552,13 @@ nvkm_uvmm_new(const struct nvkm_oclass *oclass, void *argv, u32 argc, ...@@ -377,12 +552,13 @@ nvkm_uvmm_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
if (!(uvmm = kzalloc(sizeof(*uvmm), GFP_KERNEL))) if (!(uvmm = kzalloc(sizeof(*uvmm), GFP_KERNEL)))
return -ENOMEM; return -ENOMEM;
nvkm_object_ctor(&nvkm_uvmm, oclass, &uvmm->object); nvkm_object_ctor(&nvkm_uvmm, oclass, &uvmm->object);
*pobject = &uvmm->object; *pobject = &uvmm->object;
if (!mmu->vmm) { if (!mmu->vmm) {
ret = mmu->func->vmm.ctor(mmu, managed, addr, size, argv, argc, ret = mmu->func->vmm.ctor(mmu, managed || raw, addr, size,
NULL, "user", &uvmm->vmm); argv, argc, NULL, "user", &uvmm->vmm);
if (ret) if (ret)
return ret; return ret;
...@@ -393,6 +569,7 @@ nvkm_uvmm_new(const struct nvkm_oclass *oclass, void *argv, u32 argc, ...@@ -393,6 +569,7 @@ nvkm_uvmm_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
uvmm->vmm = nvkm_vmm_ref(mmu->vmm); uvmm->vmm = nvkm_vmm_ref(mmu->vmm);
} }
uvmm->vmm->managed.raw = raw;
page = uvmm->vmm->func->page; page = uvmm->vmm->func->page;
args->v0.page_nr = 0; args->v0.page_nr = 0;
......
...@@ -676,41 +676,18 @@ nvkm_vmm_ptes_sparse(struct nvkm_vmm *vmm, u64 addr, u64 size, bool ref) ...@@ -676,41 +676,18 @@ nvkm_vmm_ptes_sparse(struct nvkm_vmm *vmm, u64 addr, u64 size, bool ref)
return 0; return 0;
} }
static void
nvkm_vmm_ptes_unmap_put(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
u64 addr, u64 size, bool sparse, bool pfn)
{
const struct nvkm_vmm_desc_func *func = page->desc->func;
nvkm_vmm_iter(vmm, page, addr, size, "unmap + unref",
false, pfn, nvkm_vmm_unref_ptes, NULL, NULL,
sparse ? func->sparse : func->invalid ? func->invalid :
func->unmap);
}
static int
nvkm_vmm_ptes_get_map(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
u64 addr, u64 size, struct nvkm_vmm_map *map,
nvkm_vmm_pte_func func)
{
u64 fail = nvkm_vmm_iter(vmm, page, addr, size, "ref + map", true,
false, nvkm_vmm_ref_ptes, func, map, NULL);
if (fail != ~0ULL) {
if ((size = fail - addr))
nvkm_vmm_ptes_unmap_put(vmm, page, addr, size, false, false);
return -ENOMEM;
}
return 0;
}
static void static void
nvkm_vmm_ptes_unmap(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page, nvkm_vmm_ptes_unmap(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
u64 addr, u64 size, bool sparse, bool pfn) u64 addr, u64 size, bool sparse, bool pfn)
{ {
const struct nvkm_vmm_desc_func *func = page->desc->func; const struct nvkm_vmm_desc_func *func = page->desc->func;
mutex_lock(&vmm->mutex.map);
nvkm_vmm_iter(vmm, page, addr, size, "unmap", false, pfn, nvkm_vmm_iter(vmm, page, addr, size, "unmap", false, pfn,
NULL, NULL, NULL, NULL, NULL, NULL,
sparse ? func->sparse : func->invalid ? func->invalid : sparse ? func->sparse : func->invalid ? func->invalid :
func->unmap); func->unmap);
mutex_unlock(&vmm->mutex.map);
} }
static void static void
...@@ -718,33 +695,108 @@ nvkm_vmm_ptes_map(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page, ...@@ -718,33 +695,108 @@ nvkm_vmm_ptes_map(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
u64 addr, u64 size, struct nvkm_vmm_map *map, u64 addr, u64 size, struct nvkm_vmm_map *map,
nvkm_vmm_pte_func func) nvkm_vmm_pte_func func)
{ {
mutex_lock(&vmm->mutex.map);
nvkm_vmm_iter(vmm, page, addr, size, "map", false, false, nvkm_vmm_iter(vmm, page, addr, size, "map", false, false,
NULL, func, map, NULL); NULL, func, map, NULL);
mutex_unlock(&vmm->mutex.map);
} }
static void static void
nvkm_vmm_ptes_put(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page, nvkm_vmm_ptes_put_locked(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
u64 addr, u64 size) u64 addr, u64 size)
{ {
nvkm_vmm_iter(vmm, page, addr, size, "unref", false, false, nvkm_vmm_iter(vmm, page, addr, size, "unref", false, false,
nvkm_vmm_unref_ptes, NULL, NULL, NULL); nvkm_vmm_unref_ptes, NULL, NULL, NULL);
} }
static void
nvkm_vmm_ptes_put(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
u64 addr, u64 size)
{
mutex_lock(&vmm->mutex.ref);
nvkm_vmm_ptes_put_locked(vmm, page, addr, size);
mutex_unlock(&vmm->mutex.ref);
}
static int static int
nvkm_vmm_ptes_get(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page, nvkm_vmm_ptes_get(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
u64 addr, u64 size) u64 addr, u64 size)
{ {
u64 fail = nvkm_vmm_iter(vmm, page, addr, size, "ref", true, false, u64 fail;
nvkm_vmm_ref_ptes, NULL, NULL, NULL);
mutex_lock(&vmm->mutex.ref);
fail = nvkm_vmm_iter(vmm, page, addr, size, "ref", true, false,
nvkm_vmm_ref_ptes, NULL, NULL, NULL);
if (fail != ~0ULL) { if (fail != ~0ULL) {
if (fail != addr) if (fail != addr)
nvkm_vmm_ptes_put(vmm, page, addr, fail - addr); nvkm_vmm_ptes_put_locked(vmm, page, addr, fail - addr);
mutex_unlock(&vmm->mutex.ref);
return -ENOMEM;
}
mutex_unlock(&vmm->mutex.ref);
return 0;
}
static void
__nvkm_vmm_ptes_unmap_put(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
u64 addr, u64 size, bool sparse, bool pfn)
{
const struct nvkm_vmm_desc_func *func = page->desc->func;
nvkm_vmm_iter(vmm, page, addr, size, "unmap + unref",
false, pfn, nvkm_vmm_unref_ptes, NULL, NULL,
sparse ? func->sparse : func->invalid ? func->invalid :
func->unmap);
}
static void
nvkm_vmm_ptes_unmap_put(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
u64 addr, u64 size, bool sparse, bool pfn)
{
if (vmm->managed.raw) {
nvkm_vmm_ptes_unmap(vmm, page, addr, size, sparse, pfn);
nvkm_vmm_ptes_put(vmm, page, addr, size);
} else {
__nvkm_vmm_ptes_unmap_put(vmm, page, addr, size, sparse, pfn);
}
}
static int
__nvkm_vmm_ptes_get_map(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
u64 addr, u64 size, struct nvkm_vmm_map *map,
nvkm_vmm_pte_func func)
{
u64 fail = nvkm_vmm_iter(vmm, page, addr, size, "ref + map", true,
false, nvkm_vmm_ref_ptes, func, map, NULL);
if (fail != ~0ULL) {
if ((size = fail - addr))
nvkm_vmm_ptes_unmap_put(vmm, page, addr, size, false, false);
return -ENOMEM; return -ENOMEM;
} }
return 0; return 0;
} }
static inline struct nvkm_vma * static int
nvkm_vmm_ptes_get_map(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
u64 addr, u64 size, struct nvkm_vmm_map *map,
nvkm_vmm_pte_func func)
{
int ret;
if (vmm->managed.raw) {
ret = nvkm_vmm_ptes_get(vmm, page, addr, size);
if (ret)
return ret;
nvkm_vmm_ptes_map(vmm, page, addr, size, map, func);
return 0;
} else {
return __nvkm_vmm_ptes_get_map(vmm, page, addr, size, map, func);
}
}
struct nvkm_vma *
nvkm_vma_new(u64 addr, u64 size) nvkm_vma_new(u64 addr, u64 size)
{ {
struct nvkm_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL); struct nvkm_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL);
...@@ -1045,7 +1097,9 @@ nvkm_vmm_ctor(const struct nvkm_vmm_func *func, struct nvkm_mmu *mmu, ...@@ -1045,7 +1097,9 @@ nvkm_vmm_ctor(const struct nvkm_vmm_func *func, struct nvkm_mmu *mmu,
vmm->debug = mmu->subdev.debug; vmm->debug = mmu->subdev.debug;
kref_init(&vmm->kref); kref_init(&vmm->kref);
__mutex_init(&vmm->mutex, "&vmm->mutex", key ? key : &_key); __mutex_init(&vmm->mutex.vmm, "&vmm->mutex.vmm", key ? key : &_key);
mutex_init(&vmm->mutex.ref);
mutex_init(&vmm->mutex.map);
/* Locate the smallest page size supported by the backend, it will /* Locate the smallest page size supported by the backend, it will
* have the deepest nesting of page tables. * have the deepest nesting of page tables.
...@@ -1101,6 +1155,9 @@ nvkm_vmm_ctor(const struct nvkm_vmm_func *func, struct nvkm_mmu *mmu, ...@@ -1101,6 +1155,9 @@ nvkm_vmm_ctor(const struct nvkm_vmm_func *func, struct nvkm_mmu *mmu,
if (addr && (ret = nvkm_vmm_ctor_managed(vmm, 0, addr))) if (addr && (ret = nvkm_vmm_ctor_managed(vmm, 0, addr)))
return ret; return ret;
vmm->managed.p.addr = 0;
vmm->managed.p.size = addr;
/* NVKM-managed area. */ /* NVKM-managed area. */
if (size) { if (size) {
if (!(vma = nvkm_vma_new(addr, size))) if (!(vma = nvkm_vma_new(addr, size)))
...@@ -1114,6 +1171,9 @@ nvkm_vmm_ctor(const struct nvkm_vmm_func *func, struct nvkm_mmu *mmu, ...@@ -1114,6 +1171,9 @@ nvkm_vmm_ctor(const struct nvkm_vmm_func *func, struct nvkm_mmu *mmu,
size = vmm->limit - addr; size = vmm->limit - addr;
if (size && (ret = nvkm_vmm_ctor_managed(vmm, addr, size))) if (size && (ret = nvkm_vmm_ctor_managed(vmm, addr, size)))
return ret; return ret;
vmm->managed.n.addr = addr;
vmm->managed.n.size = size;
} else { } else {
/* Address-space fully managed by NVKM, requiring calls to /* Address-space fully managed by NVKM, requiring calls to
* nvkm_vmm_get()/nvkm_vmm_put() to allocate address-space. * nvkm_vmm_get()/nvkm_vmm_put() to allocate address-space.
...@@ -1362,9 +1422,9 @@ void ...@@ -1362,9 +1422,9 @@ void
nvkm_vmm_unmap(struct nvkm_vmm *vmm, struct nvkm_vma *vma) nvkm_vmm_unmap(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
{ {
if (vma->memory) { if (vma->memory) {
mutex_lock(&vmm->mutex); mutex_lock(&vmm->mutex.vmm);
nvkm_vmm_unmap_locked(vmm, vma, false); nvkm_vmm_unmap_locked(vmm, vma, false);
mutex_unlock(&vmm->mutex); mutex_unlock(&vmm->mutex.vmm);
} }
} }
...@@ -1423,6 +1483,8 @@ nvkm_vmm_map_locked(struct nvkm_vmm *vmm, struct nvkm_vma *vma, ...@@ -1423,6 +1483,8 @@ nvkm_vmm_map_locked(struct nvkm_vmm *vmm, struct nvkm_vma *vma,
nvkm_vmm_pte_func func; nvkm_vmm_pte_func func;
int ret; int ret;
map->no_comp = vma->no_comp;
/* Make sure we won't overrun the end of the memory object. */ /* Make sure we won't overrun the end of the memory object. */
if (unlikely(nvkm_memory_size(map->memory) < map->offset + vma->size)) { if (unlikely(nvkm_memory_size(map->memory) < map->offset + vma->size)) {
VMM_DEBUG(vmm, "overrun %016llx %016llx %016llx", VMM_DEBUG(vmm, "overrun %016llx %016llx %016llx",
...@@ -1507,10 +1569,15 @@ nvkm_vmm_map(struct nvkm_vmm *vmm, struct nvkm_vma *vma, void *argv, u32 argc, ...@@ -1507,10 +1569,15 @@ nvkm_vmm_map(struct nvkm_vmm *vmm, struct nvkm_vma *vma, void *argv, u32 argc,
struct nvkm_vmm_map *map) struct nvkm_vmm_map *map)
{ {
int ret; int ret;
mutex_lock(&vmm->mutex);
if (nvkm_vmm_in_managed_range(vmm, vma->addr, vma->size) &&
vmm->managed.raw)
return nvkm_vmm_map_locked(vmm, vma, argv, argc, map);
mutex_lock(&vmm->mutex.vmm);
ret = nvkm_vmm_map_locked(vmm, vma, argv, argc, map); ret = nvkm_vmm_map_locked(vmm, vma, argv, argc, map);
vma->busy = false; vma->busy = false;
mutex_unlock(&vmm->mutex); mutex_unlock(&vmm->mutex.vmm);
return ret; return ret;
} }
...@@ -1620,9 +1687,9 @@ nvkm_vmm_put(struct nvkm_vmm *vmm, struct nvkm_vma **pvma) ...@@ -1620,9 +1687,9 @@ nvkm_vmm_put(struct nvkm_vmm *vmm, struct nvkm_vma **pvma)
{ {
struct nvkm_vma *vma = *pvma; struct nvkm_vma *vma = *pvma;
if (vma) { if (vma) {
mutex_lock(&vmm->mutex); mutex_lock(&vmm->mutex.vmm);
nvkm_vmm_put_locked(vmm, vma); nvkm_vmm_put_locked(vmm, vma);
mutex_unlock(&vmm->mutex); mutex_unlock(&vmm->mutex.vmm);
*pvma = NULL; *pvma = NULL;
} }
} }
...@@ -1769,9 +1836,49 @@ int ...@@ -1769,9 +1836,49 @@ int
nvkm_vmm_get(struct nvkm_vmm *vmm, u8 page, u64 size, struct nvkm_vma **pvma) nvkm_vmm_get(struct nvkm_vmm *vmm, u8 page, u64 size, struct nvkm_vma **pvma)
{ {
int ret; int ret;
mutex_lock(&vmm->mutex); mutex_lock(&vmm->mutex.vmm);
ret = nvkm_vmm_get_locked(vmm, false, true, false, page, 0, size, pvma); ret = nvkm_vmm_get_locked(vmm, false, true, false, page, 0, size, pvma);
mutex_unlock(&vmm->mutex); mutex_unlock(&vmm->mutex.vmm);
return ret;
}
void
nvkm_vmm_raw_unmap(struct nvkm_vmm *vmm, u64 addr, u64 size,
bool sparse, u8 refd)
{
const struct nvkm_vmm_page *page = &vmm->func->page[refd];
nvkm_vmm_ptes_unmap(vmm, page, addr, size, sparse, false);
}
void
nvkm_vmm_raw_put(struct nvkm_vmm *vmm, u64 addr, u64 size, u8 refd)
{
const struct nvkm_vmm_page *page = vmm->func->page;
nvkm_vmm_ptes_put(vmm, &page[refd], addr, size);
}
int
nvkm_vmm_raw_get(struct nvkm_vmm *vmm, u64 addr, u64 size, u8 refd)
{
const struct nvkm_vmm_page *page = vmm->func->page;
if (unlikely(!size))
return -EINVAL;
return nvkm_vmm_ptes_get(vmm, &page[refd], addr, size);
}
int
nvkm_vmm_raw_sparse(struct nvkm_vmm *vmm, u64 addr, u64 size, bool ref)
{
int ret;
mutex_lock(&vmm->mutex.ref);
ret = nvkm_vmm_ptes_sparse(vmm, addr, size, ref);
mutex_unlock(&vmm->mutex.ref);
return ret; return ret;
} }
...@@ -1779,9 +1886,9 @@ void ...@@ -1779,9 +1886,9 @@ void
nvkm_vmm_part(struct nvkm_vmm *vmm, struct nvkm_memory *inst) nvkm_vmm_part(struct nvkm_vmm *vmm, struct nvkm_memory *inst)
{ {
if (inst && vmm && vmm->func->part) { if (inst && vmm && vmm->func->part) {
mutex_lock(&vmm->mutex); mutex_lock(&vmm->mutex.vmm);
vmm->func->part(vmm, inst); vmm->func->part(vmm, inst);
mutex_unlock(&vmm->mutex); mutex_unlock(&vmm->mutex.vmm);
} }
} }
...@@ -1790,9 +1897,9 @@ nvkm_vmm_join(struct nvkm_vmm *vmm, struct nvkm_memory *inst) ...@@ -1790,9 +1897,9 @@ nvkm_vmm_join(struct nvkm_vmm *vmm, struct nvkm_memory *inst)
{ {
int ret = 0; int ret = 0;
if (vmm->func->join) { if (vmm->func->join) {
mutex_lock(&vmm->mutex); mutex_lock(&vmm->mutex.vmm);
ret = vmm->func->join(vmm, inst); ret = vmm->func->join(vmm, inst);
mutex_unlock(&vmm->mutex); mutex_unlock(&vmm->mutex.vmm);
} }
return ret; return ret;
} }
......
...@@ -163,6 +163,7 @@ int nvkm_vmm_new_(const struct nvkm_vmm_func *, struct nvkm_mmu *, ...@@ -163,6 +163,7 @@ int nvkm_vmm_new_(const struct nvkm_vmm_func *, struct nvkm_mmu *,
u32 pd_header, bool managed, u64 addr, u64 size, u32 pd_header, bool managed, u64 addr, u64 size,
struct lock_class_key *, const char *name, struct lock_class_key *, const char *name,
struct nvkm_vmm **); struct nvkm_vmm **);
struct nvkm_vma *nvkm_vma_new(u64 addr, u64 size);
struct nvkm_vma *nvkm_vmm_node_search(struct nvkm_vmm *, u64 addr); struct nvkm_vma *nvkm_vmm_node_search(struct nvkm_vmm *, u64 addr);
struct nvkm_vma *nvkm_vmm_node_split(struct nvkm_vmm *, struct nvkm_vma *, struct nvkm_vma *nvkm_vmm_node_split(struct nvkm_vmm *, struct nvkm_vma *,
u64 addr, u64 size); u64 addr, u64 size);
...@@ -173,6 +174,30 @@ void nvkm_vmm_put_locked(struct nvkm_vmm *, struct nvkm_vma *); ...@@ -173,6 +174,30 @@ void nvkm_vmm_put_locked(struct nvkm_vmm *, struct nvkm_vma *);
void nvkm_vmm_unmap_locked(struct nvkm_vmm *, struct nvkm_vma *, bool pfn); void nvkm_vmm_unmap_locked(struct nvkm_vmm *, struct nvkm_vma *, bool pfn);
void nvkm_vmm_unmap_region(struct nvkm_vmm *, struct nvkm_vma *); void nvkm_vmm_unmap_region(struct nvkm_vmm *, struct nvkm_vma *);
int nvkm_vmm_raw_get(struct nvkm_vmm *vmm, u64 addr, u64 size, u8 refd);
void nvkm_vmm_raw_put(struct nvkm_vmm *vmm, u64 addr, u64 size, u8 refd);
void nvkm_vmm_raw_unmap(struct nvkm_vmm *vmm, u64 addr, u64 size,
bool sparse, u8 refd);
int nvkm_vmm_raw_sparse(struct nvkm_vmm *, u64 addr, u64 size, bool ref);
static inline bool
nvkm_vmm_in_managed_range(struct nvkm_vmm *vmm, u64 start, u64 size)
{
u64 p_start = vmm->managed.p.addr;
u64 p_end = p_start + vmm->managed.p.size;
u64 n_start = vmm->managed.n.addr;
u64 n_end = n_start + vmm->managed.n.size;
u64 end = start + size;
if (start >= p_start && end <= p_end)
return true;
if (start >= n_start && end <= n_end)
return true;
return false;
}
#define NVKM_VMM_PFN_ADDR 0xfffffffffffff000ULL #define NVKM_VMM_PFN_ADDR 0xfffffffffffff000ULL
#define NVKM_VMM_PFN_ADDR_SHIFT 12 #define NVKM_VMM_PFN_ADDR_SHIFT 12
#define NVKM_VMM_PFN_APER 0x00000000000000f0ULL #define NVKM_VMM_PFN_APER 0x00000000000000f0ULL
......
...@@ -287,15 +287,17 @@ gf100_vmm_valid(struct nvkm_vmm *vmm, void *argv, u32 argc, ...@@ -287,15 +287,17 @@ gf100_vmm_valid(struct nvkm_vmm *vmm, void *argv, u32 argc,
return -EINVAL; return -EINVAL;
} }
ret = nvkm_memory_tags_get(memory, device, tags, if (!map->no_comp) {
nvkm_ltc_tags_clear, ret = nvkm_memory_tags_get(memory, device, tags,
&map->tags); nvkm_ltc_tags_clear,
if (ret) { &map->tags);
VMM_DEBUG(vmm, "comp %d", ret); if (ret) {
return ret; VMM_DEBUG(vmm, "comp %d", ret);
return ret;
}
} }
if (map->tags->mn) { if (!map->no_comp && map->tags->mn) {
u64 tags = map->tags->mn->offset + (map->offset >> 17); u64 tags = map->tags->mn->offset + (map->offset >> 17);
if (page->shift == 17 || !gm20x) { if (page->shift == 17 || !gm20x) {
map->type |= tags << 44; map->type |= tags << 44;
......
...@@ -453,15 +453,17 @@ gp100_vmm_valid(struct nvkm_vmm *vmm, void *argv, u32 argc, ...@@ -453,15 +453,17 @@ gp100_vmm_valid(struct nvkm_vmm *vmm, void *argv, u32 argc,
return -EINVAL; return -EINVAL;
} }
ret = nvkm_memory_tags_get(memory, device, tags, if (!map->no_comp) {
nvkm_ltc_tags_clear, ret = nvkm_memory_tags_get(memory, device, tags,
&map->tags); nvkm_ltc_tags_clear,
if (ret) { &map->tags);
VMM_DEBUG(vmm, "comp %d", ret); if (ret) {
return ret; VMM_DEBUG(vmm, "comp %d", ret);
return ret;
}
} }
if (map->tags->mn) { if (!map->no_comp && map->tags->mn) {
tags = map->tags->mn->offset + (map->offset >> 16); tags = map->tags->mn->offset + (map->offset >> 16);
map->ctag |= ((1ULL << page->shift) >> 16) << 36; map->ctag |= ((1ULL << page->shift) >> 16) << 36;
map->type |= tags << 36; map->type |= tags << 36;
......
...@@ -296,19 +296,22 @@ nv50_vmm_valid(struct nvkm_vmm *vmm, void *argv, u32 argc, ...@@ -296,19 +296,22 @@ nv50_vmm_valid(struct nvkm_vmm *vmm, void *argv, u32 argc,
return -EINVAL; return -EINVAL;
} }
ret = nvkm_memory_tags_get(memory, device, tags, NULL, if (!map->no_comp) {
&map->tags); ret = nvkm_memory_tags_get(memory, device, tags, NULL,
if (ret) { &map->tags);
VMM_DEBUG(vmm, "comp %d", ret); if (ret) {
return ret; VMM_DEBUG(vmm, "comp %d", ret);
} return ret;
}
if (map->tags->mn) { if (map->tags->mn) {
u32 tags = map->tags->mn->offset + (map->offset >> 16); u32 tags = map->tags->mn->offset +
map->ctag |= (u64)comp << 49; (map->offset >> 16);
map->type |= (u64)comp << 47; map->ctag |= (u64)comp << 49;
map->type |= (u64)tags << 49; map->type |= (u64)comp << 47;
map->next |= map->ctag; map->type |= (u64)tags << 49;
map->next |= map->ctag;
}
} }
} }
......
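
For illustration only, here is a minimal sketch (not part of this commit) of how an upper layer such as the VM_BIND code could drive the new raw interface declared above: reference the page tables for a client-managed range, map a memory object into it, and later tear the mapping down again. The function names example_raw_bind()/example_raw_unbind() and the empty argv/argc map arguments are assumptions made for the sketch; real callers pass the backend-specific map arguments and a shift matching one of the VMM's supported page sizes.

/*
 * Example only -- not part of this commit.  Assumes the nvif/vmm.h
 * declarations above, that @vmm was constructed with
 * nvif_vmm_ctor(..., RAW, ...), and that @addr/@size lie within a
 * client-managed region, aligned to the page size selected by @shift.
 * Backend-specific map arguments are omitted (argv = NULL, argc = 0)
 * to keep the sketch short.
 */
static int example_raw_bind(struct nvif_vmm *vmm, struct nvif_mem *mem,
			    u64 addr, u64 size, u8 shift)
{
	int ret;

	/* Reference the page table structures backing [addr, addr + size). */
	ret = nvif_vmm_raw_get(vmm, addr, size, shift);
	if (ret)
		return ret;

	/* Map the memory object at offset 0 into that range. */
	ret = nvif_vmm_raw_map(vmm, addr, size, shift, NULL, 0, mem, 0);
	if (ret)
		nvif_vmm_raw_put(vmm, addr, size, shift);

	return ret;
}

static void example_raw_unbind(struct nvif_vmm *vmm, u64 addr, u64 size,
			       u8 shift)
{
	/* Unmap the PTEs (no sparse semantics), then drop the references. */
	nvif_vmm_raw_unmap(vmm, addr, size, shift, false);
	nvif_vmm_raw_put(vmm, addr, size, shift);
}

A sparse reservation would presumably be set up beforehand with nvif_vmm_raw_sparse(vmm, addr, size, true) and released afterwards with the same call and ref = false.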