Commit 632b740c authored by Ben Skeggs

drm/nouveau/mmu: remove old vmm frontend

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
parent 7dc6a446
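This commit removes the legacy nvkm_vm_* wrappers (nvkm_vm_new/ref/boot/get/put/map/map_at/unmap) and struct nvkm_mem, drops the nvkm_fb memtype_valid hooks that served them, folds the gf100 storage-type table into gf100_mmu_kind(), and deletes the now-unused limit/dma_bits/lpg_shift fields from the MMU structures; callers use the nvkm_vmm interface directly. As rough orientation, here is an editor's sketch (not part of the commit) of how the old allocate/map/unmap/free sequence translates, inferred from call sites visible in this diff; the helper name, page shift, and NULL mapping arguments are illustrative only:

/* Editor's sketch, not part of this commit: the legacy sequence expressed
 * with the surviving nvkm_vmm interface; values are illustrative. */
static int
vmm_frontend_example(struct nvkm_vmm *vmm, struct nvkm_memory *memory)
{
	struct nvkm_vmm_map map = { .memory = memory };
	struct nvkm_vma *vma;
	int ret;

	/* was: nvkm_vm_get(vm, size, page_shift, access, &vma) */
	mutex_lock(&vmm->mutex);
	ret = nvkm_vmm_get_locked(vmm, true, false, false, 12, 0,
				  nvkm_memory_size(memory), &vma);
	mutex_unlock(&vmm->mutex);
	if (ret)
		return ret;

	/* was: nvkm_vm_map(vma, mem); argv/argc now carry the per-backend
	 * nv50_vmm_map_v0/gf100_vmm_map_v0 arguments that the removed
	 * nvkm_vm_map_() used to build internally */
	ret = nvkm_vmm_map(vmm, vma, NULL, 0, &map);

	/* was: nvkm_vm_unmap(vma); nvkm_vm_put(vma) */
	if (ret == 0)
		nvkm_vmm_unmap(vmm, vma);
	nvkm_vmm_put(vmm, &vma);
	return ret;
}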
@@ -63,10 +63,8 @@ u64 nvif_device_time(struct nvif_device *);
#define nvxx_therm(a) nvxx_device(a)->therm
#define nvxx_volt(a) nvxx_device(a)->volt
#include <core/device.h>
#include <engine/fifo.h>
#include <engine/gr.h>
#include <engine/sw.h>
#define nvxx_fifo(a) nvxx_device(a)->fifo
#define nvxx_gr(a) nvxx_device(a)->gr
......
@@ -16,8 +16,6 @@ struct nvkm_client {
void *data;
int (*ntfy)(const void *, u32, const void *, u32);
struct nvkm_vm *vm;
struct list_head umem;
spinlock_t lock;
};
......
#ifndef __NVKM_OS_H__
#define __NVKM_OS_H__
#include <nvif/os.h>
#define nvkm_vmm nvkm_vm
#ifdef __BIG_ENDIAN
#define ioread16_native ioread16be
@@ -21,5 +20,4 @@
iowrite32_native(lower_32_bits(_v), &_p[0]); \
iowrite32_native(upper_32_bits(_v), &_p[1]); \
} while(0)
#endif
@@ -20,17 +20,6 @@
#define NVKM_RAM_TYPE_VM 0x7f
#define NV_MEM_COMP_VM 0x03
struct nvkm_mem {
struct nvkm_mm_node *mem;
dma_addr_t *pages;
u32 memtype;
u64 offset;
u64 size;
struct sg_table *sg;
struct nvkm_memory *memory;
};
struct nvkm_fb_tile {
struct nvkm_mm_node *tag;
u32 addr;
@@ -57,7 +46,6 @@ struct nvkm_fb {
struct nvkm_memory *mmu_wr;
};
bool nvkm_fb_memtype_valid(struct nvkm_fb *, u32 memtype);
void nvkm_fb_tile_init(struct nvkm_fb *, int region, u32 addr, u32 size,
u32 pitch, u32 flags, struct nvkm_fb_tile *);
void nvkm_fb_tile_fini(struct nvkm_fb *, int region, struct nvkm_fb_tile *);
@@ -163,6 +151,4 @@ struct nvkm_ram_func {
int (*prog)(struct nvkm_ram *);
void (*tidy)(struct nvkm_ram *);
};
extern const u8 gf100_pte_storage_type_map[256];
#endif
#ifndef __NVKM_MMU_H__
#define __NVKM_MMU_H__
#include <core/subdev.h>
struct nvkm_mem;
struct nvkm_vm_pgt {
struct nvkm_memory *mem[2];
u32 refcount[2];
};
struct nvkm_vma {
struct list_head head;
@@ -24,14 +18,9 @@ struct nvkm_vma {
bool busy:1; /* Region busy (for temporarily preventing user access). */
struct nvkm_memory *memory; /* Memory currently mapped into VMA. */
struct nvkm_tags *tags; /* Compression tag reference. */
struct nvkm_vma *node;
struct nvkm_vm *vm;
u64 offset;
u32 access;
};
struct nvkm_vm {
struct nvkm_vmm {
const struct nvkm_vmm_func *func;
struct nvkm_mmu *mmu;
const char *name;
@@ -56,17 +45,6 @@ struct nvkm_vm {
void *nullp;
};
int nvkm_vm_new(struct nvkm_device *, u64 offset, u64 length, u64 mm_offset,
struct lock_class_key *, struct nvkm_vm **);
int nvkm_vm_ref(struct nvkm_vm *, struct nvkm_vm **, struct nvkm_memory *inst);
int nvkm_vm_boot(struct nvkm_vm *, u64 size);
int nvkm_vm_get(struct nvkm_vm *, u64 size, u32 page_shift, u32 access,
struct nvkm_vma *);
void nvkm_vm_put(struct nvkm_vma *);
void nvkm_vm_map(struct nvkm_vma *, struct nvkm_mem *);
void nvkm_vm_map_at(struct nvkm_vma *, u64 offset, struct nvkm_mem *);
void nvkm_vm_unmap(struct nvkm_vma *);
int nvkm_vmm_new(struct nvkm_device *, u64 addr, u64 size, void *argv, u32 argc,
struct lock_class_key *, const char *name, struct nvkm_vmm **);
struct nvkm_vmm *nvkm_vmm_ref(struct nvkm_vmm *);
@@ -105,9 +83,7 @@ struct nvkm_mmu {
const struct nvkm_mmu_func *func;
struct nvkm_subdev subdev;
u64 limit;
u8 dma_bits;
u8 lpg_shift;
int heap_nr;
struct {
......
@@ -271,12 +271,6 @@ nouveau_cli_init(struct nouveau_drm *drm, const char *sname,
}
cli->mem = &mems[ret];
if (1) {
cli->vm = cli->vmm.vm;
nvxx_client(&cli->base)->vm = cli->vm;
}
return 0;
done:
if (ret)
......
@@ -97,7 +97,6 @@ struct nouveau_cli {
struct nouveau_vmm vmm;
const struct nvif_mclass *mem;
struct nvkm_vm *vm;
struct list_head head;
void *abi16;
struct list_head objects;
......
@@ -267,18 +267,11 @@ int
nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_cli *cli = nouveau_cli(file_priv);
struct nvkm_fb *fb = nvxx_fb(&drm->client.device);
struct drm_nouveau_gem_new *req = data;
struct nouveau_bo *nvbo = NULL;
int ret = 0;
if (!nvkm_fb_memtype_valid(fb, req->info.tile_flags)) {
NV_PRINTK(err, cli, "bad page flags: 0x%08x\n", req->info.tile_flags);
return -EINVAL;
}
ret = nouveau_gem_new(cli, req->info.size, req->align,
req->info.domain, req->info.tile_mode,
req->info.tile_flags, &nvbo);
......
@@ -131,6 +131,5 @@ nouveau_vmm_init(struct nouveau_cli *cli, s32 oclass, struct nouveau_vmm *vmm)
return ret;
vmm->cli = cli;
vmm->vm = nvkm_uvmm(vmm->vmm.object.priv)->vmm;
return 0;
}
#ifndef __NOUVEAU_VMA_H__
#define __NOUVEAU_VMA_H__
#include <subdev/mmu/uvmm.h>
#include <nvif/vmm.h>
struct nouveau_bo;
struct nouveau_mem;
......
@@ -31,12 +31,6 @@
#include <engine/gr.h>
#include <engine/mpeg.h>
bool
nvkm_fb_memtype_valid(struct nvkm_fb *fb, u32 memtype)
{
return fb->func->memtype_valid(fb, memtype);
}
void
nvkm_fb_tile_fini(struct nvkm_fb *fb, int region, struct nvkm_fb_tile *tile)
{
......
@@ -27,15 +27,6 @@
#include <core/memory.h>
#include <core/option.h>
extern const u8 gf100_pte_storage_type_map[256];
bool
gf100_fb_memtype_valid(struct nvkm_fb *fb, u32 tile_flags)
{
u8 memtype = (tile_flags & 0x0000ff00) >> 8;
return likely((gf100_pte_storage_type_map[memtype] != 0xff));
}
void
gf100_fb_intr(struct nvkm_fb *base)
{
@@ -140,7 +131,6 @@ gf100_fb = {
.init_page = gf100_fb_init_page,
.intr = gf100_fb_intr,
.ram_new = gf100_ram_new,
.memtype_valid = gf100_fb_memtype_valid,
.default_bigpage = 17,
};
......
@@ -32,7 +32,6 @@ gf108_fb = {
.init_page = gf100_fb_init_page,
.intr = gf100_fb_intr,
.ram_new = gf108_ram_new,
.memtype_valid = gf100_fb_memtype_valid,
.default_bigpage = 17,
};
......
@@ -32,7 +32,6 @@ gk104_fb = {
.init_page = gf100_fb_init_page,
.intr = gf100_fb_intr,
.ram_new = gk104_ram_new,
.memtype_valid = gf100_fb_memtype_valid,
.default_bigpage = 17,
};
......
@@ -30,7 +30,6 @@ gk20a_fb = {
.init = gf100_fb_init,
.init_page = gf100_fb_init_page,
.intr = gf100_fb_intr,
.memtype_valid = gf100_fb_memtype_valid,
.default_bigpage = 17,
};
......
@@ -32,7 +32,6 @@ gm107_fb = {
.init_page = gf100_fb_init_page,
.intr = gf100_fb_intr,
.ram_new = gm107_ram_new,
.memtype_valid = gf100_fb_memtype_valid,
.default_bigpage = 17,
};
......
@@ -65,7 +65,6 @@ gm200_fb = {
.init_page = gm200_fb_init_page,
.intr = gf100_fb_intr,
.ram_new = gm200_ram_new,
.memtype_valid = gf100_fb_memtype_valid,
.default_bigpage = 0 /* per-instance. */,
};
......
@@ -30,7 +30,6 @@ gm20b_fb = {
.init = gm200_fb_init,
.init_page = gm200_fb_init_page,
.intr = gf100_fb_intr,
.memtype_valid = gf100_fb_memtype_valid,
.default_bigpage = 0 /* per-instance. */,
};
......
@@ -59,7 +59,6 @@ gp100_fb = {
.init_page = gm200_fb_init_page,
.init_unkn = gp100_fb_init_unkn,
.ram_new = gp100_ram_new,
.memtype_valid = gf100_fb_memtype_valid,
};
int
......
@@ -33,7 +33,6 @@ gp102_fb = {
.init = gp100_fb_init,
.init_page = gm200_fb_init_page,
.ram_new = gp100_ram_new,
.memtype_valid = gf100_fb_memtype_valid,
};
int
......
@@ -28,7 +28,6 @@ gp10b_fb = {
.init = gm200_fb_init,
.init_page = gm200_fb_init_page,
.intr = gf100_fb_intr,
.memtype_valid = gf100_fb_memtype_valid,
};
int
......
@@ -25,14 +25,6 @@
#include "ram.h"
#include "regsnv04.h"
bool
nv04_fb_memtype_valid(struct nvkm_fb *fb, u32 tile_flags)
{
if (!(tile_flags & 0xff00))
return true;
return false;
}
static void
nv04_fb_init(struct nvkm_fb *fb)
{
@@ -49,7 +41,6 @@ static const struct nvkm_fb_func
nv04_fb = {
.init = nv04_fb_init,
.ram_new = nv04_ram_new,
.memtype_valid = nv04_fb_memtype_valid,
};
int
......
@@ -61,7 +61,6 @@ nv10_fb = {
.tile.fini = nv10_fb_tile_fini,
.tile.prog = nv10_fb_tile_prog,
.ram_new = nv10_ram_new,
.memtype_valid = nv04_fb_memtype_valid,
};
int
......
@@ -33,7 +33,6 @@ nv1a_fb = {
.tile.fini = nv10_fb_tile_fini,
.tile.prog = nv10_fb_tile_prog,
.ram_new = nv1a_ram_new,
.memtype_valid = nv04_fb_memtype_valid,
};
int
......
@@ -93,7 +93,6 @@ nv20_fb = {
.tile.fini = nv20_fb_tile_fini,
.tile.prog = nv20_fb_tile_prog,
.ram_new = nv20_ram_new,
.memtype_valid = nv04_fb_memtype_valid,
};
int
......
@@ -51,7 +51,6 @@ nv25_fb = {
.tile.fini = nv20_fb_tile_fini,
.tile.prog = nv20_fb_tile_prog,
.ram_new = nv20_ram_new,
.memtype_valid = nv04_fb_memtype_valid,
};
int
......
@@ -124,7 +124,6 @@ nv30_fb = {
.tile.fini = nv20_fb_tile_fini,
.tile.prog = nv20_fb_tile_prog,
.ram_new = nv20_ram_new,
.memtype_valid = nv04_fb_memtype_valid,
};
int
......
@@ -53,7 +53,6 @@ nv35_fb = {
.tile.fini = nv20_fb_tile_fini,
.tile.prog = nv20_fb_tile_prog,
.ram_new = nv20_ram_new,
.memtype_valid = nv04_fb_memtype_valid,
};
int
......
@@ -53,7 +53,6 @@ nv36_fb = {
.tile.fini = nv20_fb_tile_fini,
.tile.prog = nv20_fb_tile_prog,
.ram_new = nv20_ram_new,
.memtype_valid = nv04_fb_memtype_valid,
};
int
......
@@ -59,7 +59,6 @@ nv40_fb = {
.tile.fini = nv20_fb_tile_fini,
.tile.prog = nv20_fb_tile_prog,
.ram_new = nv40_ram_new,
.memtype_valid = nv04_fb_memtype_valid,
};
int
......
@@ -53,7 +53,6 @@ nv41_fb = {
.tile.fini = nv20_fb_tile_fini,
.tile.prog = nv41_fb_tile_prog,
.ram_new = nv41_ram_new,
.memtype_valid = nv04_fb_memtype_valid,
};
int
......
@@ -62,7 +62,6 @@ nv44_fb = {
.tile.fini = nv20_fb_tile_fini,
.tile.prog = nv44_fb_tile_prog,
.ram_new = nv44_ram_new,
.memtype_valid = nv04_fb_memtype_valid,
};
int
......
@@ -48,7 +48,6 @@ nv46_fb = {
.tile.fini = nv20_fb_tile_fini,
.tile.prog = nv44_fb_tile_prog,
.ram_new = nv44_ram_new,
.memtype_valid = nv04_fb_memtype_valid,
};
int
......
@@ -36,7 +36,6 @@ nv47_fb = {
.tile.fini = nv20_fb_tile_fini,
.tile.prog = nv41_fb_tile_prog,
.ram_new = nv41_ram_new,
.memtype_valid = nv04_fb_memtype_valid,
};
int
......
@@ -36,7 +36,6 @@ nv49_fb = {
.tile.fini = nv20_fb_tile_fini,
.tile.prog = nv41_fb_tile_prog,
.ram_new = nv49_ram_new,
.memtype_valid = nv04_fb_memtype_valid,
};
int
......
@@ -34,7 +34,6 @@ nv4e_fb = {
.tile.fini = nv20_fb_tile_fini,
.tile.prog = nv44_fb_tile_prog,
.ram_new = nv44_ram_new,
.memtype_valid = nv04_fb_memtype_valid,
};
int
......
@@ -28,18 +28,6 @@
#include <core/enum.h>
#include <engine/fifo.h>
int
nv50_fb_memtype[0x80] = {
1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1, 1, 1, 1, 0, 0, 0, 0, 2, 2, 2, 2, 0, 0, 0, 0,
1, 1, 1, 1, 1, 1, 1, 0, 2, 2, 2, 2, 2, 2, 2, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 0, 0,
0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 2, 2, 2, 2,
1, 0, 2, 0, 1, 0, 2, 0, 1, 1, 2, 2, 1, 1, 0, 0
};
static int
nv50_fb_ram_new(struct nvkm_fb *base, struct nvkm_ram **pram)
{
@@ -47,12 +35,6 @@ nv50_fb_ram_new(struct nvkm_fb *base, struct nvkm_ram **pram)
return fb->func->ram_new(&fb->base, pram);
}
static bool
nv50_fb_memtype_valid(struct nvkm_fb *fb, u32 memtype)
{
return nv50_fb_memtype[(memtype & 0xff00) >> 8] != 0;
}
static const struct nvkm_enum vm_dispatch_subclients[] = {
{ 0x00000000, "GRCTX" },
{ 0x00000001, "NOTIFY" },
@@ -276,7 +258,6 @@ nv50_fb_ = {
.init = nv50_fb_init,
.intr = nv50_fb_intr,
.ram_new = nv50_fb_ram_new,
.memtype_valid = nv50_fb_memtype_valid,
};
int
......
@@ -18,5 +18,4 @@ struct nv50_fb_func {
int nv50_fb_new_(const struct nv50_fb_func *, struct nvkm_device *, int index,
struct nvkm_fb **pfb);
extern int nv50_fb_memtype[0x80];
#endif
@@ -25,8 +25,6 @@ struct nvkm_fb_func {
int (*ram_new)(struct nvkm_fb *, struct nvkm_ram **);
bool (*memtype_valid)(struct nvkm_fb *, u32 memtype);
u8 default_bigpage;
};
@@ -36,8 +34,6 @@ int nvkm_fb_new_(const struct nvkm_fb_func *, struct nvkm_device *device,
int index, struct nvkm_fb **);
int nvkm_fb_bios_memtype(struct nvkm_bios *);
bool nv04_fb_memtype_valid(struct nvkm_fb *, u32 memtype);
void nv10_fb_tile_init(struct nvkm_fb *, int i, u32 addr, u32 size,
u32 pitch, u32 flags, struct nvkm_fb_tile *);
void nv10_fb_tile_fini(struct nvkm_fb *, int i, struct nvkm_fb_tile *);
@@ -67,7 +63,6 @@ void nv46_fb_tile_init(struct nvkm_fb *, int i, u32 addr, u32 size,
int gf100_fb_oneinit(struct nvkm_fb *);
int gf100_fb_init_page(struct nvkm_fb *);
bool gf100_fb_memtype_valid(struct nvkm_fb *, u32);
int gm200_fb_init_page(struct nvkm_fb *);
#endif
@@ -45,15 +45,6 @@ nvkm_vram_map(struct nvkm_memory *memory, u64 offset, struct nvkm_vmm *vmm,
.mem = vram->mn,
};
if (vma->vm) {
struct nvkm_mem mem = {
.mem = vram->mn,
.memory = &vram->memory,
};
nvkm_vm_map_at(vma, offset, &mem);
return 0;
}
return nvkm_vmm_map(vmm, vma, argv, argc, &map);
}
......
@@ -44,9 +44,7 @@
#include "priv.h"
#include <core/memory.h>
#include <core/mm.h>
#include <core/tegra.h>
#include <subdev/fb.h>
#include <subdev/ltc.h>
#include <subdev/mmu.h>
@@ -290,15 +288,6 @@ gk20a_instobj_map(struct nvkm_memory *memory, u64 offset, struct nvkm_vmm *vmm,
.mem = node->mn,
};
if (vma->vm) {
struct nvkm_mem mem = {
.mem = node->mn,
.memory = &node->memory,
};
nvkm_vm_map_at(vma, 0, &mem);
return 0;
}
return nvkm_vmm_map(vmm, vma, argv, argc, &map);
}
......
@@ -216,67 +216,6 @@ nvkm_mmu_ptc_get(struct nvkm_mmu *mmu, u32 size, u32 align, bool zero)
return pt;
}
static void
nvkm_vm_map_(const struct nvkm_vmm_page *page, struct nvkm_vma *vma, u64 delta,
struct nvkm_mem *mem, nvkm_vmm_pte_func fn,
struct nvkm_vmm_map *map)
{
union {
struct nv50_vmm_map_v0 nv50;
struct gf100_vmm_map_v0 gf100;
} args;
struct nvkm_vmm *vmm = vma->vm;
void *argv = NULL;
u32 argc = 0;
int ret;
map->memory = mem->memory;
map->page = page;
if (vmm->func->valid) {
switch (vmm->mmu->subdev.device->card_type) {
case NV_50:
args.nv50.version = 0;
args.nv50.ro = !(vma->access & NV_MEM_ACCESS_WO);
args.nv50.priv = !!(vma->access & NV_MEM_ACCESS_SYS);
args.nv50.kind = (mem->memtype & 0x07f);
args.nv50.comp = (mem->memtype & 0x180) >> 7;
argv = &args.nv50;
argc = sizeof(args.nv50);
break;
case NV_C0:
case NV_E0:
case GM100:
case GP100: {
args.gf100.version = 0;
args.gf100.vol = (nvkm_memory_target(map->memory) != NVKM_MEM_TARGET_VRAM);
args.gf100.ro = !(vma->access & NV_MEM_ACCESS_WO);
args.gf100.priv = !!(vma->access & NV_MEM_ACCESS_SYS);
args.gf100.kind = (mem->memtype & 0x0ff);
argv = &args.gf100;
argc = sizeof(args.gf100);
}
break;
default:
break;
}
ret = vmm->func->valid(vmm, argv, argc, map);
if (WARN_ON(ret))
return;
}
mutex_lock(&vmm->mutex);
nvkm_vmm_ptes_map(vmm, page, vma->node->addr + delta,
vma->node->size, map, fn);
mutex_unlock(&vmm->mutex);
nvkm_memory_tags_put(vma->node->memory, vmm->mmu->subdev.device, &vma->node->tags);
nvkm_memory_unref(&vma->node->memory);
vma->node->memory = nvkm_memory_ref(map->memory);
vma->node->tags = map->tags;
}
void
nvkm_mmu_ptc_dump(struct nvkm_mmu *mmu)
{
@@ -312,138 +251,6 @@ nvkm_mmu_ptc_init(struct nvkm_mmu *mmu)
INIT_LIST_HEAD(&mmu->ptp.list);
}
void
nvkm_vm_map_at(struct nvkm_vma *vma, u64 delta, struct nvkm_mem *node)
{
const struct nvkm_vmm_page *page = &vma->vm->func->page[vma->node->page];
if (page->desc->func->unmap) {
struct nvkm_vmm_map map = { .mem = node->mem };
nvkm_vm_map_(page, vma, delta, node, page->desc->func->mem, &map);
return;
}
}
static void
nvkm_vm_map_sg_table(struct nvkm_vma *vma, u64 delta, u64 length,
struct nvkm_mem *mem)
{
const struct nvkm_vmm_page *page = &vma->vm->func->page[vma->node->page];
if (page->desc->func->unmap) {
struct nvkm_vmm_map map = { .sgl = mem->sg->sgl };
nvkm_vm_map_(page, vma, delta, mem, page->desc->func->sgl, &map);
return;
}
}
static void
nvkm_vm_map_sg(struct nvkm_vma *vma, u64 delta, u64 length,
struct nvkm_mem *mem)
{
const struct nvkm_vmm_page *page = &vma->vm->func->page[vma->node->page];
if (page->desc->func->unmap) {
struct nvkm_vmm_map map = { .dma = mem->pages };
nvkm_vm_map_(page, vma, delta, mem, page->desc->func->dma, &map);
return;
}
}
void
nvkm_vm_map(struct nvkm_vma *vma, struct nvkm_mem *node)
{
if (node->sg)
nvkm_vm_map_sg_table(vma, 0, node->size << 12, node);
else
if (node->pages)
nvkm_vm_map_sg(vma, 0, node->size << 12, node);
else
nvkm_vm_map_at(vma, 0, node);
}
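/* [Editor's sketch, not part of this commit] The sg/dma/linear dispatch that
 * nvkm_vm_map() performed above now happens where struct nvkm_vmm_map is
 * filled in by the memory backend (cf. nvkm_vram_map() and gk20a_instobj_map()
 * earlier in this diff); the field names are from this diff, the selection
 * logic is illustrative only: */
static void
example_fill_map(struct nvkm_vmm_map *map, struct nvkm_mm_node *mn,
		 struct scatterlist *sgl, dma_addr_t *dma)
{
	if (sgl)
		map->sgl = sgl;	/* was: nvkm_vm_map_sg_table() */
	else if (dma)
		map->dma = dma;	/* was: nvkm_vm_map_sg() */
	else
		map->mem = mn;	/* was: nvkm_vm_map_at() */
}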
void
nvkm_vm_unmap(struct nvkm_vma *vma)
{
nvkm_vmm_unmap(vma->vm, vma->node);
}
int
nvkm_vm_get(struct nvkm_vm *vm, u64 size, u32 page_shift, u32 access,
struct nvkm_vma *vma)
{
int ret;
mutex_lock(&vm->mutex);
ret = nvkm_vmm_get_locked(vm, true, false, false, page_shift, 0,
size, &vma->node);
mutex_unlock(&vm->mutex);
if (ret)
return ret;
vma->memory = NULL;
vma->tags = NULL;
vma->vm = NULL;
nvkm_vm_ref(vm, &vma->vm, NULL);
vma->offset = vma->addr = vma->node->addr;
vma->access = access;
return 0;
}
void
nvkm_vm_put(struct nvkm_vma *vma)
{
nvkm_vmm_put(vma->vm, &vma->node);
nvkm_vm_ref(NULL, &vma->vm, NULL);
}
int
nvkm_vm_boot(struct nvkm_vm *vm, u64 size)
{
return nvkm_vmm_boot(vm);
}
int
nvkm_vm_new(struct nvkm_device *device, u64 offset, u64 length, u64 mm_offset,
struct lock_class_key *key, struct nvkm_vm **pvm)
{
struct nvkm_mmu *mmu = device->mmu;
*pvm = NULL;
if (mmu->func->vmm.ctor) {
int ret = mmu->func->vmm.ctor(mmu, mm_offset,
offset + length - mm_offset,
NULL, 0, key, "legacy", pvm);
if (ret) {
nvkm_vm_ref(NULL, pvm, NULL);
return ret;
}
return ret;
}
return -EINVAL;
}
int
nvkm_vm_ref(struct nvkm_vm *ref, struct nvkm_vm **ptr, struct nvkm_memory *inst)
{
if (ref) {
if (inst) {
int ret = nvkm_vmm_join(ref, inst);
if (ret)
return ret;
}
nvkm_vmm_ref(ref);
}
if (*ptr) {
nvkm_vmm_part(*ptr, inst);
nvkm_vmm_unref(ptr);
}
*ptr = ref;
return 0;
}
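/* [Editor's sketch, not part of this commit] With nvkm_vm_ref() gone, the
 * instance attach/detach it performed is written directly with the calls it
 * wrapped (see the removed body above); the helper names are illustrative: */
static int
example_vmm_attach(struct nvkm_vmm *vmm, struct nvkm_vmm **ptr,
		   struct nvkm_memory *inst)
{
	int ret = nvkm_vmm_join(vmm, inst);	/* was: nvkm_vm_ref(vm, &p, inst) */
	if (ret)
		return ret;
	nvkm_vmm_ref(vmm);
	*ptr = vmm;
	return 0;
}

static void
example_vmm_detach(struct nvkm_vmm **ptr, struct nvkm_memory *inst)
{
	nvkm_vmm_part(*ptr, inst);	/* was: nvkm_vm_ref(NULL, &p, inst) */
	nvkm_vmm_unref(ptr);
}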
static void
nvkm_mmu_type(struct nvkm_mmu *mmu, int heap, u8 type)
{
@@ -611,9 +418,7 @@ nvkm_mmu_ctor(const struct nvkm_mmu_func *func, struct nvkm_device *device,
{
nvkm_subdev_ctor(&nvkm_mmu, device, index, &mmu->subdev);
mmu->func = func;
mmu->limit = func->limit;
mmu->dma_bits = func->dma_bits;
mmu->lpg_shift = func->lpg_shift;
nvkm_mmu_ptc_init(mmu);
mmu->user.ctor = nvkm_ummu_new;
mmu->user.base = func->mmu.user;
......
@@ -26,9 +26,7 @@
static const struct nvkm_mmu_func
g84_mmu = {
.limit = (1ULL << 40),
.dma_bits = 40,
.lpg_shift = 16,
.mmu = {{ -1, -1, NVIF_CLASS_MMU_NV50}},
.mem = {{ -1, 0, NVIF_CLASS_MEM_NV50}, nv50_mem_new, nv50_mem_map },
.vmm = {{ -1, -1, NVIF_CLASS_VMM_NV50}, nv50_vmm_new, false, 0x0200 },
......
@@ -29,54 +29,52 @@
/* Map from compressed to corresponding uncompressed storage type.
* The value 0xff represents an invalid storage type.
*/
const u8 gf100_pte_storage_type_map[256] =
{
0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0xff, 0x01, /* 0x00 */
0x01, 0x01, 0x01, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0x11, 0xff, 0xff, 0xff, 0xff, 0xff, 0x11, /* 0x10 */
0x11, 0x11, 0x11, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x26, 0x27, /* 0x20 */
0x28, 0x29, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x30 */
0xff, 0xff, 0x26, 0x27, 0x28, 0x29, 0x26, 0x27,
0x28, 0x29, 0xff, 0xff, 0xff, 0xff, 0x46, 0xff, /* 0x40 */
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0x46, 0x46, 0x46, 0x46, 0xff, 0xff, 0xff, /* 0x50 */
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x60 */
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x70 */
0xff, 0xff, 0xff, 0x7b, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7b, 0x7b, /* 0x80 */
0x7b, 0x7b, 0xff, 0x8b, 0x8c, 0x8d, 0x8e, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x90 */
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0x8b, 0x8c, 0x8d, 0x8e, 0xa7, /* 0xa0 */
0xa8, 0xa9, 0xaa, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb0 */
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xa7,
0xa8, 0xa9, 0xaa, 0xc3, 0xff, 0xff, 0xff, 0xff, /* 0xc0 */
0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xc3, 0xc3,
0xc3, 0xc3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd0 */
0xfe, 0xff, 0xff, 0xfe, 0xff, 0xfe, 0xff, 0xfe,
0xfe, 0xff, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xff, /* 0xe0 */
0xff, 0xfe, 0xff, 0xfe, 0xff, 0xfe, 0xfe, 0xff,
0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xf0 */
0xfe, 0xfe, 0xfe, 0xfe, 0xff, 0xfd, 0xfe, 0xff
};
const u8 *
gf100_mmu_kind(struct nvkm_mmu *mmu, int *count)
{
*count = ARRAY_SIZE(gf100_pte_storage_type_map);
return gf100_pte_storage_type_map;
static const u8
kind[256] = {
0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0xff, 0x01, /* 0x00 */
0x01, 0x01, 0x01, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0x11, 0xff, 0xff, 0xff, 0xff, 0xff, 0x11, /* 0x10 */
0x11, 0x11, 0x11, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x26, 0x27, /* 0x20 */
0x28, 0x29, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x30 */
0xff, 0xff, 0x26, 0x27, 0x28, 0x29, 0x26, 0x27,
0x28, 0x29, 0xff, 0xff, 0xff, 0xff, 0x46, 0xff, /* 0x40 */
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0x46, 0x46, 0x46, 0x46, 0xff, 0xff, 0xff, /* 0x50 */
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x60 */
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x70 */
0xff, 0xff, 0xff, 0x7b, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7b, 0x7b, /* 0x80 */
0x7b, 0x7b, 0xff, 0x8b, 0x8c, 0x8d, 0x8e, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x90 */
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0x8b, 0x8c, 0x8d, 0x8e, 0xa7, /* 0xa0 */
0xa8, 0xa9, 0xaa, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb0 */
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xa7,
0xa8, 0xa9, 0xaa, 0xc3, 0xff, 0xff, 0xff, 0xff, /* 0xc0 */
0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xc3, 0xc3,
0xc3, 0xc3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd0 */
0xfe, 0xff, 0xff, 0xfe, 0xff, 0xfe, 0xff, 0xfe,
0xfe, 0xff, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xff, /* 0xe0 */
0xff, 0xfe, 0xff, 0xfe, 0xff, 0xfe, 0xfe, 0xff,
0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xf0 */
0xfe, 0xfe, 0xfe, 0xfe, 0xff, 0xfd, 0xfe, 0xff
};
*count = ARRAY_SIZE(kind);
return kind;
}
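/* [Editor's sketch, not part of this commit] The removed gf100_fb_memtype_valid()
 * tested gf100_pte_storage_type_map[memtype] != 0xff; with the table now local
 * to gf100_mmu_kind(), an equivalent check can query it through that function
 * (the caller below is shown for illustration only): */
static bool
example_gf100_memtype_valid(struct nvkm_mmu *mmu, u32 tile_flags)
{
	const u8 memtype = (tile_flags & 0x0000ff00) >> 8;
	int count;
	const u8 *kind = gf100_mmu_kind(mmu, &count);
	return memtype < count && kind[memtype] != 0xff;
}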
static const struct nvkm_mmu_func
gf100_mmu = {
.limit = (1ULL << 40),
.dma_bits = 40,
.lpg_shift = 17,
.mmu = {{ -1, -1, NVIF_CLASS_MMU_GF100}},
.mem = {{ -1, 0, NVIF_CLASS_MEM_GF100}, gf100_mem_new, gf100_mem_map },
.vmm = {{ -1, -1, NVIF_CLASS_VMM_GF100}, gf100_vmm_new },
......
@@ -26,9 +26,7 @@
static const struct nvkm_mmu_func
gk104_mmu = {
.limit = (1ULL << 40),
.dma_bits = 40,
.lpg_shift = 17,
.mmu = {{ -1, -1, NVIF_CLASS_MMU_GF100}},
.mem = {{ -1, 0, NVIF_CLASS_MEM_GF100}, gf100_mem_new, gf100_mem_map },
.vmm = {{ -1, -1, NVIF_CLASS_VMM_GF100}, gk104_vmm_new },
......
@@ -26,9 +26,7 @@
static const struct nvkm_mmu_func
gk20a_mmu = {
.limit = (1ULL << 40),
.dma_bits = 40,
.lpg_shift = 17,
.mmu = {{ -1, -1, NVIF_CLASS_MMU_GF100}},
.mem = {{ -1, -1, NVIF_CLASS_MEM_GF100}, .umap = gf100_mem_map },
.vmm = {{ -1, -1, NVIF_CLASS_VMM_GF100}, gk20a_vmm_new },
......
@@ -70,9 +70,7 @@ gm200_mmu_kind(struct nvkm_mmu *mmu, int *count)
static const struct nvkm_mmu_func
gm200_mmu = {
.limit = (1ULL << 40),
.dma_bits = 40,
.lpg_shift = 17,
.mmu = {{ -1, -1, NVIF_CLASS_MMU_GF100}},
.mem = {{ -1, 0, NVIF_CLASS_MEM_GF100}, gf100_mem_new, gf100_mem_map },
.vmm = {{ -1, 0, NVIF_CLASS_VMM_GM200}, gm200_vmm_new },
@@ -82,9 +80,7 @@ gm200_mmu = {
static const struct nvkm_mmu_func
gm200_mmu_fixed = {
.limit = (1ULL << 40),
.dma_bits = 40,
.lpg_shift = 17,
.mmu = {{ -1, -1, NVIF_CLASS_MMU_GF100}},
.mem = {{ -1, 0, NVIF_CLASS_MEM_GF100}, gf100_mem_new, gf100_mem_map },
.vmm = {{ -1, -1, NVIF_CLASS_VMM_GM200}, gm200_vmm_new_fixed },
......
@@ -28,9 +28,7 @@
static const struct nvkm_mmu_func
gm20b_mmu = {
.limit = (1ULL << 40),
.dma_bits = 40,
.lpg_shift = 17,
.mmu = {{ -1, -1, NVIF_CLASS_MMU_GF100}},
.mem = {{ -1, -1, NVIF_CLASS_MEM_GF100}, .umap = gf100_mem_map },
.vmm = {{ -1, 0, NVIF_CLASS_VMM_GM200}, gm20b_vmm_new },
@@ -40,9 +38,7 @@ gm20b_mmu = {
static const struct nvkm_mmu_func
gm20b_mmu_fixed = {
.limit = (1ULL << 40),
.dma_bits = 40,
.lpg_shift = 17,
.mmu = {{ -1, -1, NVIF_CLASS_MMU_GF100}},
.mem = {{ -1, -1, NVIF_CLASS_MEM_GF100}, .umap = gf100_mem_map },
.vmm = {{ -1, -1, NVIF_CLASS_VMM_GM200}, gm20b_vmm_new_fixed },
......
@@ -28,9 +28,7 @@
static const struct nvkm_mmu_func
gp100_mmu = {
.limit = (1ULL << 49),
.dma_bits = 47,
.lpg_shift = 16,
.mmu = {{ -1, -1, NVIF_CLASS_MMU_GF100}},
.mem = {{ -1, 0, NVIF_CLASS_MEM_GF100}, gf100_mem_new, gf100_mem_map },
.vmm = {{ -1, -1, NVIF_CLASS_VMM_GP100}, gp100_vmm_new },
......
@@ -28,9 +28,7 @@
static const struct nvkm_mmu_func
gp10b_mmu = {
.limit = (1ULL << 49),
.dma_bits = 47,
.lpg_shift = 16,
.mmu = {{ -1, -1, NVIF_CLASS_MMU_GF100}},
.mem = {{ -1, -1, NVIF_CLASS_MEM_GF100}, .umap = gf100_mem_map },
.vmm = {{ -1, -1, NVIF_CLASS_VMM_GP100}, gp10b_vmm_new },
......
@@ -26,13 +26,9 @@
#include <nvif/class.h>
#define NV04_PDMA_SIZE (128 * 1024 * 1024)
const struct nvkm_mmu_func
nv04_mmu = {
.limit = NV04_PDMA_SIZE,
.dma_bits = 32,
.lpg_shift = 12,
.mmu = {{ -1, -1, NVIF_CLASS_MMU_NV04}},
.mem = {{ -1, -1, NVIF_CLASS_MEM_NV04}, nv04_mem_new, nv04_mem_map },
.vmm = {{ -1, -1, NVIF_CLASS_VMM_NV04}, nv04_vmm_new, true },
......
@@ -28,8 +28,6 @@
#include <nvif/class.h>
#define NV41_GART_SIZE (512 * 1024 * 1024)
static void
nv41_mmu_init(struct nvkm_mmu *mmu)
{
@@ -42,9 +40,7 @@ nv41_mmu_init(struct nvkm_mmu *mmu)
static const struct nvkm_mmu_func
nv41_mmu = {
.init = nv41_mmu_init,
.limit = NV41_GART_SIZE,
.dma_bits = 39,
.lpg_shift = 12,
.mmu = {{ -1, -1, NVIF_CLASS_MMU_NV04}},
.mem = {{ -1, -1, NVIF_CLASS_MEM_NV04}, nv04_mem_new, nv04_mem_map },
.vmm = {{ -1, -1, NVIF_CLASS_VMM_NV04}, nv41_vmm_new, true },
......
@@ -28,8 +28,6 @@
#include <nvif/class.h>
#define NV44_GART_SIZE (512 * 1024 * 1024)
static void
nv44_mmu_init(struct nvkm_mmu *mmu)
{
@@ -57,9 +55,7 @@ nv44_mmu_init(struct nvkm_mmu *mmu)
static const struct nvkm_mmu_func
nv44_mmu = {
.init = nv44_mmu_init,
.limit = NV44_GART_SIZE,
.dma_bits = 39,
.lpg_shift = 12,
.mmu = {{ -1, -1, NVIF_CLASS_MMU_NV04}},
.mem = {{ -1, -1, NVIF_CLASS_MEM_NV04}, nv04_mem_new, nv04_mem_map },
.vmm = {{ -1, -1, NVIF_CLASS_VMM_NV04}, nv44_vmm_new, true },
......
@@ -62,9 +62,7 @@ nv50_mmu_kind(struct nvkm_mmu *base, int *count)
static const struct nvkm_mmu_func
nv50_mmu = {
.limit = (1ULL << 40),
.dma_bits = 40,
.lpg_shift = 16,
.mmu = {{ -1, -1, NVIF_CLASS_MMU_NV50}},
.mem = {{ -1, 0, NVIF_CLASS_MEM_NV50}, nv50_mem_new, nv50_mem_map },
.vmm = {{ -1, -1, NVIF_CLASS_VMM_NV50}, nv50_vmm_new, false, 0x1400 },
......
@@ -11,9 +11,7 @@ int nvkm_mmu_new_(const struct nvkm_mmu_func *, struct nvkm_device *,
struct nvkm_mmu_func {
void (*init)(struct nvkm_mmu *);
u64 limit;
u8 dma_bits;
u8 lpg_shift;
struct {
struct nvkm_sclass user;
......
@@ -700,7 +700,7 @@ nvkm_vmm_ptes_unmap(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
func->unmap);
}
void
static void
nvkm_vmm_ptes_map(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
u64 addr, u64 size, struct nvkm_vmm_map *map,
nvkm_vmm_pte_func func)
......
@@ -160,9 +160,6 @@ int nvkm_vmm_get_locked(struct nvkm_vmm *, bool getref, bool mapref,
void nvkm_vmm_put_locked(struct nvkm_vmm *, struct nvkm_vma *);
void nvkm_vmm_unmap_locked(struct nvkm_vmm *, struct nvkm_vma *);
void nvkm_vmm_unmap_region(struct nvkm_vmm *vmm, struct nvkm_vma *vma);
void nvkm_vmm_ptes_map(struct nvkm_vmm *, const struct nvkm_vmm_page *,
u64 addr, u64 size, struct nvkm_vmm_map *,
nvkm_vmm_pte_func);
struct nvkm_vma *nvkm_vma_tail(struct nvkm_vma *, u64 tail);
void nvkm_vmm_node_insert(struct nvkm_vmm *, struct nvkm_vma *);
......