Commit 632b740c authored by Ben Skeggs

drm/nouveau/mmu: remove old vmm frontend

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
parent 7dc6a446
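Note (illustrative only, not part of this commit): the nvkm_vm_* helpers removed below were thin wrappers over the nvkm_vmm_* interface that remains. A rough, hedged sketch of how a hypothetical caller uses the retained entry points directly; the function name and constants here are invented for the example, and the signatures are taken from the prototypes visible further down in this diff.

/* Sketch, assuming the nouveau mmu/vmm headers; names and values are
 * illustrative, not taken from the kernel tree. */
static int example_vmm_user(struct nvkm_device *device,
			    struct lock_class_key *key)
{
	struct nvkm_vmm *vmm;
	struct nvkm_vma *vma;
	int ret;

	/* Previously nvkm_vm_new(): create the address space directly. */
	ret = nvkm_vmm_new(device, 0, 1ULL << 40, NULL, 0, key, "example", &vmm);
	if (ret)
		return ret;

	/* Previously nvkm_vm_get(): allocate a region under the vmm's mutex. */
	mutex_lock(&vmm->mutex);
	ret = nvkm_vmm_get_locked(vmm, true, false, false, 12, 0, 0x1000, &vma);
	mutex_unlock(&vmm->mutex);

	if (ret == 0) {
		/* Backing memory would be bound via nvkm_vmm_map(vmm, vma, ...);
		 * release the region with the locked put helper when done. */
		mutex_lock(&vmm->mutex);
		nvkm_vmm_put_locked(vmm, vma);
		mutex_unlock(&vmm->mutex);
	}

	/* Previously nvkm_vm_ref(NULL, &vm, NULL): drop the reference. */
	nvkm_vmm_unref(&vmm);
	return ret;
}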
@@ -63,10 +63,8 @@ u64 nvif_device_time(struct nvif_device *);
#define nvxx_therm(a) nvxx_device(a)->therm
#define nvxx_volt(a) nvxx_device(a)->volt
#include <core/device.h>
#include <engine/fifo.h>
#include <engine/gr.h>
#include <engine/sw.h>
#define nvxx_fifo(a) nvxx_device(a)->fifo
#define nvxx_gr(a) nvxx_device(a)->gr
......
@@ -16,8 +16,6 @@ struct nvkm_client {
void *data;
int (*ntfy)(const void *, u32, const void *, u32);
struct nvkm_vm *vm;
struct list_head umem;
spinlock_t lock;
};
......
#ifndef __NVKM_OS_H__
#define __NVKM_OS_H__
#include <nvif/os.h>
#define nvkm_vmm nvkm_vm
#ifdef __BIG_ENDIAN
#define ioread16_native ioread16be
@@ -21,5 +20,4 @@
iowrite32_native(lower_32_bits(_v), &_p[0]); \
iowrite32_native(upper_32_bits(_v), &_p[1]); \
} while(0)
#endif
@@ -20,17 +20,6 @@
#define NVKM_RAM_TYPE_VM 0x7f
#define NV_MEM_COMP_VM 0x03
struct nvkm_mem {
struct nvkm_mm_node *mem;
dma_addr_t *pages;
u32 memtype;
u64 offset;
u64 size;
struct sg_table *sg;
struct nvkm_memory *memory;
};
struct nvkm_fb_tile {
struct nvkm_mm_node *tag;
u32 addr;
@@ -57,7 +46,6 @@ struct nvkm_fb {
struct nvkm_memory *mmu_wr;
};
bool nvkm_fb_memtype_valid(struct nvkm_fb *, u32 memtype);
void nvkm_fb_tile_init(struct nvkm_fb *, int region, u32 addr, u32 size,
u32 pitch, u32 flags, struct nvkm_fb_tile *);
void nvkm_fb_tile_fini(struct nvkm_fb *, int region, struct nvkm_fb_tile *);
@@ -163,6 +151,4 @@ struct nvkm_ram_func {
int (*prog)(struct nvkm_ram *);
void (*tidy)(struct nvkm_ram *);
};
extern const u8 gf100_pte_storage_type_map[256];
#endif
#ifndef __NVKM_MMU_H__
#define __NVKM_MMU_H__
#include <core/subdev.h>
struct nvkm_mem;
struct nvkm_vm_pgt {
struct nvkm_memory *mem[2];
u32 refcount[2];
};
struct nvkm_vma {
struct list_head head;
@@ -24,14 +18,9 @@ struct nvkm_vma {
bool busy:1; /* Region busy (for temporarily preventing user access). */
struct nvkm_memory *memory; /* Memory currently mapped into VMA. */
struct nvkm_tags *tags; /* Compression tag reference. */
struct nvkm_vma *node;
struct nvkm_vm *vm;
u64 offset;
u32 access;
};
struct nvkm_vm {
struct nvkm_vmm {
const struct nvkm_vmm_func *func;
struct nvkm_mmu *mmu;
const char *name;
@@ -56,17 +45,6 @@ struct nvkm_vm {
void *nullp;
};
int nvkm_vm_new(struct nvkm_device *, u64 offset, u64 length, u64 mm_offset,
struct lock_class_key *, struct nvkm_vm **);
int nvkm_vm_ref(struct nvkm_vm *, struct nvkm_vm **, struct nvkm_memory *inst);
int nvkm_vm_boot(struct nvkm_vm *, u64 size);
int nvkm_vm_get(struct nvkm_vm *, u64 size, u32 page_shift, u32 access,
struct nvkm_vma *);
void nvkm_vm_put(struct nvkm_vma *);
void nvkm_vm_map(struct nvkm_vma *, struct nvkm_mem *);
void nvkm_vm_map_at(struct nvkm_vma *, u64 offset, struct nvkm_mem *);
void nvkm_vm_unmap(struct nvkm_vma *);
int nvkm_vmm_new(struct nvkm_device *, u64 addr, u64 size, void *argv, u32 argc,
struct lock_class_key *, const char *name, struct nvkm_vmm **);
struct nvkm_vmm *nvkm_vmm_ref(struct nvkm_vmm *);
@@ -105,9 +83,7 @@ struct nvkm_mmu {
const struct nvkm_mmu_func *func;
struct nvkm_subdev subdev;
u64 limit;
u8 dma_bits;
u8 lpg_shift;
int heap_nr;
struct {
......
@@ -271,12 +271,6 @@ nouveau_cli_init(struct nouveau_drm *drm, const char *sname,
}
cli->mem = &mems[ret];
if (1) {
cli->vm = cli->vmm.vm;
nvxx_client(&cli->base)->vm = cli->vm;
}
return 0;
done:
if (ret)
......
@@ -97,7 +97,6 @@ struct nouveau_cli {
struct nouveau_vmm vmm;
const struct nvif_mclass *mem;
struct nvkm_vm *vm;
struct list_head head;
void *abi16;
struct list_head objects;
......
@@ -267,18 +267,11 @@ int
nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_cli *cli = nouveau_cli(file_priv);
struct nvkm_fb *fb = nvxx_fb(&drm->client.device);
struct drm_nouveau_gem_new *req = data;
struct nouveau_bo *nvbo = NULL;
int ret = 0;
if (!nvkm_fb_memtype_valid(fb, req->info.tile_flags)) {
NV_PRINTK(err, cli, "bad page flags: 0x%08x\n", req->info.tile_flags);
return -EINVAL;
}
ret = nouveau_gem_new(cli, req->info.size, req->align,
req->info.domain, req->info.tile_mode,
req->info.tile_flags, &nvbo);
......
@@ -131,6 +131,5 @@ nouveau_vmm_init(struct nouveau_cli *cli, s32 oclass, struct nouveau_vmm *vmm)
return ret;
vmm->cli = cli;
vmm->vm = nvkm_uvmm(vmm->vmm.object.priv)->vmm;
return 0;
}
#ifndef __NOUVEAU_VMA_H__
#define __NOUVEAU_VMA_H__
#include <subdev/mmu/uvmm.h>
#include <nvif/vmm.h>
struct nouveau_bo;
struct nouveau_mem;
......
@@ -31,12 +31,6 @@
#include <engine/gr.h>
#include <engine/mpeg.h>
bool
nvkm_fb_memtype_valid(struct nvkm_fb *fb, u32 memtype)
{
return fb->func->memtype_valid(fb, memtype);
}
void
nvkm_fb_tile_fini(struct nvkm_fb *fb, int region, struct nvkm_fb_tile *tile)
{
......
@@ -27,15 +27,6 @@
#include <core/memory.h>
#include <core/option.h>
extern const u8 gf100_pte_storage_type_map[256];
bool
gf100_fb_memtype_valid(struct nvkm_fb *fb, u32 tile_flags)
{
u8 memtype = (tile_flags & 0x0000ff00) >> 8;
return likely((gf100_pte_storage_type_map[memtype] != 0xff));
}
void
gf100_fb_intr(struct nvkm_fb *base)
{
@@ -140,7 +131,6 @@ gf100_fb = {
.init_page = gf100_fb_init_page,
.intr = gf100_fb_intr,
.ram_new = gf100_ram_new,
.memtype_valid = gf100_fb_memtype_valid,
.default_bigpage = 17,
};
......
@@ -32,7 +32,6 @@ gf108_fb = {
.init_page = gf100_fb_init_page,
.intr = gf100_fb_intr,
.ram_new = gf108_ram_new,
.memtype_valid = gf100_fb_memtype_valid,
.default_bigpage = 17,
};
......
@@ -32,7 +32,6 @@ gk104_fb = {
.init_page = gf100_fb_init_page,
.intr = gf100_fb_intr,
.ram_new = gk104_ram_new,
.memtype_valid = gf100_fb_memtype_valid,
.default_bigpage = 17,
};
......
@@ -30,7 +30,6 @@ gk20a_fb = {
.init = gf100_fb_init,
.init_page = gf100_fb_init_page,
.intr = gf100_fb_intr,
.memtype_valid = gf100_fb_memtype_valid,
.default_bigpage = 17,
};
......
@@ -32,7 +32,6 @@ gm107_fb = {
.init_page = gf100_fb_init_page,
.intr = gf100_fb_intr,
.ram_new = gm107_ram_new,
.memtype_valid = gf100_fb_memtype_valid,
.default_bigpage = 17,
};
......
@@ -65,7 +65,6 @@ gm200_fb = {
.init_page = gm200_fb_init_page,
.intr = gf100_fb_intr,
.ram_new = gm200_ram_new,
.memtype_valid = gf100_fb_memtype_valid,
.default_bigpage = 0 /* per-instance. */,
};
......
@@ -30,7 +30,6 @@ gm20b_fb = {
.init = gm200_fb_init,
.init_page = gm200_fb_init_page,
.intr = gf100_fb_intr,
.memtype_valid = gf100_fb_memtype_valid,
.default_bigpage = 0 /* per-instance. */,
};
......
@@ -59,7 +59,6 @@ gp100_fb = {
.init_page = gm200_fb_init_page,
.init_unkn = gp100_fb_init_unkn,
.ram_new = gp100_ram_new,
.memtype_valid = gf100_fb_memtype_valid,
};
int
......
@@ -33,7 +33,6 @@ gp102_fb = {
.init = gp100_fb_init,
.init_page = gm200_fb_init_page,
.ram_new = gp100_ram_new,
.memtype_valid = gf100_fb_memtype_valid,
};
int
......
@@ -28,7 +28,6 @@ gp10b_fb = {
.init = gm200_fb_init,
.init_page = gm200_fb_init_page,
.intr = gf100_fb_intr,
.memtype_valid = gf100_fb_memtype_valid,
};
int
......
@@ -25,14 +25,6 @@
#include "ram.h"
#include "regsnv04.h"
bool
nv04_fb_memtype_valid(struct nvkm_fb *fb, u32 tile_flags)
{
if (!(tile_flags & 0xff00))
return true;
return false;
}
static void
nv04_fb_init(struct nvkm_fb *fb)
{
@@ -49,7 +41,6 @@ static const struct nvkm_fb_func
nv04_fb = {
.init = nv04_fb_init,
.ram_new = nv04_ram_new,
.memtype_valid = nv04_fb_memtype_valid,
};
int
......
@@ -61,7 +61,6 @@ nv10_fb = {
.tile.fini = nv10_fb_tile_fini,
.tile.prog = nv10_fb_tile_prog,
.ram_new = nv10_ram_new,
.memtype_valid = nv04_fb_memtype_valid,
};
int
......
@@ -33,7 +33,6 @@ nv1a_fb = {
.tile.fini = nv10_fb_tile_fini,
.tile.prog = nv10_fb_tile_prog,
.ram_new = nv1a_ram_new,
.memtype_valid = nv04_fb_memtype_valid,
};
int
......
@@ -93,7 +93,6 @@ nv20_fb = {
.tile.fini = nv20_fb_tile_fini,
.tile.prog = nv20_fb_tile_prog,
.ram_new = nv20_ram_new,
.memtype_valid = nv04_fb_memtype_valid,
};
int
......
@@ -51,7 +51,6 @@ nv25_fb = {
.tile.fini = nv20_fb_tile_fini,
.tile.prog = nv20_fb_tile_prog,
.ram_new = nv20_ram_new,
.memtype_valid = nv04_fb_memtype_valid,
};
int
......
@@ -124,7 +124,6 @@ nv30_fb = {
.tile.fini = nv20_fb_tile_fini,
.tile.prog = nv20_fb_tile_prog,
.ram_new = nv20_ram_new,
.memtype_valid = nv04_fb_memtype_valid,
};
int
......
@@ -53,7 +53,6 @@ nv35_fb = {
.tile.fini = nv20_fb_tile_fini,
.tile.prog = nv20_fb_tile_prog,
.ram_new = nv20_ram_new,
.memtype_valid = nv04_fb_memtype_valid,
};
int
......
@@ -53,7 +53,6 @@ nv36_fb = {
.tile.fini = nv20_fb_tile_fini,
.tile.prog = nv20_fb_tile_prog,
.ram_new = nv20_ram_new,
.memtype_valid = nv04_fb_memtype_valid,
};
int
......
@@ -59,7 +59,6 @@ nv40_fb = {
.tile.fini = nv20_fb_tile_fini,
.tile.prog = nv20_fb_tile_prog,
.ram_new = nv40_ram_new,
.memtype_valid = nv04_fb_memtype_valid,
};
int
......
@@ -53,7 +53,6 @@ nv41_fb = {
.tile.fini = nv20_fb_tile_fini,
.tile.prog = nv41_fb_tile_prog,
.ram_new = nv41_ram_new,
.memtype_valid = nv04_fb_memtype_valid,
};
int
......
@@ -62,7 +62,6 @@ nv44_fb = {
.tile.fini = nv20_fb_tile_fini,
.tile.prog = nv44_fb_tile_prog,
.ram_new = nv44_ram_new,
.memtype_valid = nv04_fb_memtype_valid,
};
int
......
@@ -48,7 +48,6 @@ nv46_fb = {
.tile.fini = nv20_fb_tile_fini,
.tile.prog = nv44_fb_tile_prog,
.ram_new = nv44_ram_new,
.memtype_valid = nv04_fb_memtype_valid,
};
int
......
@@ -36,7 +36,6 @@ nv47_fb = {
.tile.fini = nv20_fb_tile_fini,
.tile.prog = nv41_fb_tile_prog,
.ram_new = nv41_ram_new,
.memtype_valid = nv04_fb_memtype_valid,
};
int
......
@@ -36,7 +36,6 @@ nv49_fb = {
.tile.fini = nv20_fb_tile_fini,
.tile.prog = nv41_fb_tile_prog,
.ram_new = nv49_ram_new,
.memtype_valid = nv04_fb_memtype_valid,
};
int
......
@@ -34,7 +34,6 @@ nv4e_fb = {
.tile.fini = nv20_fb_tile_fini,
.tile.prog = nv44_fb_tile_prog,
.ram_new = nv44_ram_new,
.memtype_valid = nv04_fb_memtype_valid,
};
int
......
@@ -28,18 +28,6 @@
#include <core/enum.h>
#include <engine/fifo.h>
int
nv50_fb_memtype[0x80] = {
1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1, 1, 1, 1, 0, 0, 0, 0, 2, 2, 2, 2, 0, 0, 0, 0,
1, 1, 1, 1, 1, 1, 1, 0, 2, 2, 2, 2, 2, 2, 2, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 0, 0,
0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 2, 2, 2, 2,
1, 0, 2, 0, 1, 0, 2, 0, 1, 1, 2, 2, 1, 1, 0, 0
};
static int
nv50_fb_ram_new(struct nvkm_fb *base, struct nvkm_ram **pram)
{
@@ -47,12 +35,6 @@ nv50_fb_ram_new(struct nvkm_fb *base, struct nvkm_ram **pram)
return fb->func->ram_new(&fb->base, pram);
}
static bool
nv50_fb_memtype_valid(struct nvkm_fb *fb, u32 memtype)
{
return nv50_fb_memtype[(memtype & 0xff00) >> 8] != 0;
}
static const struct nvkm_enum vm_dispatch_subclients[] = {
{ 0x00000000, "GRCTX" },
{ 0x00000001, "NOTIFY" },
@@ -276,7 +258,6 @@ nv50_fb_ = {
.init = nv50_fb_init,
.intr = nv50_fb_intr,
.ram_new = nv50_fb_ram_new,
.memtype_valid = nv50_fb_memtype_valid,
};
int
......
@@ -18,5 +18,4 @@ struct nv50_fb_func {
int nv50_fb_new_(const struct nv50_fb_func *, struct nvkm_device *, int index,
struct nvkm_fb **pfb);
extern int nv50_fb_memtype[0x80];
#endif
@@ -25,8 +25,6 @@ struct nvkm_fb_func {
int (*ram_new)(struct nvkm_fb *, struct nvkm_ram **);
bool (*memtype_valid)(struct nvkm_fb *, u32 memtype);
u8 default_bigpage;
};
@@ -36,8 +34,6 @@ int nvkm_fb_new_(const struct nvkm_fb_func *, struct nvkm_device *device,
int index, struct nvkm_fb **);
int nvkm_fb_bios_memtype(struct nvkm_bios *);
bool nv04_fb_memtype_valid(struct nvkm_fb *, u32 memtype);
void nv10_fb_tile_init(struct nvkm_fb *, int i, u32 addr, u32 size,
u32 pitch, u32 flags, struct nvkm_fb_tile *);
void nv10_fb_tile_fini(struct nvkm_fb *, int i, struct nvkm_fb_tile *);
@@ -67,7 +63,6 @@ void nv46_fb_tile_init(struct nvkm_fb *, int i, u32 addr, u32 size,
int gf100_fb_oneinit(struct nvkm_fb *);
int gf100_fb_init_page(struct nvkm_fb *);
bool gf100_fb_memtype_valid(struct nvkm_fb *, u32);
int gm200_fb_init_page(struct nvkm_fb *);
#endif
@@ -45,15 +45,6 @@ nvkm_vram_map(struct nvkm_memory *memory, u64 offset, struct nvkm_vmm *vmm,
.mem = vram->mn,
};
if (vma->vm) {
struct nvkm_mem mem = {
.mem = vram->mn,
.memory = &vram->memory,
};
nvkm_vm_map_at(vma, offset, &mem);
return 0;
}
return nvkm_vmm_map(vmm, vma, argv, argc, &map);
}
......
@@ -44,9 +44,7 @@
#include "priv.h"
#include <core/memory.h>
#include <core/mm.h>
#include <core/tegra.h>
#include <subdev/fb.h>
#include <subdev/ltc.h>
#include <subdev/mmu.h>
@@ -290,15 +288,6 @@ gk20a_instobj_map(struct nvkm_memory *memory, u64 offset, struct nvkm_vmm *vmm,
.mem = node->mn,
};
if (vma->vm) {
struct nvkm_mem mem = {
.mem = node->mn,
.memory = &node->memory,
};
nvkm_vm_map_at(vma, 0, &mem);
return 0;
}
return nvkm_vmm_map(vmm, vma, argv, argc, &map);
}
......
@@ -216,67 +216,6 @@ nvkm_mmu_ptc_get(struct nvkm_mmu *mmu, u32 size, u32 align, bool zero)
return pt;
}
static void
nvkm_vm_map_(const struct nvkm_vmm_page *page, struct nvkm_vma *vma, u64 delta,
struct nvkm_mem *mem, nvkm_vmm_pte_func fn,
struct nvkm_vmm_map *map)
{
union {
struct nv50_vmm_map_v0 nv50;
struct gf100_vmm_map_v0 gf100;
} args;
struct nvkm_vmm *vmm = vma->vm;
void *argv = NULL;
u32 argc = 0;
int ret;
map->memory = mem->memory;
map->page = page;
if (vmm->func->valid) {
switch (vmm->mmu->subdev.device->card_type) {
case NV_50:
args.nv50.version = 0;
args.nv50.ro = !(vma->access & NV_MEM_ACCESS_WO);
args.nv50.priv = !!(vma->access & NV_MEM_ACCESS_SYS);
args.nv50.kind = (mem->memtype & 0x07f);
args.nv50.comp = (mem->memtype & 0x180) >> 7;
argv = &args.nv50;
argc = sizeof(args.nv50);
break;
case NV_C0:
case NV_E0:
case GM100:
case GP100: {
args.gf100.version = 0;
args.gf100.vol = (nvkm_memory_target(map->memory) != NVKM_MEM_TARGET_VRAM);
args.gf100.ro = !(vma->access & NV_MEM_ACCESS_WO);
args.gf100.priv = !!(vma->access & NV_MEM_ACCESS_SYS);
args.gf100.kind = (mem->memtype & 0x0ff);
argv = &args.gf100;
argc = sizeof(args.gf100);
}
break;
default:
break;
}
ret = vmm->func->valid(vmm, argv, argc, map);
if (WARN_ON(ret))
return;
}
mutex_lock(&vmm->mutex);
nvkm_vmm_ptes_map(vmm, page, vma->node->addr + delta,
vma->node->size, map, fn);
mutex_unlock(&vmm->mutex);
nvkm_memory_tags_put(vma->node->memory, vmm->mmu->subdev.device, &vma->node->tags);
nvkm_memory_unref(&vma->node->memory);
vma->node->memory = nvkm_memory_ref(map->memory);
vma->node->tags = map->tags;
}
void
nvkm_mmu_ptc_dump(struct nvkm_mmu *mmu)
{
@@ -312,138 +251,6 @@ nvkm_mmu_ptc_init(struct nvkm_mmu *mmu)
INIT_LIST_HEAD(&mmu->ptp.list);
}
void
nvkm_vm_map_at(struct nvkm_vma *vma, u64 delta, struct nvkm_mem *node)
{
const struct nvkm_vmm_page *page = &vma->vm->func->page[vma->node->page];
if (page->desc->func->unmap) {
struct nvkm_vmm_map map = { .mem = node->mem };
nvkm_vm_map_(page, vma, delta, node, page->desc->func->mem, &map);
return;
}
}
static void
nvkm_vm_map_sg_table(struct nvkm_vma *vma, u64 delta, u64 length,
struct nvkm_mem *mem)
{
const struct nvkm_vmm_page *page = &vma->vm->func->page[vma->node->page];
if (page->desc->func->unmap) {
struct nvkm_vmm_map map = { .sgl = mem->sg->sgl };
nvkm_vm_map_(page, vma, delta, mem, page->desc->func->sgl, &map);
return;
}
}
static void
nvkm_vm_map_sg(struct nvkm_vma *vma, u64 delta, u64 length,
struct nvkm_mem *mem)
{
const struct nvkm_vmm_page *page = &vma->vm->func->page[vma->node->page];
if (page->desc->func->unmap) {
struct nvkm_vmm_map map = { .dma = mem->pages };
nvkm_vm_map_(page, vma, delta, mem, page->desc->func->dma, &map);
return;
}
}
void
nvkm_vm_map(struct nvkm_vma *vma, struct nvkm_mem *node)
{
if (node->sg)
nvkm_vm_map_sg_table(vma, 0, node->size << 12, node);
else
if (node->pages)
nvkm_vm_map_sg(vma, 0, node->size << 12, node);
else
nvkm_vm_map_at(vma, 0, node);
}
void
nvkm_vm_unmap(struct nvkm_vma *vma)
{
nvkm_vmm_unmap(vma->vm, vma->node);
}
int
nvkm_vm_get(struct nvkm_vm *vm, u64 size, u32 page_shift, u32 access,
struct nvkm_vma *vma)
{
int ret;
mutex_lock(&vm->mutex);
ret = nvkm_vmm_get_locked(vm, true, false, false, page_shift, 0,
size, &vma->node);
mutex_unlock(&vm->mutex);
if (ret)
return ret;
vma->memory = NULL;
vma->tags = NULL;
vma->vm = NULL;
nvkm_vm_ref(vm, &vma->vm, NULL);
vma->offset = vma->addr = vma->node->addr;
vma->access = access;
return 0;
}
void
nvkm_vm_put(struct nvkm_vma *vma)
{
nvkm_vmm_put(vma->vm, &vma->node);
nvkm_vm_ref(NULL, &vma->vm, NULL);
}
int
nvkm_vm_boot(struct nvkm_vm *vm, u64 size)
{
return nvkm_vmm_boot(vm);
}
int
nvkm_vm_new(struct nvkm_device *device, u64 offset, u64 length, u64 mm_offset,
struct lock_class_key *key, struct nvkm_vm **pvm)
{
struct nvkm_mmu *mmu = device->mmu;
*pvm = NULL;
if (mmu->func->vmm.ctor) {
int ret = mmu->func->vmm.ctor(mmu, mm_offset,
offset + length - mm_offset,
NULL, 0, key, "legacy", pvm);
if (ret) {
nvkm_vm_ref(NULL, pvm, NULL);
return ret;
}
return ret;
}
return -EINVAL;
}
int
nvkm_vm_ref(struct nvkm_vm *ref, struct nvkm_vm **ptr, struct nvkm_memory *inst)
{
if (ref) {
if (inst) {
int ret = nvkm_vmm_join(ref, inst);
if (ret)
return ret;
}
nvkm_vmm_ref(ref);
}
if (*ptr) {
nvkm_vmm_part(*ptr, inst);
nvkm_vmm_unref(ptr);
}
*ptr = ref;
return 0;
}
static void
nvkm_mmu_type(struct nvkm_mmu *mmu, int heap, u8 type)
{
@@ -611,9 +418,7 @@ nvkm_mmu_ctor(const struct nvkm_mmu_func *func, struct nvkm_device *device,
{
nvkm_subdev_ctor(&nvkm_mmu, device, index, &mmu->subdev);
mmu->func = func;
mmu->limit = func->limit;
mmu->dma_bits = func->dma_bits;
mmu->lpg_shift = func->lpg_shift;
nvkm_mmu_ptc_init(mmu);
mmu->user.ctor = nvkm_ummu_new;
mmu->user.base = func->mmu.user;
......
@@ -26,9 +26,7 @@
static const struct nvkm_mmu_func
g84_mmu = {
.limit = (1ULL << 40),
.dma_bits = 40,
.lpg_shift = 16,
.mmu = {{ -1, -1, NVIF_CLASS_MMU_NV50}},
.mem = {{ -1, 0, NVIF_CLASS_MEM_NV50}, nv50_mem_new, nv50_mem_map },
.vmm = {{ -1, -1, NVIF_CLASS_VMM_NV50}, nv50_vmm_new, false, 0x0200 },
......
@@ -29,8 +29,11 @@
/* Map from compressed to corresponding uncompressed storage type.
* The value 0xff represents an invalid storage type.
*/
const u8 gf100_pte_storage_type_map[256] =
const u8 *
gf100_mmu_kind(struct nvkm_mmu *mmu, int *count)
{
static const u8
kind[256] = {
0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0xff, 0x01, /* 0x00 */
0x01, 0x01, 0x01, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0x11, 0xff, 0xff, 0xff, 0xff, 0xff, 0x11, /* 0x10 */
@@ -63,20 +66,15 @@ const u8 gf100_pte_storage_type_map[256] =
0xff, 0xfe, 0xff, 0xfe, 0xff, 0xfe, 0xfe, 0xff,
0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xf0 */
0xfe, 0xfe, 0xfe, 0xfe, 0xff, 0xfd, 0xfe, 0xff
};
const u8 *
gf100_mmu_kind(struct nvkm_mmu *mmu, int *count)
{
*count = ARRAY_SIZE(gf100_pte_storage_type_map);
return gf100_pte_storage_type_map;
*count = ARRAY_SIZE(kind);
return kind;
}
static const struct nvkm_mmu_func
gf100_mmu = {
.limit = (1ULL << 40),
.dma_bits = 40,
.lpg_shift = 17,
.mmu = {{ -1, -1, NVIF_CLASS_MMU_GF100}},
.mem = {{ -1, 0, NVIF_CLASS_MEM_GF100}, gf100_mem_new, gf100_mem_map },
.vmm = {{ -1, -1, NVIF_CLASS_VMM_GF100}, gf100_vmm_new },
......
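With gf100_fb_memtype_valid() gone, the kind table above is reached only through gf100_mmu_kind(). A minimal, hypothetical sketch of the same validity check, written against the signature shown in the hunk above (the helper name is invented for illustration):

/* Sketch only: reject storage types the gf100 kind map marks invalid. */
static bool example_kind_valid(struct nvkm_mmu *mmu, u8 kind)
{
	int count;
	const u8 *map = gf100_mmu_kind(mmu, &count);

	/* 0xff marks an invalid storage type in the table above. */
	return kind < count && map[kind] != 0xff;
}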
@@ -26,9 +26,7 @@
static const struct nvkm_mmu_func
gk104_mmu = {
.limit = (1ULL << 40),
.dma_bits = 40,
.lpg_shift = 17,
.mmu = {{ -1, -1, NVIF_CLASS_MMU_GF100}},
.mem = {{ -1, 0, NVIF_CLASS_MEM_GF100}, gf100_mem_new, gf100_mem_map },
.vmm = {{ -1, -1, NVIF_CLASS_VMM_GF100}, gk104_vmm_new },
......
@@ -26,9 +26,7 @@
static const struct nvkm_mmu_func
gk20a_mmu = {
.limit = (1ULL << 40),
.dma_bits = 40,
.lpg_shift = 17,
.mmu = {{ -1, -1, NVIF_CLASS_MMU_GF100}},
.mem = {{ -1, -1, NVIF_CLASS_MEM_GF100}, .umap = gf100_mem_map },
.vmm = {{ -1, -1, NVIF_CLASS_VMM_GF100}, gk20a_vmm_new },
......
@@ -70,9 +70,7 @@ gm200_mmu_kind(struct nvkm_mmu *mmu, int *count)
static const struct nvkm_mmu_func
gm200_mmu = {
.limit = (1ULL << 40),
.dma_bits = 40,
.lpg_shift = 17,
.mmu = {{ -1, -1, NVIF_CLASS_MMU_GF100}},
.mem = {{ -1, 0, NVIF_CLASS_MEM_GF100}, gf100_mem_new, gf100_mem_map },
.vmm = {{ -1, 0, NVIF_CLASS_VMM_GM200}, gm200_vmm_new },
@@ -82,9 +80,7 @@ gm200_mmu = {
static const struct nvkm_mmu_func
gm200_mmu_fixed = {
.limit = (1ULL << 40),
.dma_bits = 40,
.lpg_shift = 17,
.mmu = {{ -1, -1, NVIF_CLASS_MMU_GF100}},
.mem = {{ -1, 0, NVIF_CLASS_MEM_GF100}, gf100_mem_new, gf100_mem_map },
.vmm = {{ -1, -1, NVIF_CLASS_VMM_GM200}, gm200_vmm_new_fixed },
......
@@ -28,9 +28,7 @@
static const struct nvkm_mmu_func
gm20b_mmu = {
.limit = (1ULL << 40),
.dma_bits = 40,
.lpg_shift = 17,
.mmu = {{ -1, -1, NVIF_CLASS_MMU_GF100}},
.mem = {{ -1, -1, NVIF_CLASS_MEM_GF100}, .umap = gf100_mem_map },
.vmm = {{ -1, 0, NVIF_CLASS_VMM_GM200}, gm20b_vmm_new },
@@ -40,9 +38,7 @@ gm20b_mmu = {
static const struct nvkm_mmu_func
gm20b_mmu_fixed = {
.limit = (1ULL << 40),
.dma_bits = 40,
.lpg_shift = 17,
.mmu = {{ -1, -1, NVIF_CLASS_MMU_GF100}},
.mem = {{ -1, -1, NVIF_CLASS_MEM_GF100}, .umap = gf100_mem_map },
.vmm = {{ -1, -1, NVIF_CLASS_VMM_GM200}, gm20b_vmm_new_fixed },
......
@@ -28,9 +28,7 @@
static const struct nvkm_mmu_func
gp100_mmu = {
.limit = (1ULL << 49),
.dma_bits = 47,
.lpg_shift = 16,
.mmu = {{ -1, -1, NVIF_CLASS_MMU_GF100}},
.mem = {{ -1, 0, NVIF_CLASS_MEM_GF100}, gf100_mem_new, gf100_mem_map },
.vmm = {{ -1, -1, NVIF_CLASS_VMM_GP100}, gp100_vmm_new },
......
@@ -28,9 +28,7 @@
static const struct nvkm_mmu_func
gp10b_mmu = {
.limit = (1ULL << 49),
.dma_bits = 47,
.lpg_shift = 16,
.mmu = {{ -1, -1, NVIF_CLASS_MMU_GF100}},
.mem = {{ -1, -1, NVIF_CLASS_MEM_GF100}, .umap = gf100_mem_map },
.vmm = {{ -1, -1, NVIF_CLASS_VMM_GP100}, gp10b_vmm_new },
......
@@ -26,13 +26,9 @@
#include <nvif/class.h>
#define NV04_PDMA_SIZE (128 * 1024 * 1024)
const struct nvkm_mmu_func
nv04_mmu = {
.limit = NV04_PDMA_SIZE,
.dma_bits = 32,
.lpg_shift = 12,
.mmu = {{ -1, -1, NVIF_CLASS_MMU_NV04}},
.mem = {{ -1, -1, NVIF_CLASS_MEM_NV04}, nv04_mem_new, nv04_mem_map },
.vmm = {{ -1, -1, NVIF_CLASS_VMM_NV04}, nv04_vmm_new, true },
......
@@ -28,8 +28,6 @@
#include <nvif/class.h>
#define NV41_GART_SIZE (512 * 1024 * 1024)
static void
nv41_mmu_init(struct nvkm_mmu *mmu)
{
@@ -42,9 +40,7 @@ nv41_mmu_init(struct nvkm_mmu *mmu)
static const struct nvkm_mmu_func
nv41_mmu = {
.init = nv41_mmu_init,
.limit = NV41_GART_SIZE,
.dma_bits = 39,
.lpg_shift = 12,
.mmu = {{ -1, -1, NVIF_CLASS_MMU_NV04}},
.mem = {{ -1, -1, NVIF_CLASS_MEM_NV04}, nv04_mem_new, nv04_mem_map },
.vmm = {{ -1, -1, NVIF_CLASS_VMM_NV04}, nv41_vmm_new, true },
......
@@ -28,8 +28,6 @@
#include <nvif/class.h>
#define NV44_GART_SIZE (512 * 1024 * 1024)
static void
nv44_mmu_init(struct nvkm_mmu *mmu)
{
@@ -57,9 +55,7 @@ nv44_mmu_init(struct nvkm_mmu *mmu)
static const struct nvkm_mmu_func
nv44_mmu = {
.init = nv44_mmu_init,
.limit = NV44_GART_SIZE,
.dma_bits = 39,
.lpg_shift = 12,
.mmu = {{ -1, -1, NVIF_CLASS_MMU_NV04}},
.mem = {{ -1, -1, NVIF_CLASS_MEM_NV04}, nv04_mem_new, nv04_mem_map },
.vmm = {{ -1, -1, NVIF_CLASS_VMM_NV04}, nv44_vmm_new, true },
......
@@ -62,9 +62,7 @@ nv50_mmu_kind(struct nvkm_mmu *base, int *count)
static const struct nvkm_mmu_func
nv50_mmu = {
.limit = (1ULL << 40),
.dma_bits = 40,
.lpg_shift = 16,
.mmu = {{ -1, -1, NVIF_CLASS_MMU_NV50}},
.mem = {{ -1, 0, NVIF_CLASS_MEM_NV50}, nv50_mem_new, nv50_mem_map },
.vmm = {{ -1, -1, NVIF_CLASS_VMM_NV50}, nv50_vmm_new, false, 0x1400 },
......
@@ -11,9 +11,7 @@ int nvkm_mmu_new_(const struct nvkm_mmu_func *, struct nvkm_device *,
struct nvkm_mmu_func {
void (*init)(struct nvkm_mmu *);
u64 limit;
u8 dma_bits;
u8 lpg_shift;
struct {
struct nvkm_sclass user;
......
@@ -700,7 +700,7 @@ nvkm_vmm_ptes_unmap(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
func->unmap);
}
void
static void
nvkm_vmm_ptes_map(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
u64 addr, u64 size, struct nvkm_vmm_map *map,
nvkm_vmm_pte_func func)
......
@@ -160,9 +160,6 @@ int nvkm_vmm_get_locked(struct nvkm_vmm *, bool getref, bool mapref,
void nvkm_vmm_put_locked(struct nvkm_vmm *, struct nvkm_vma *);
void nvkm_vmm_unmap_locked(struct nvkm_vmm *, struct nvkm_vma *);
void nvkm_vmm_unmap_region(struct nvkm_vmm *vmm, struct nvkm_vma *vma);
void nvkm_vmm_ptes_map(struct nvkm_vmm *, const struct nvkm_vmm_page *,
u64 addr, u64 size, struct nvkm_vmm_map *,
nvkm_vmm_pte_func);
struct nvkm_vma *nvkm_vma_tail(struct nvkm_vma *, u64 tail);
void nvkm_vmm_node_insert(struct nvkm_vmm *, struct nvkm_vma *);
......