Commit 26880e76 authored by Ben Skeggs

drm/nouveau/mmu: remove support for old backends

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
parent f9400afb
@@ -23,7 +23,6 @@
 #define NV_MEM_COMP_VM 0x03
 struct nvkm_mem {
-        struct nvkm_mm_node *tag;
         struct nvkm_mm_node *mem;
         dma_addr_t *pages;
         u32 memtype;
...
@@ -39,9 +39,6 @@ struct nvkm_vm {
         struct nvkm_mm mm;
         struct kref refcount;
-        struct nvkm_vm_pgt *pgt;
-        u32 fpde;
-        u32 lpde;
         bool bootstrapped;
         atomic_t engref[NVKM_SUBDEV_NR];
...
@@ -23,8 +23,6 @@
 #include "nouveau_drv.h"
 #include "nouveau_bo.h"
-#include <subdev/ltc.h>
 #include <drm/ttm/ttm_bo_driver.h>
 int
@@ -46,8 +44,6 @@ nouveau_mem_fini(struct nouveau_mem *mem)
                 nvkm_vm_unmap(&mem->vma[0]);
                 nvkm_vm_put(&mem->vma[0]);
         }
-        nvkm_memory_tags_put(&mem->memory, nvxx_device(&mem->cli->device),
-                             &mem->tags);
 }
 int
@@ -112,32 +108,6 @@ nouveau_mem_vram(struct ttm_mem_reg *reg, bool contig, u8 page)
         mem->_mem->size = size >> NVKM_RAM_MM_SHIFT;
         mem->_mem->offset = nvkm_memory_addr(mem->_mem->memory);
-        if (cli->device.info.chipset < 0xc0 && mem->comp) {
-                if (page == 16) {
-                        ret = nvkm_memory_tags_get(mem->_mem->memory, device,
-                                                   size >> page, NULL,
-                                                   &mem->tags);
-                        WARN_ON(ret);
-                }
-                if (!mem->tags || !mem->tags->mn)
-                        mem->comp = 0;
-        } else
-        if (cli->device.info.chipset >= 0xc0 &&
-            gf100_pte_storage_type_map[mem->kind] != mem->kind) {
-                if (page == 17) {
-                        ret = nvkm_memory_tags_get(mem->_mem->memory, device,
-                                                   size >> page,
-                                                   nvkm_ltc_tags_clear,
-                                                   &mem->tags);
-                        WARN_ON(ret);
-                }
-                if (!mem->tags || !mem->tags->mn)
-                        mem->kind = gf100_pte_storage_type_map[mem->kind];
-        }
-        if (mem->tags && mem->tags->mn)
-                mem->_mem->tag = mem->tags->mn;
         mem->_mem->mem = ((struct nvkm_vram *)mem->_mem->memory)->mn;
         mem->_mem->memtype = (mem->comp << 7) | mem->kind;
...
@@ -25,7 +25,6 @@ struct nouveau_mem {
         struct nvkm_vma bar_vma;
         struct nvkm_memory memory;
-        struct nvkm_tags *tags;
 };
 enum nvif_vmm_get {
...
@@ -24,7 +24,6 @@
 #include "priv.h"
 #include "vmm.h"
-#include <core/gpuobj.h>
 #include <subdev/fb.h>
 #include <nvif/if500d.h>
@@ -316,17 +315,6 @@ void
 nvkm_vm_map_at(struct nvkm_vma *vma, u64 delta, struct nvkm_mem *node)
 {
         const struct nvkm_vmm_page *page = vma->vm->func->page;
-        struct nvkm_vm *vm = vma->vm;
-        struct nvkm_mmu *mmu = vm->mmu;
-        struct nvkm_mm_node *r = node->mem;
-        int big = vma->node->type != mmu->func->spg_shift;
-        u32 offset = vma->node->offset + (delta >> 12);
-        u32 bits = vma->node->type - 12;
-        u32 pde = (offset >> mmu->func->pgt_bits) - vm->fpde;
-        u32 pte = (offset & ((1 << mmu->func->pgt_bits) - 1)) >> bits;
-        u32 max = 1 << (mmu->func->pgt_bits - bits);
-        u32 end, len;
         if (page->desc->func->unmap) {
                 struct nvkm_vmm_map map = { .mem = node->mem };
                 while (page->shift != vma->node->type)
@@ -334,36 +322,6 @@ nvkm_vm_map_at(struct nvkm_vma *vma, u64 delta, struct nvkm_mem *node)
                 nvkm_vm_map_(page, vma, delta, node, page->desc->func->mem, &map);
                 return;
         }
-        delta = 0;
-        while (r) {
-                u64 phys = (u64)r->offset << 12;
-                u32 num = r->length >> bits;
-                while (num) {
-                        struct nvkm_memory *pgt = vm->pgt[pde].mem[big];
-                        end = (pte + num);
-                        if (unlikely(end >= max))
-                                end = max;
-                        len = end - pte;
-                        mmu->func->map(vma, pgt, node, pte, len, phys, delta);
-                        num -= len;
-                        pte += len;
-                        if (unlikely(end >= max)) {
-                                phys += len << (bits + 12);
-                                pde++;
-                                pte = 0;
-                        }
-                        delta += (u64)len << vma->node->type;
-                }
-                r = r->next;
-        }
-        mmu->func->flush(vm);
 }
 static void
@@ -371,20 +329,6 @@ nvkm_vm_map_sg_table(struct nvkm_vma *vma, u64 delta, u64 length,
                      struct nvkm_mem *mem)
 {
         const struct nvkm_vmm_page *page = vma->vm->func->page;
-        struct nvkm_vm *vm = vma->vm;
-        struct nvkm_mmu *mmu = vm->mmu;
-        int big = vma->node->type != mmu->func->spg_shift;
-        u32 offset = vma->node->offset + (delta >> 12);
-        u32 bits = vma->node->type - 12;
-        u32 num = length >> vma->node->type;
-        u32 pde = (offset >> mmu->func->pgt_bits) - vm->fpde;
-        u32 pte = (offset & ((1 << mmu->func->pgt_bits) - 1)) >> bits;
-        u32 max = 1 << (mmu->func->pgt_bits - bits);
-        unsigned m, sglen;
-        u32 end, len;
-        int i;
-        struct scatterlist *sg;
         if (page->desc->func->unmap) {
                 struct nvkm_vmm_map map = { .sgl = mem->sg->sgl };
                 while (page->shift != vma->node->type)
@@ -392,45 +336,6 @@ nvkm_vm_map_sg_table(struct nvkm_vma *vma, u64 delta, u64 length,
                 nvkm_vm_map_(page, vma, delta, mem, page->desc->func->sgl, &map);
                 return;
         }
-        for_each_sg(mem->sg->sgl, sg, mem->sg->nents, i) {
-                struct nvkm_memory *pgt = vm->pgt[pde].mem[big];
-                sglen = sg_dma_len(sg) >> PAGE_SHIFT;
-                end = pte + sglen;
-                if (unlikely(end >= max))
-                        end = max;
-                len = end - pte;
-                for (m = 0; m < len; m++) {
-                        dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
-                        mmu->func->map_sg(vma, pgt, mem, pte, 1, &addr);
-                        num--;
-                        pte++;
-                        if (num == 0)
-                                goto finish;
-                }
-                if (unlikely(end >= max)) {
-                        pde++;
-                        pte = 0;
-                }
-                if (m < sglen) {
-                        for (; m < sglen; m++) {
-                                dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
-                                mmu->func->map_sg(vma, pgt, mem, pte, 1, &addr);
-                                num--;
-                                pte++;
-                                if (num == 0)
-                                        goto finish;
-                        }
-                }
-        }
-finish:
-        mmu->func->flush(vm);
 }
 static void
@@ -438,18 +343,6 @@ nvkm_vm_map_sg(struct nvkm_vma *vma, u64 delta, u64 length,
                struct nvkm_mem *mem)
 {
         const struct nvkm_vmm_page *page = vma->vm->func->page;
-        struct nvkm_vm *vm = vma->vm;
-        struct nvkm_mmu *mmu = vm->mmu;
-        dma_addr_t *list = mem->pages;
-        int big = vma->node->type != mmu->func->spg_shift;
-        u32 offset = vma->node->offset + (delta >> 12);
-        u32 bits = vma->node->type - 12;
-        u32 num = length >> vma->node->type;
-        u32 pde = (offset >> mmu->func->pgt_bits) - vm->fpde;
-        u32 pte = (offset & ((1 << mmu->func->pgt_bits) - 1)) >> bits;
-        u32 max = 1 << (mmu->func->pgt_bits - bits);
-        u32 end, len;
 if (page->desc->func->unmap) {
                 struct nvkm_vmm_map map = { .dma = mem->pages };
                 while (page->shift != vma->node->type)
@@ -457,27 +350,6 @@ nvkm_vm_map_sg(struct nvkm_vma *vma, u64 delta, u64 length,
                 nvkm_vm_map_(page, vma, delta, mem, page->desc->func->dma, &map);
                 return;
         }
-        while (num) {
-                struct nvkm_memory *pgt = vm->pgt[pde].mem[big];
-                end = (pte + num);
-                if (unlikely(end >= max))
-                        end = max;
-                len = end - pte;
-                mmu->func->map_sg(vma, pgt, mem, pte, len, list);
-                num -= len;
-                pte += len;
-                list += len;
-                if (unlikely(end >= max)) {
-                        pde++;
-                        pte = 0;
-                }
-        }
-        mmu->func->flush(vm);
 }
 void
@@ -496,16 +368,6 @@ void
 nvkm_vm_unmap_at(struct nvkm_vma *vma, u64 delta, u64 length)
 {
         struct nvkm_vm *vm = vma->vm;
-        struct nvkm_mmu *mmu = vm->mmu;
-        int big = vma->node->type != mmu->func->spg_shift;
-        u32 offset = vma->node->offset + (delta >> 12);
-        u32 bits = vma->node->type - 12;
-        u32 num = length >> vma->node->type;
-        u32 pde = (offset >> mmu->func->pgt_bits) - vm->fpde;
-        u32 pte = (offset & ((1 << mmu->func->pgt_bits) - 1)) >> bits;
-        u32 max = 1 << (mmu->func->pgt_bits - bits);
-        u32 end, len;
         if (vm->func->page->desc->func->unmap) {
                 const struct nvkm_vmm_page *page = vm->func->page;
                 while (page->shift != vma->node->type)
@@ -516,26 +378,6 @@ nvkm_vm_unmap_at(struct nvkm_vma *vma, u64 delta, u64 length)
                 mutex_unlock(&vm->mutex);
                 return;
         }
-        while (num) {
-                struct nvkm_memory *pgt = vm->pgt[pde].mem[big];
-                end = (pte + num);
-                if (unlikely(end >= max))
-                        end = max;
-                len = end - pte;
-                mmu->func->unmap(vma, pgt, pte, len);
-                num -= len;
-                pte += len;
-                if (unlikely(end >= max)) {
-                        pde++;
-                        pte = 0;
-                }
-        }
-        mmu->func->flush(vm);
 }
 void
@@ -547,63 +389,12 @@ nvkm_vm_unmap(struct nvkm_vma *vma)
         nvkm_memory_unref(&vma->memory);
 }
-static void
-nvkm_vm_unmap_pgt(struct nvkm_vm *vm, int big, u32 fpde, u32 lpde)
-{
-        struct nvkm_mmu *mmu = vm->mmu;
-        struct nvkm_vm_pgt *vpgt;
-        struct nvkm_memory *pgt;
-        u32 pde;
-        for (pde = fpde; pde <= lpde; pde++) {
-                vpgt = &vm->pgt[pde - vm->fpde];
-                if (--vpgt->refcount[big])
-                        continue;
-                pgt = vpgt->mem[big];
-                vpgt->mem[big] = NULL;
-                if (mmu->func->map_pgt)
-                        mmu->func->map_pgt(vm, pde, vpgt->mem);
-                mmu->func->flush(vm);
-                nvkm_memory_unref(&pgt);
-        }
-}
-static int
-nvkm_vm_map_pgt(struct nvkm_vm *vm, u32 pde, u32 type)
-{
-        struct nvkm_mmu *mmu = vm->mmu;
-        struct nvkm_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde];
-        int big = (type != mmu->func->spg_shift);
-        u32 pgt_size;
-        int ret;
-        pgt_size = (1 << (mmu->func->pgt_bits + 12)) >> type;
-        pgt_size *= 8;
-        ret = nvkm_memory_new(mmu->subdev.device, NVKM_MEM_TARGET_INST,
-                              pgt_size, 0x1000, true, &vpgt->mem[big]);
-        if (unlikely(ret))
-                return ret;
-        if (mmu->func->map_pgt)
-                mmu->func->map_pgt(vm, pde, vpgt->mem);
-        vpgt->refcount[big]++;
-        return 0;
-}
 int
 nvkm_vm_get(struct nvkm_vm *vm, u64 size, u32 page_shift, u32 access,
             struct nvkm_vma *vma)
 {
-        struct nvkm_mmu *mmu = vm->mmu;
         u32 align = (1 << page_shift) >> 12;
         u32 msize = size >> 12;
-        u32 fpde, lpde, pde;
         int ret;
         mutex_lock(&vm->mutex);
@@ -626,32 +417,7 @@ nvkm_vm_get(struct nvkm_vm *vm, u64 size, u32 page_shift, u32 access,
                         mutex_unlock(&vm->mutex);
                         return ret;
                 }
-                goto done;
-        }
-        fpde = (vma->node->offset >> mmu->func->pgt_bits);
-        lpde = (vma->node->offset + vma->node->length - 1) >> mmu->func->pgt_bits;
-        for (pde = fpde; pde <= lpde; pde++) {
-                struct nvkm_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde];
-                int big = (vma->node->type != mmu->func->spg_shift);
-                if (likely(vpgt->refcount[big])) {
-                        vpgt->refcount[big]++;
-                        continue;
-                }
-                ret = nvkm_vm_map_pgt(vm, pde, vma->node->type);
-                if (ret) {
-                        if (pde != fpde)
-                                nvkm_vm_unmap_pgt(vm, big, fpde, pde - 1);
-                        nvkm_mm_free(&vm->mm, &vma->node);
-                        mutex_unlock(&vm->mutex);
-                        return ret;
-                }
         }
-done:
         mutex_unlock(&vm->mutex);
         vma->memory = NULL;
@@ -668,7 +434,6 @@ nvkm_vm_put(struct nvkm_vma *vma)
 {
         struct nvkm_mmu *mmu;
         struct nvkm_vm *vm;
-        u32 fpde, lpde;
         if (unlikely(vma->node == NULL))
                 return;
@@ -678,9 +443,6 @@ nvkm_vm_put(struct nvkm_vma *vma)
         nvkm_memory_tags_put(vma->memory, mmu->subdev.device, &vma->tags);
         nvkm_memory_unref(&vma->memory);
-        fpde = (vma->node->offset >> mmu->func->pgt_bits);
-        lpde = (vma->node->offset + vma->node->length - 1) >> mmu->func->pgt_bits;
         mutex_lock(&vm->mutex);
         if (vm->func->page->desc->func->unmap) {
                 const struct nvkm_vmm_page *page = vm->func->page;
@@ -689,11 +451,8 @@ nvkm_vm_put(struct nvkm_vma *vma)
                 nvkm_vmm_ptes_put(vm, page, vma->node->offset << 12,
                                   vma->node->length << 12);
-                goto done;
         }
-        nvkm_vm_unmap_pgt(vm, vma->node->type != mmu->func->spg_shift, fpde, lpde);
-done:
         nvkm_mm_free(&vm->mm, &vma->node);
         mutex_unlock(&vm->mutex);
@@ -703,23 +462,7 @@ nvkm_vm_put(struct nvkm_vma *vma)
 int
 nvkm_vm_boot(struct nvkm_vm *vm, u64 size)
 {
-        struct nvkm_mmu *mmu = vm->mmu;
-        struct nvkm_memory *pgt;
-        int ret;
-        if (vm->func->page->desc->func->unmap)
         return nvkm_vmm_boot(vm);
-        ret = nvkm_memory_new(mmu->subdev.device, NVKM_MEM_TARGET_INST,
-                              (size >> mmu->func->spg_shift) * 8, 0x1000, true, &pgt);
-        if (ret == 0) {
-                vm->pgt[0].refcount[0] = 1;
-                vm->pgt[0].mem[0] = pgt;
-                nvkm_memory_boot(pgt, vm);
-                vm->bootstrapped = true;
-        }
-        return ret;
 }
 static int
@@ -730,24 +473,14 @@ nvkm_vm_legacy(struct nvkm_mmu *mmu, u64 offset, u64 length, u64 mm_offset,
         int ret;
         kref_init(&vm->refcount);
-        vm->fpde = offset >> (mmu->func->pgt_bits + 12);
-        vm->lpde = (offset + length - 1) >> (mmu->func->pgt_bits + 12);
-        vm->pgt = vzalloc((vm->lpde - vm->fpde + 1) * sizeof(*vm->pgt));
-        if (!vm->pgt) {
-                kfree(vm);
-                return -ENOMEM;
-        }
         if (block > length)
                 block = length;
         ret = nvkm_mm_init(&vm->mm, 0, mm_offset >> 12, mm_length >> 12,
                            block >> 12);
-        if (ret) {
-                vfree(vm->pgt);
+        if (ret)
                 return ret;
-        }
         return 0;
 }
@@ -786,7 +519,6 @@ nvkm_vm_del(struct kref *kref)
         struct nvkm_vm *vm = container_of(kref, typeof(*vm), refcount);
         nvkm_mm_fini(&vm->mm);
-        vfree(vm->pgt);
         if (vm->func)
                 nvkm_vmm_dtor(vm);
         kfree(vm);
@@ -797,14 +529,9 @@ nvkm_vm_ref(struct nvkm_vm *ref, struct nvkm_vm **ptr, struct nvkm_memory *inst)
 {
         if (ref) {
                 if (ref->func->join && inst) {
-                        int ret = ref->func->join(ref, inst), i;
+                        int ret = ref->func->join(ref, inst);
                         if (ret)
                                 return ret;
-                        if (!ref->func->page->desc->func->unmap && ref->mmu->func->map_pgt) {
-                                for (i = ref->fpde; i <= ref->lpde; i++)
-                                        ref->mmu->func->map_pgt(ref, i, ref->pgt[i - ref->fpde].mem);
-                        }
                 }
                 kref_get(&ref->refcount);
@@ -813,12 +540,6 @@ nvkm_vm_ref(struct nvkm_vm *ref, struct nvkm_vm **ptr, struct nvkm_memory *inst)
         if (*ptr) {
                 if ((*ptr)->func->part && inst)
                         (*ptr)->func->part(*ptr, inst);
-                if ((*ptr)->bootstrapped && inst) {
-                        if (!(*ptr)->func->page->desc->func->unmap) {
-                                nvkm_memory_unref(&(*ptr)->pgt[0].mem[0]);
-                                (*ptr)->bootstrapped = false;
-                        }
-                }
                 kref_put(&(*ptr)->refcount, nvkm_vm_del);
         }
@@ -838,9 +559,6 @@ nvkm_mmu_oneinit(struct nvkm_subdev *subdev)
                 return ret;
         }
-        if (mmu->func->oneinit)
-                return mmu->func->oneinit(mmu);
         return 0;
 }
...
@@ -27,8 +27,6 @@ static const struct nvkm_mmu_func
 g84_mmu = {
         .limit = (1ULL << 40),
         .dma_bits = 40,
-        .pgt_bits = 29 - 12,
-        .spg_shift = 12,
         .lpg_shift = 16,
         .vmm = {{ -1, -1, NVIF_CLASS_VMM_NV50}, nv50_vmm_new, false, 0x0200 },
         .kind = nv50_mmu_kind,
...
@@ -75,8 +75,6 @@ static const struct nvkm_mmu_func
 gf100_mmu = {
         .limit = (1ULL << 40),
         .dma_bits = 40,
-        .pgt_bits = 27 - 12,
-        .spg_shift = 12,
         .lpg_shift = 17,
         .vmm = {{ -1, -1, NVIF_CLASS_VMM_GF100}, gf100_vmm_new },
         .kind = gf100_mmu_kind,
...
@@ -27,8 +27,6 @@ static const struct nvkm_mmu_func
 gk104_mmu = {
         .limit = (1ULL << 40),
         .dma_bits = 40,
-        .pgt_bits = 27 - 12,
-        .spg_shift = 12,
         .lpg_shift = 17,
         .vmm = {{ -1, -1, NVIF_CLASS_VMM_GF100}, gk104_vmm_new },
         .kind = gf100_mmu_kind,
...
@@ -27,8 +27,6 @@ static const struct nvkm_mmu_func
 gk20a_mmu = {
         .limit = (1ULL << 40),
         .dma_bits = 40,
-        .pgt_bits = 27 - 12,
-        .spg_shift = 12,
         .lpg_shift = 17,
         .vmm = {{ -1, -1, NVIF_CLASS_VMM_GF100}, gk20a_vmm_new },
         .kind = gf100_mmu_kind,
...
@@ -71,8 +71,6 @@ static const struct nvkm_mmu_func
 gm200_mmu = {
         .limit = (1ULL << 40),
         .dma_bits = 40,
-        .pgt_bits = 27 - 12,
-        .spg_shift = 12,
         .lpg_shift = 17,
         .vmm = {{ -1, 0, NVIF_CLASS_VMM_GM200}, gm200_vmm_new },
         .kind = gm200_mmu_kind,
@@ -82,8 +80,6 @@ static const struct nvkm_mmu_func
 gm200_mmu_fixed = {
         .limit = (1ULL << 40),
         .dma_bits = 40,
-        .pgt_bits = 27 - 12,
-        .spg_shift = 12,
         .lpg_shift = 17,
         .vmm = {{ -1, -1, NVIF_CLASS_VMM_GM200}, gm200_vmm_new_fixed },
         .kind = gm200_mmu_kind,
...
@@ -29,8 +29,6 @@ static const struct nvkm_mmu_func
 gm20b_mmu = {
         .limit = (1ULL << 40),
         .dma_bits = 40,
-        .pgt_bits = 27 - 12,
-        .spg_shift = 12,
         .lpg_shift = 17,
         .vmm = {{ -1, 0, NVIF_CLASS_VMM_GM200}, gm20b_vmm_new },
         .kind = gm200_mmu_kind,
@@ -40,8 +38,6 @@ static const struct nvkm_mmu_func
 gm20b_mmu_fixed = {
         .limit = (1ULL << 40),
         .dma_bits = 40,
-        .pgt_bits = 27 - 12,
-        .spg_shift = 12,
         .lpg_shift = 17,
         .vmm = {{ -1, -1, NVIF_CLASS_VMM_GM200}, gm20b_vmm_new_fixed },
         .kind = gm200_mmu_kind,
...
@@ -31,8 +31,6 @@ const struct nvkm_mmu_func
 nv04_mmu = {
         .limit = NV04_PDMA_SIZE,
         .dma_bits = 32,
-        .pgt_bits = 32 - 12,
-        .spg_shift = 12,
         .lpg_shift = 12,
         .vmm = {{ -1, -1, NVIF_CLASS_VMM_NV04}, nv04_vmm_new, true },
 };
...
@@ -43,8 +43,6 @@ nv41_mmu = {
         .init = nv41_mmu_init,
         .limit = NV41_GART_SIZE,
         .dma_bits = 39,
-        .pgt_bits = 32 - 12,
-        .spg_shift = 12,
         .lpg_shift = 12,
         .vmm = {{ -1, -1, NVIF_CLASS_VMM_NV04}, nv41_vmm_new, true },
 };
...
@@ -58,8 +58,6 @@ nv44_mmu = {
         .init = nv44_mmu_init,
         .limit = NV44_GART_SIZE,
         .dma_bits = 39,
-        .pgt_bits = 32 - 12,
-        .spg_shift = 12,
         .lpg_shift = 12,
         .vmm = {{ -1, -1, NVIF_CLASS_VMM_NV04}, nv44_vmm_new, true },
 };
...
@@ -63,8 +63,6 @@ static const struct nvkm_mmu_func
 nv50_mmu = {
         .limit = (1ULL << 40),
         .dma_bits = 40,
-        .pgt_bits = 29 - 12,
-        .spg_shift = 12,
         .lpg_shift = 16,
         .vmm = {{ -1, -1, NVIF_CLASS_VMM_NV50}, nv50_vmm_new, false, 0x1400 },
         .kind = nv50_mmu_kind,
...
@@ -9,26 +9,12 @@ int nvkm_mmu_new_(const struct nvkm_mmu_func *, struct nvkm_device *,
                   int index, struct nvkm_mmu **);
 struct nvkm_mmu_func {
-        int (*oneinit)(struct nvkm_mmu *);
         void (*init)(struct nvkm_mmu *);
         u64 limit;
         u8 dma_bits;
-        u32 pgt_bits;
-        u8 spg_shift;
         u8 lpg_shift;
-        void (*map_pgt)(struct nvkm_vmm *, u32 pde,
-                        struct nvkm_memory *pgt[2]);
-        void (*map)(struct nvkm_vma *, struct nvkm_memory *,
-                    struct nvkm_mem *, u32 pte, u32 cnt,
-                    u64 phys, u64 delta);
-        void (*map_sg)(struct nvkm_vma *, struct nvkm_memory *,
-                       struct nvkm_mem *, u32 pte, u32 cnt, dma_addr_t *);
-        void (*unmap)(struct nvkm_vma *, struct nvkm_memory *pgt,
-                      u32 pte, u32 cnt);
-        void (*flush)(struct nvkm_vm *);
         struct {
                 struct nvkm_sclass user;
                 int (*ctor)(struct nvkm_mmu *, u64 addr, u64 size,
...