Commit 1f5bffca authored by Ben Skeggs

drm/nouveau/mmu: cosmetic changes

This is purely preparation for upcoming commits, there should be no
code changes here.
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
parent 2ca0ddbc
@@ -38,7 +38,7 @@ struct nvkm_vm {
 };
 struct nvkm_mmu {
-    struct nvkm_subdev base;
+    struct nvkm_subdev subdev;
     u64 limit;
     u8 dma_bits;
@@ -69,11 +69,11 @@ nvkm_mmu(void *obj)
 #define nvkm_mmu_create(p,e,o,i,f,d) \
     nvkm_subdev_create((p), (e), (o), 0, (i), (f), (d))
 #define nvkm_mmu_destroy(p) \
-    nvkm_subdev_destroy(&(p)->base)
+    nvkm_subdev_destroy(&(p)->subdev)
 #define nvkm_mmu_init(p) \
-    nvkm_subdev_init(&(p)->base)
+    nvkm_subdev_init(&(p)->subdev)
 #define nvkm_mmu_fini(p,s) \
-    nvkm_subdev_fini(&(p)->base, (s))
+    nvkm_subdev_fini(&(p)->subdev, (s))
 #define _nvkm_mmu_dtor _nvkm_subdev_dtor
 #define _nvkm_mmu_init _nvkm_subdev_init
...
@@ -221,7 +221,7 @@ nv04_gart_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
 {
     struct nouveau_drm *drm = nouveau_bdev(man->bdev);
     struct nvkm_mmu *mmu = nvxx_mmu(&drm->device);
-    struct nv04_mmu_priv *priv = (void *)mmu;
+    struct nv04_mmu *priv = (void *)mmu;
     struct nvkm_vm *vm = NULL;
     nvkm_vm_ref(priv->vm, &vm, NULL);
     man->priv = vm;
...
@@ -60,7 +60,7 @@ nv04_dmaobj_bind(struct nvkm_dmaobj *dmaobj, struct nvkm_object *parent,
     }
     if (priv->clone) {
-        struct nv04_mmu_priv *mmu = nv04_mmu(dmaobj);
+        struct nv04_mmu *mmu = nv04_mmu(dmaobj);
         struct nvkm_gpuobj *pgt = mmu->vm->pgt[0].obj[0];
         if (!dmaobj->start)
             return nvkm_gpuobj_dup(parent, pgt, pgpuobj);
@@ -86,7 +86,7 @@ nv04_dmaobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
               struct nvkm_object **pobject)
 {
     struct nvkm_dmaeng *dmaeng = (void *)engine;
-    struct nv04_mmu_priv *mmu = nv04_mmu(engine);
+    struct nv04_mmu *mmu = nv04_mmu(engine);
     struct nv04_dmaobj_priv *priv;
     int ret;
...
@@ -29,11 +29,6 @@
 #include <core/gpuobj.h>
-struct gf100_mmu_priv {
-    struct nvkm_mmu base;
-};
 /* Map from compressed to corresponding uncompressed storage type.
  * The value 0xff represents an invalid storage type.
  */
@@ -158,8 +153,8 @@ gf100_vm_unmap(struct nvkm_gpuobj *pgt, u32 pte, u32 cnt)
 static void
 gf100_vm_flush(struct nvkm_vm *vm)
 {
-    struct gf100_mmu_priv *priv = (void *)vm->mmu;
-    struct nvkm_bar *bar = nvkm_bar(priv);
+    struct nvkm_mmu *mmu = (void *)vm->mmu;
+    struct nvkm_bar *bar = nvkm_bar(mmu);
     struct nvkm_vm_pgd *vpgd;
     u32 type;
@@ -169,26 +164,26 @@ gf100_vm_flush(struct nvkm_vm *vm)
     if (atomic_read(&vm->engref[NVDEV_SUBDEV_BAR]))
         type |= 0x00000004; /* HUB_ONLY */
-    mutex_lock(&nv_subdev(priv)->mutex);
+    mutex_lock(&nv_subdev(mmu)->mutex);
     list_for_each_entry(vpgd, &vm->pgd_list, head) {
         /* looks like maybe a "free flush slots" counter, the
          * faster you write to 0x100cbc to more it decreases
          */
-        if (!nv_wait_ne(priv, 0x100c80, 0x00ff0000, 0x00000000)) {
-            nv_error(priv, "vm timeout 0: 0x%08x %d\n",
-                     nv_rd32(priv, 0x100c80), type);
+        if (!nv_wait_ne(mmu, 0x100c80, 0x00ff0000, 0x00000000)) {
+            nv_error(mmu, "vm timeout 0: 0x%08x %d\n",
+                     nv_rd32(mmu, 0x100c80), type);
         }
-        nv_wr32(priv, 0x100cb8, vpgd->obj->addr >> 8);
-        nv_wr32(priv, 0x100cbc, 0x80000000 | type);
+        nv_wr32(mmu, 0x100cb8, vpgd->obj->addr >> 8);
+        nv_wr32(mmu, 0x100cbc, 0x80000000 | type);
         /* wait for flush to be queued? */
-        if (!nv_wait(priv, 0x100c80, 0x00008000, 0x00008000)) {
-            nv_error(priv, "vm timeout 1: 0x%08x %d\n",
-                     nv_rd32(priv, 0x100c80), type);
+        if (!nv_wait(mmu, 0x100c80, 0x00008000, 0x00008000)) {
+            nv_error(mmu, "vm timeout 1: 0x%08x %d\n",
+                     nv_rd32(mmu, 0x100c80), type);
         }
     }
-    mutex_unlock(&nv_subdev(priv)->mutex);
+    mutex_unlock(&nv_subdev(mmu)->mutex);
 }
 static int
@@ -203,25 +198,25 @@ gf100_mmu_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
               struct nvkm_oclass *oclass, void *data, u32 size,
               struct nvkm_object **pobject)
 {
-    struct gf100_mmu_priv *priv;
+    struct nvkm_mmu *mmu;
     int ret;
-    ret = nvkm_mmu_create(parent, engine, oclass, "VM", "vm", &priv);
-    *pobject = nv_object(priv);
+    ret = nvkm_mmu_create(parent, engine, oclass, "VM", "mmu", &mmu);
+    *pobject = nv_object(mmu);
     if (ret)
         return ret;
-    priv->base.limit = 1ULL << 40;
-    priv->base.dma_bits = 40;
-    priv->base.pgt_bits = 27 - 12;
-    priv->base.spg_shift = 12;
-    priv->base.lpg_shift = 17;
-    priv->base.create = gf100_vm_create;
-    priv->base.map_pgt = gf100_vm_map_pgt;
-    priv->base.map = gf100_vm_map;
-    priv->base.map_sg = gf100_vm_map_sg;
-    priv->base.unmap = gf100_vm_unmap;
-    priv->base.flush = gf100_vm_flush;
+    mmu->limit = 1ULL << 40;
+    mmu->dma_bits = 40;
+    mmu->pgt_bits = 27 - 12;
+    mmu->spg_shift = 12;
+    mmu->lpg_shift = 17;
+    mmu->create = gf100_vm_create;
+    mmu->map_pgt = gf100_vm_map_pgt;
+    mmu->map = gf100_vm_map;
+    mmu->map_sg = gf100_vm_map_sg;
+    mmu->unmap = gf100_vm_unmap;
+    mmu->flush = gf100_vm_flush;
     return 0;
 }
...
@@ -84,37 +84,37 @@ nv04_mmu_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
              struct nvkm_oclass *oclass, void *data, u32 size,
              struct nvkm_object **pobject)
 {
-    struct nv04_mmu_priv *priv;
+    struct nv04_mmu *mmu;
     struct nvkm_gpuobj *dma;
     int ret;
     ret = nvkm_mmu_create(parent, engine, oclass, "PCIGART",
-                          "pcigart", &priv);
-    *pobject = nv_object(priv);
+                          "mmu", &mmu);
+    *pobject = nv_object(mmu);
     if (ret)
         return ret;
-    priv->base.create = nv04_vm_create;
-    priv->base.limit = NV04_PDMA_SIZE;
-    priv->base.dma_bits = 32;
-    priv->base.pgt_bits = 32 - 12;
-    priv->base.spg_shift = 12;
-    priv->base.lpg_shift = 12;
-    priv->base.map_sg = nv04_vm_map_sg;
-    priv->base.unmap = nv04_vm_unmap;
-    priv->base.flush = nv04_vm_flush;
-    ret = nvkm_vm_create(&priv->base, 0, NV04_PDMA_SIZE, 0, 4096,
-                         &priv->vm);
+    mmu->base.create = nv04_vm_create;
+    mmu->base.limit = NV04_PDMA_SIZE;
+    mmu->base.dma_bits = 32;
+    mmu->base.pgt_bits = 32 - 12;
+    mmu->base.spg_shift = 12;
+    mmu->base.lpg_shift = 12;
+    mmu->base.map_sg = nv04_vm_map_sg;
+    mmu->base.unmap = nv04_vm_unmap;
+    mmu->base.flush = nv04_vm_flush;
+    ret = nvkm_vm_create(&mmu->base, 0, NV04_PDMA_SIZE, 0, 4096,
+                         &mmu->vm);
     if (ret)
         return ret;
-    ret = nvkm_gpuobj_new(nv_object(priv), NULL,
+    ret = nvkm_gpuobj_new(nv_object(mmu), NULL,
                           (NV04_PDMA_SIZE / NV04_PDMA_PAGE) * 4 + 8,
                           16, NVOBJ_FLAG_ZERO_ALLOC,
-                          &priv->vm->pgt[0].obj[0]);
-    dma = priv->vm->pgt[0].obj[0];
-    priv->vm->pgt[0].refcount[0] = 1;
+                          &mmu->vm->pgt[0].obj[0]);
+    dma = mmu->vm->pgt[0].obj[0];
+    mmu->vm->pgt[0].refcount[0] = 1;
     if (ret)
         return ret;
@@ -126,16 +126,16 @@ nv04_mmu_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 void
 nv04_mmu_dtor(struct nvkm_object *object)
 {
-    struct nv04_mmu_priv *priv = (void *)object;
-    if (priv->vm) {
-        nvkm_gpuobj_ref(NULL, &priv->vm->pgt[0].obj[0]);
-        nvkm_vm_ref(NULL, &priv->vm, NULL);
+    struct nv04_mmu *mmu = (void *)object;
+    if (mmu->vm) {
+        nvkm_gpuobj_ref(NULL, &mmu->vm->pgt[0].obj[0]);
+        nvkm_vm_ref(NULL, &mmu->vm, NULL);
     }
-    if (priv->nullp) {
-        pci_free_consistent(nv_device(priv)->pdev, 16 * 1024,
-                            priv->nullp, priv->null);
+    if (mmu->nullp) {
+        pci_free_consistent(nv_device(mmu)->pdev, 16 * 1024,
+                            mmu->nullp, mmu->null);
     }
-    nvkm_mmu_destroy(&priv->base);
+    nvkm_mmu_destroy(&mmu->base);
 }
 struct nvkm_oclass
...
 #ifndef __NV04_MMU_PRIV__
 #define __NV04_MMU_PRIV__
 #include <subdev/mmu.h>
-struct nv04_mmu_priv {
+struct nv04_mmu {
     struct nvkm_mmu base;
     struct nvkm_vm *vm;
     dma_addr_t null;
     void *nullp;
 };
-static inline struct nv04_mmu_priv *
+static inline struct nv04_mmu *
 nv04_mmu(void *obj)
 {
     return (void *)nvkm_mmu(obj);
 }
 #endif
@@ -64,16 +64,16 @@ nv41_vm_unmap(struct nvkm_gpuobj *pgt, u32 pte, u32 cnt)
 static void
 nv41_vm_flush(struct nvkm_vm *vm)
 {
-    struct nv04_mmu_priv *priv = (void *)vm->mmu;
-    mutex_lock(&nv_subdev(priv)->mutex);
-    nv_wr32(priv, 0x100810, 0x00000022);
-    if (!nv_wait(priv, 0x100810, 0x00000020, 0x00000020)) {
-        nv_warn(priv, "flush timeout, 0x%08x\n",
-                nv_rd32(priv, 0x100810));
+    struct nv04_mmu *mmu = (void *)vm->mmu;
+    mutex_lock(&nv_subdev(mmu)->mutex);
+    nv_wr32(mmu, 0x100810, 0x00000022);
+    if (!nv_wait(mmu, 0x100810, 0x00000020, 0x00000020)) {
+        nv_warn(mmu, "flush timeout, 0x%08x\n",
+                nv_rd32(mmu, 0x100810));
     }
-    nv_wr32(priv, 0x100810, 0x00000000);
-    mutex_unlock(&nv_subdev(priv)->mutex);
+    nv_wr32(mmu, 0x100810, 0x00000000);
+    mutex_unlock(&nv_subdev(mmu)->mutex);
 }
 /*******************************************************************************
@@ -86,7 +86,7 @@ nv41_mmu_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
              struct nvkm_object **pobject)
 {
     struct nvkm_device *device = nv_device(parent);
-    struct nv04_mmu_priv *priv;
+    struct nv04_mmu *mmu;
     int ret;
     if (pci_find_capability(device->pdev, PCI_CAP_ID_AGP) ||
@@ -96,31 +96,31 @@ nv41_mmu_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
     }
     ret = nvkm_mmu_create(parent, engine, oclass, "PCIEGART",
-                          "pciegart", &priv);
-    *pobject = nv_object(priv);
+                          "mmu", &mmu);
+    *pobject = nv_object(mmu);
     if (ret)
         return ret;
-    priv->base.create = nv04_vm_create;
-    priv->base.limit = NV41_GART_SIZE;
-    priv->base.dma_bits = 39;
-    priv->base.pgt_bits = 32 - 12;
-    priv->base.spg_shift = 12;
-    priv->base.lpg_shift = 12;
-    priv->base.map_sg = nv41_vm_map_sg;
-    priv->base.unmap = nv41_vm_unmap;
-    priv->base.flush = nv41_vm_flush;
-    ret = nvkm_vm_create(&priv->base, 0, NV41_GART_SIZE, 0, 4096,
-                         &priv->vm);
+    mmu->base.create = nv04_vm_create;
+    mmu->base.limit = NV41_GART_SIZE;
+    mmu->base.dma_bits = 39;
+    mmu->base.pgt_bits = 32 - 12;
+    mmu->base.spg_shift = 12;
+    mmu->base.lpg_shift = 12;
+    mmu->base.map_sg = nv41_vm_map_sg;
+    mmu->base.unmap = nv41_vm_unmap;
+    mmu->base.flush = nv41_vm_flush;
+    ret = nvkm_vm_create(&mmu->base, 0, NV41_GART_SIZE, 0, 4096,
+                         &mmu->vm);
     if (ret)
         return ret;
-    ret = nvkm_gpuobj_new(nv_object(priv), NULL,
+    ret = nvkm_gpuobj_new(nv_object(mmu), NULL,
                           (NV41_GART_SIZE / NV41_GART_PAGE) * 4, 16,
                           NVOBJ_FLAG_ZERO_ALLOC,
-                          &priv->vm->pgt[0].obj[0]);
-    priv->vm->pgt[0].refcount[0] = 1;
+                          &mmu->vm->pgt[0].obj[0]);
+    mmu->vm->pgt[0].refcount[0] = 1;
     if (ret)
         return ret;
@@ -130,17 +130,17 @@ nv41_mmu_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 static int
 nv41_mmu_init(struct nvkm_object *object)
 {
-    struct nv04_mmu_priv *priv = (void *)object;
-    struct nvkm_gpuobj *dma = priv->vm->pgt[0].obj[0];
+    struct nv04_mmu *mmu = (void *)object;
+    struct nvkm_gpuobj *dma = mmu->vm->pgt[0].obj[0];
     int ret;
-    ret = nvkm_mmu_init(&priv->base);
+    ret = nvkm_mmu_init(&mmu->base);
     if (ret)
         return ret;
-    nv_wr32(priv, 0x100800, dma->addr | 0x00000002);
-    nv_mask(priv, 0x10008c, 0x00000100, 0x00000100);
-    nv_wr32(priv, 0x100820, 0x00000000);
+    nv_wr32(mmu, 0x100800, dma->addr | 0x00000002);
+    nv_mask(mmu, 0x10008c, 0x00000100, 0x00000100);
+    nv_wr32(mmu, 0x100820, 0x00000000);
     return 0;
 }
...
@@ -84,14 +84,14 @@ static void
 nv44_vm_map_sg(struct nvkm_vma *vma, struct nvkm_gpuobj *pgt,
               struct nvkm_mem *mem, u32 pte, u32 cnt, dma_addr_t *list)
 {
-    struct nv04_mmu_priv *priv = (void *)vma->vm->mmu;
+    struct nv04_mmu *mmu = (void *)vma->vm->mmu;
     u32 tmp[4];
     int i;
     if (pte & 3) {
         u32 max = 4 - (pte & 3);
         u32 part = (cnt > max) ? max : cnt;
-        nv44_vm_fill(pgt, priv->null, list, pte, part);
+        nv44_vm_fill(pgt, mmu->null, list, pte, part);
         pte += part;
         list += part;
         cnt -= part;
@@ -108,18 +108,18 @@ nv44_vm_map_sg(struct nvkm_vma *vma, struct nvkm_gpuobj *pgt,
     }
     if (cnt)
-        nv44_vm_fill(pgt, priv->null, list, pte, cnt);
+        nv44_vm_fill(pgt, mmu->null, list, pte, cnt);
 }
 static void
 nv44_vm_unmap(struct nvkm_gpuobj *pgt, u32 pte, u32 cnt)
 {
-    struct nv04_mmu_priv *priv = (void *)nvkm_mmu(pgt);
+    struct nv04_mmu *mmu = (void *)nvkm_mmu(pgt);
     if (pte & 3) {
         u32 max = 4 - (pte & 3);
         u32 part = (cnt > max) ? max : cnt;
-        nv44_vm_fill(pgt, priv->null, NULL, pte, part);
+        nv44_vm_fill(pgt, mmu->null, NULL, pte, part);
         pte += part;
         cnt -= part;
     }
@@ -133,18 +133,18 @@ nv44_vm_unmap(struct nvkm_gpuobj *pgt, u32 pte, u32 cnt)
     }
     if (cnt)
-        nv44_vm_fill(pgt, priv->null, NULL, pte, cnt);
+        nv44_vm_fill(pgt, mmu->null, NULL, pte, cnt);
 }
 static void
 nv44_vm_flush(struct nvkm_vm *vm)
 {
-    struct nv04_mmu_priv *priv = (void *)vm->mmu;
-    nv_wr32(priv, 0x100814, priv->base.limit - NV44_GART_PAGE);
-    nv_wr32(priv, 0x100808, 0x00000020);
-    if (!nv_wait(priv, 0x100808, 0x00000001, 0x00000001))
-        nv_error(priv, "timeout: 0x%08x\n", nv_rd32(priv, 0x100808));
-    nv_wr32(priv, 0x100808, 0x00000000);
+    struct nv04_mmu *mmu = (void *)vm->mmu;
+    nv_wr32(mmu, 0x100814, mmu->base.limit - NV44_GART_PAGE);
+    nv_wr32(mmu, 0x100808, 0x00000020);
+    if (!nv_wait(mmu, 0x100808, 0x00000001, 0x00000001))
+        nv_error(mmu, "timeout: 0x%08x\n", nv_rd32(mmu, 0x100808));
+    nv_wr32(mmu, 0x100808, 0x00000000);
 }
 /*******************************************************************************
@@ -157,7 +157,7 @@ nv44_mmu_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
              struct nvkm_object **pobject)
 {
     struct nvkm_device *device = nv_device(parent);
-    struct nv04_mmu_priv *priv;
+    struct nv04_mmu *mmu;
     int ret;
     if (pci_find_capability(device->pdev, PCI_CAP_ID_AGP) ||
@@ -167,37 +167,37 @@ nv44_mmu_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
     }
     ret = nvkm_mmu_create(parent, engine, oclass, "PCIEGART",
-                          "pciegart", &priv);
-    *pobject = nv_object(priv);
+                          "mmu", &mmu);
+    *pobject = nv_object(mmu);
     if (ret)
         return ret;
-    priv->base.create = nv04_vm_create;
-    priv->base.limit = NV44_GART_SIZE;
-    priv->base.dma_bits = 39;
-    priv->base.pgt_bits = 32 - 12;
-    priv->base.spg_shift = 12;
-    priv->base.lpg_shift = 12;
-    priv->base.map_sg = nv44_vm_map_sg;
-    priv->base.unmap = nv44_vm_unmap;
-    priv->base.flush = nv44_vm_flush;
-    priv->nullp = pci_alloc_consistent(device->pdev, 16 * 1024, &priv->null);
-    if (!priv->nullp) {
-        nv_error(priv, "unable to allocate dummy pages\n");
-        return -ENOMEM;
+    mmu->base.create = nv04_vm_create;
+    mmu->base.limit = NV44_GART_SIZE;
+    mmu->base.dma_bits = 39;
+    mmu->base.pgt_bits = 32 - 12;
+    mmu->base.spg_shift = 12;
+    mmu->base.lpg_shift = 12;
+    mmu->base.map_sg = nv44_vm_map_sg;
+    mmu->base.unmap = nv44_vm_unmap;
+    mmu->base.flush = nv44_vm_flush;
+    mmu->nullp = pci_alloc_consistent(device->pdev, 16 * 1024, &mmu->null);
+    if (!mmu->nullp) {
+        nv_warn(mmu, "unable to allocate dummy pages\n");
+        mmu->null = 0;
     }
-    ret = nvkm_vm_create(&priv->base, 0, NV44_GART_SIZE, 0, 4096,
-                         &priv->vm);
+    ret = nvkm_vm_create(&mmu->base, 0, NV44_GART_SIZE, 0, 4096,
+                         &mmu->vm);
     if (ret)
         return ret;
-    ret = nvkm_gpuobj_new(nv_object(priv), NULL,
+    ret = nvkm_gpuobj_new(nv_object(mmu), NULL,
                           (NV44_GART_SIZE / NV44_GART_PAGE) * 4,
                           512 * 1024, NVOBJ_FLAG_ZERO_ALLOC,
-                          &priv->vm->pgt[0].obj[0]);
-    priv->vm->pgt[0].refcount[0] = 1;
+                          &mmu->vm->pgt[0].obj[0]);
+    mmu->vm->pgt[0].refcount[0] = 1;
     if (ret)
         return ret;
@@ -207,12 +207,12 @@ nv44_mmu_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 static int
 nv44_mmu_init(struct nvkm_object *object)
 {
-    struct nv04_mmu_priv *priv = (void *)object;
-    struct nvkm_gpuobj *gart = priv->vm->pgt[0].obj[0];
+    struct nv04_mmu *mmu = (void *)object;
+    struct nvkm_gpuobj *gart = mmu->vm->pgt[0].obj[0];
     u32 addr;
     int ret;
-    ret = nvkm_mmu_init(&priv->base);
+    ret = nvkm_mmu_init(&mmu->base);
     if (ret)
         return ret;
@@ -220,17 +220,17 @@ nv44_mmu_init(struct nvkm_object *object)
      * allocated on 512KiB alignment, and not exceed a total size
      * of 512KiB for this to work correctly
      */
-    addr = nv_rd32(priv, 0x10020c);
+    addr = nv_rd32(mmu, 0x10020c);
     addr -= ((gart->addr >> 19) + 1) << 19;
-    nv_wr32(priv, 0x100850, 0x80000000);
-    nv_wr32(priv, 0x100818, priv->null);
-    nv_wr32(priv, 0x100804, NV44_GART_SIZE);
-    nv_wr32(priv, 0x100850, 0x00008000);
-    nv_mask(priv, 0x10008c, 0x00000200, 0x00000200);
-    nv_wr32(priv, 0x100820, 0x00000000);
-    nv_wr32(priv, 0x10082c, 0x00000001);
-    nv_wr32(priv, 0x100800, addr | 0x00000010);
+    nv_wr32(mmu, 0x100850, 0x80000000);
+    nv_wr32(mmu, 0x100818, mmu->null);
+    nv_wr32(mmu, 0x100804, NV44_GART_SIZE);
+    nv_wr32(mmu, 0x100850, 0x00008000);
+    nv_mask(mmu, 0x10008c, 0x00000200, 0x00000200);
+    nv_wr32(mmu, 0x100820, 0x00000000);
+    nv_wr32(mmu, 0x10082c, 0x00000001);
+    nv_wr32(mmu, 0x100800, addr | 0x00000010);
     return 0;
 }
...
@@ -29,10 +29,6 @@
 #include <core/engine.h>
 #include <core/gpuobj.h>
-struct nv50_mmu_priv {
-    struct nvkm_mmu base;
-};
 static void
 nv50_vm_map_pgt(struct nvkm_gpuobj *pgd, u32 pde, struct nvkm_gpuobj *pgt[2])
 {
@@ -149,20 +145,20 @@ nv50_vm_unmap(struct nvkm_gpuobj *pgt, u32 pte, u32 cnt)
 static void
 nv50_vm_flush(struct nvkm_vm *vm)
 {
-    struct nv50_mmu_priv *priv = (void *)vm->mmu;
-    struct nvkm_bar *bar = nvkm_bar(priv);
+    struct nvkm_mmu *mmu = (void *)vm->mmu;
+    struct nvkm_bar *bar = nvkm_bar(mmu);
     struct nvkm_engine *engine;
     int i, vme;
     bar->flush(bar);
-    mutex_lock(&nv_subdev(priv)->mutex);
+    mutex_lock(&nv_subdev(mmu)->mutex);
     for (i = 0; i < NVDEV_SUBDEV_NR; i++) {
         if (!atomic_read(&vm->engref[i]))
             continue;
         /* unfortunate hw bug workaround... */
-        engine = nvkm_engine(priv, i);
+        engine = nvkm_engine(mmu, i);
         if (engine && engine->tlb_flush) {
             engine->tlb_flush(engine);
             continue;
@@ -184,11 +180,11 @@ nv50_vm_flush(struct nvkm_vm *vm)
             continue;
         }
-        nv_wr32(priv, 0x100c80, (vme << 16) | 1);
-        if (!nv_wait(priv, 0x100c80, 0x00000001, 0x00000000))
-            nv_error(priv, "vm flush timeout: engine %d\n", vme);
+        nv_wr32(mmu, 0x100c80, (vme << 16) | 1);
+        if (!nv_wait(mmu, 0x100c80, 0x00000001, 0x00000000))
+            nv_error(mmu, "vm flush timeout: engine %d\n", vme);
     }
-    mutex_unlock(&nv_subdev(priv)->mutex);
+    mutex_unlock(&nv_subdev(mmu)->mutex);
 }
 static int
@@ -207,25 +203,25 @@ nv50_mmu_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
              struct nvkm_oclass *oclass, void *data, u32 size,
              struct nvkm_object **pobject)
 {
-    struct nv50_mmu_priv *priv;
+    struct nvkm_mmu *mmu;
    int ret;
-    ret = nvkm_mmu_create(parent, engine, oclass, "VM", "vm", &priv);
-    *pobject = nv_object(priv);
+    ret = nvkm_mmu_create(parent, engine, oclass, "VM", "mmu", &mmu);
+    *pobject = nv_object(mmu);
     if (ret)
         return ret;
-    priv->base.limit = 1ULL << 40;
-    priv->base.dma_bits = 40;
-    priv->base.pgt_bits = 29 - 12;
-    priv->base.spg_shift = 12;
-    priv->base.lpg_shift = 16;
-    priv->base.create = nv50_vm_create;
-    priv->base.map_pgt = nv50_vm_map_pgt;
-    priv->base.map = nv50_vm_map;
-    priv->base.map_sg = nv50_vm_map_sg;
-    priv->base.unmap = nv50_vm_unmap;
-    priv->base.flush = nv50_vm_flush;
+    mmu->limit = 1ULL << 40;
+    mmu->dma_bits = 40;
+    mmu->pgt_bits = 29 - 12;
+    mmu->spg_shift = 12;
+    mmu->lpg_shift = 16;
+    mmu->create = nv50_vm_create;
+    mmu->map_pgt = nv50_vm_map_pgt;
+    mmu->map = nv50_vm_map;
+    mmu->map_sg = nv50_vm_map_sg;
+    mmu->unmap = nv50_vm_unmap;
+    mmu->flush = nv50_vm_flush;
     return 0;
 }
...