Commit eb813999 authored by Ben Skeggs

drm/nouveau/mmu: implement new vmm backend

This is the common code to support a rework of the VMM backends.

It adds support for more than two levels of page table nesting, which
is required to support GP100's MMU layout.
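For context: deeper nesting means the walker can no longer hardcode a
single PD->PT step.  A minimal sketch of the idea (hypothetical names,
not this commit's code), where each tree level is one entry in a
descriptor chain, similar in spirit to the nvkm_vmm_desc arrays the
new backend uses:

  struct pt_desc {
  	int bits;                    /* VA bits indexed at this level */
  	const struct pt_desc *lower; /* NULL at the leaf (PTE) level */
  };

  /* Descend from the root; "shift" is the lowest VA bit of this
   * level's index field. */
  static void pt_walk(const struct pt_desc *desc, u64 addr, u32 shift)
  {
  	u32 index = (addr >> shift) & ((1U << desc->bits) - 1);

  	/* ...look up (or allocate) the entry at "index" here... */
  	if (desc->lower)
  		pt_walk(desc->lower, addr, shift - desc->lower->bits);
  }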

Sparse mappings (which don't cause MMU faults when accessed) are now
supported, where the backend provides them.
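Concretely, a backend that supports this needs two distinct "empty"
PTE patterns; the values below are invented for illustration (real
encodings are per-GPU):

  #define PTE_INVALID 0x0ULL /* access raises an MMU fault */
  #define PTE_SPARSE  0x1ULL /* access is swallowed, no fault */

  static void fill_ptes(u64 *pt, u32 ptei, u32 ptes, u64 data)
  {
  	while (ptes--)
  		pt[ptei++] = data;
  }

Unmapping inside a sparse range refills with PTE_SPARSE rather than
PTE_INVALID, which is why the new nvkm_vmm_ptes_unmap() below takes a
"sparse" flag.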

Dual-PT handling had to become more sophisticated to support sparse
mappings, but this also lets us take advantage of an optimisation the
MMU provides on GK104 and newer.
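Roughly, each PDE covering a dual-PT range now has to track both
tables and how many live PTEs each holds, so a sparse reservation
survives either table appearing or disappearing.  A sketch of the
state involved (not this commit's actual structures):

  struct dual_pgt {
  	void *pt[2]; /* [0] = large-page PT, [1] = small-page PT */
  	u32 refs[2]; /* live PTEs per table */
  	bool sparse; /* range reserved as sparse */
  };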

Certain operations can now be combined into a single page tree walk
to avoid some overhead, which also enables optimisations like skipping
PTE unmap writes when the PT will be destroyed anyway.
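Illustrating the skip, reusing the hypothetical types from the
sketches above: if dropping references empties the PT, the walk can
omit the per-PTE writes entirely, since the table is freed immediately
afterwards:

  static void unmap_ptes(struct dual_pgt *pgt, u64 *pt, u32 ptei, u32 ptes)
  {
  	pgt->refs[1] -= ptes;
  	if (pgt->refs[1] == 0)
  		return; /* PT about to be destroyed: skip the writes */
  	fill_ptes(pt, ptei, ptes,
  		  pgt->sparse ? PTE_SPARSE : PTE_INVALID);
  }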

The old backend has been hacked up to forward requests onto the new
backend, if present, so that it's possible to bisect between issues
in the backend changes vs the upcoming frontend changes.
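That forwarding is visible throughout the hunks below: each legacy
entry point probes for a new-style backend and diverts, along the
lines of:

  if (vm->func->page->desc->func->unmap) {
  	/* new backend present: route via the nvkm_vmm_ptes_*() calls */
  	return;
  }
  /* otherwise fall through to the legacy path */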

Until the new frontend has been merged, new backends will leak BAR2
page tables on module unload.  This is expected, and it's not worth
the effort of hacking around it as it doesn't affect runtime.
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
parent bda9e379
...@@ -56,6 +56,13 @@ config NOUVEAU_DEBUG_DEFAULT
help
Selects the default debug level
config NOUVEAU_DEBUG_MMU
bool "Enable additional MMU debugging"
depends on DRM_NOUVEAU
default n
help
Say Y here if you want to enable verbose MMU debug output.
config DRM_NOUVEAU_BACKLIGHT
bool "Support for backlight control"
depends on DRM_NOUVEAU
...
#ifndef __NVKM_MMU_H__
#define __NVKM_MMU_H__
#include <core/subdev.h>
#include <core/memory.h>
#include <core/mm.h>
struct nvkm_gpuobj;
struct nvkm_mem;
...@@ -11,6 +12,8 @@ struct nvkm_vm_pgt {
};
struct nvkm_vma {
struct nvkm_memory *memory;
struct nvkm_tags *tags;
struct nvkm_vm *vm;
struct nvkm_mm_node *node;
union {
...@@ -24,6 +27,7 @@ struct nvkm_vm {
const struct nvkm_vmm_func *func;
struct nvkm_mmu *mmu;
const char *name;
u32 debug;
struct kref kref;
struct mutex mutex;
...@@ -58,6 +62,25 @@ void nvkm_vm_map_at(struct nvkm_vma *, u64 offset, struct nvkm_mem *);
void nvkm_vm_unmap(struct nvkm_vma *);
void nvkm_vm_unmap_at(struct nvkm_vma *, u64 offset, u64 length);
int nvkm_vmm_boot(struct nvkm_vmm *);
struct nvkm_vmm_map {
struct nvkm_memory *memory;
u64 offset;
struct nvkm_mm_node *mem;
struct scatterlist *sgl;
dma_addr_t *dma;
u64 off;
const struct nvkm_vmm_page *page;
struct nvkm_tags *tags;
u64 next;
u64 type;
u64 ctag;
};
struct nvkm_mmu {
const struct nvkm_mmu_func *func;
struct nvkm_subdev subdev;
...
...@@ -213,6 +213,36 @@ nvkm_mmu_ptc_get(struct nvkm_mmu *mmu, u32 size, u32 align, bool zero)
return pt;
}
static void
nvkm_vm_map_(const struct nvkm_vmm_page *page, struct nvkm_vma *vma, u64 delta,
struct nvkm_mem *mem, nvkm_vmm_pte_func fn,
struct nvkm_vmm_map *map)
{
struct nvkm_vmm *vmm = vma->vm;
void *argv = NULL;
u32 argc = 0;
int ret;
map->memory = mem->memory;
map->page = page;
if (vmm->func->valid) {
ret = vmm->func->valid(vmm, argv, argc, map);
if (WARN_ON(ret))
return;
}
mutex_lock(&vmm->mutex);
nvkm_vmm_ptes_map(vmm, page, ((u64)vma->node->offset << 12) + delta,
(u64)vma->node->length << 12, map, fn);
mutex_unlock(&vmm->mutex);
nvkm_memory_tags_put(vma->memory, vmm->mmu->subdev.device, &vma->tags);
nvkm_memory_unref(&vma->memory);
vma->memory = nvkm_memory_ref(map->memory);
vma->tags = map->tags;
}
void
nvkm_mmu_ptc_dump(struct nvkm_mmu *mmu)
{
...@@ -251,6 +281,7 @@ nvkm_mmu_ptc_init(struct nvkm_mmu *mmu)
void
nvkm_vm_map_at(struct nvkm_vma *vma, u64 delta, struct nvkm_mem *node)
{
const struct nvkm_vmm_page *page = vma->vm->func->page;
struct nvkm_vm *vm = vma->vm;
struct nvkm_mmu *mmu = vm->mmu;
struct nvkm_mm_node *r = node->mem;
...@@ -262,6 +293,14 @@ nvkm_vm_map_at(struct nvkm_vma *vma, u64 delta, struct nvkm_mem *node)
u32 max = 1 << (mmu->func->pgt_bits - bits);
u32 end, len;
if (page->desc->func->unmap) {
struct nvkm_vmm_map map = { .mem = node->mem };
while (page->shift != vma->node->type)
page++;
nvkm_vm_map_(page, vma, delta, node, page->desc->func->mem, &map);
return;
}
delta = 0;
while (r) {
u64 phys = (u64)r->offset << 12;
...@@ -297,6 +336,7 @@ static void
nvkm_vm_map_sg_table(struct nvkm_vma *vma, u64 delta, u64 length,
struct nvkm_mem *mem)
{
const struct nvkm_vmm_page *page = vma->vm->func->page;
struct nvkm_vm *vm = vma->vm;
struct nvkm_mmu *mmu = vm->mmu;
int big = vma->node->type != mmu->func->spg_shift;
...@@ -311,6 +351,14 @@ nvkm_vm_map_sg_table(struct nvkm_vma *vma, u64 delta, u64 length,
int i;
struct scatterlist *sg;
if (page->desc->func->unmap) {
struct nvkm_vmm_map map = { .sgl = mem->sg->sgl };
while (page->shift != vma->node->type)
page++;
nvkm_vm_map_(page, vma, delta, mem, page->desc->func->sgl, &map);
return;
}
for_each_sg(mem->sg->sgl, sg, mem->sg->nents, i) {
struct nvkm_memory *pgt = vm->pgt[pde].mem[big];
sglen = sg_dma_len(sg) >> PAGE_SHIFT;
...@@ -355,6 +403,7 @@ static void
nvkm_vm_map_sg(struct nvkm_vma *vma, u64 delta, u64 length,
struct nvkm_mem *mem)
{
const struct nvkm_vmm_page *page = vma->vm->func->page;
struct nvkm_vm *vm = vma->vm;
struct nvkm_mmu *mmu = vm->mmu;
dma_addr_t *list = mem->pages;
...@@ -367,6 +416,14 @@ nvkm_vm_map_sg(struct nvkm_vma *vma, u64 delta, u64 length,
u32 max = 1 << (mmu->func->pgt_bits - bits);
u32 end, len;
if (page->desc->func->unmap) {
struct nvkm_vmm_map map = { .dma = mem->pages };
while (page->shift != vma->node->type)
page++;
nvkm_vm_map_(page, vma, delta, mem, page->desc->func->dma, &map);
return;
}
while (num) {
struct nvkm_memory *pgt = vm->pgt[pde].mem[big];
...@@ -415,6 +472,17 @@ nvkm_vm_unmap_at(struct nvkm_vma *vma, u64 delta, u64 length)
u32 max = 1 << (mmu->func->pgt_bits - bits);
u32 end, len;
if (vm->func->page->desc->func->unmap) {
const struct nvkm_vmm_page *page = vm->func->page;
while (page->shift != vma->node->type)
page++;
mutex_lock(&vm->mutex);
nvkm_vmm_ptes_unmap(vm, page, (vma->node->offset << 12) + delta,
vma->node->length << 12, false);
mutex_unlock(&vm->mutex);
return;
}
while (num) {
struct nvkm_memory *pgt = vm->pgt[pde].mem[big];
...@@ -440,6 +508,9 @@ void
nvkm_vm_unmap(struct nvkm_vma *vma)
{
nvkm_vm_unmap_at(vma, 0, (u64)vma->node->length << 12);
nvkm_memory_tags_put(vma->memory, vma->vm->mmu->subdev.device, &vma->tags);
nvkm_memory_unref(&vma->memory);
}
static void
...@@ -509,6 +580,22 @@ nvkm_vm_get(struct nvkm_vm *vm, u64 size, u32 page_shift, u32 access,
return ret;
}
if (vm->func->page->desc->func->unmap) {
const struct nvkm_vmm_page *page = vm->func->page;
while (page->shift != page_shift)
page++;
ret = nvkm_vmm_ptes_get(vm, page, vma->node->offset << 12,
vma->node->length << 12);
if (ret) {
nvkm_mm_free(&vm->mm, &vma->node);
mutex_unlock(&vm->mutex);
return ret;
}
goto done;
}
fpde = (vma->node->offset >> mmu->func->pgt_bits);
lpde = (vma->node->offset + vma->node->length - 1) >> mmu->func->pgt_bits;
...@@ -530,8 +617,11 @@ nvkm_vm_get(struct nvkm_vm *vm, u64 size, u32 page_shift, u32 access,
return ret;
}
}
done:
mutex_unlock(&vm->mutex);
vma->memory = NULL;
vma->tags = NULL;
vma->vm = NULL;
nvkm_vm_ref(vm, &vma->vm, NULL);
vma->offset = (u64)vma->node->offset << 12;
...@@ -551,11 +641,25 @@ nvkm_vm_put(struct nvkm_vma *vma)
vm = vma->vm;
mmu = vm->mmu;
nvkm_memory_tags_put(vma->memory, mmu->subdev.device, &vma->tags);
nvkm_memory_unref(&vma->memory);
fpde = (vma->node->offset >> mmu->func->pgt_bits);
lpde = (vma->node->offset + vma->node->length - 1) >> mmu->func->pgt_bits;
mutex_lock(&vm->mutex);
if (vm->func->page->desc->func->unmap) {
const struct nvkm_vmm_page *page = vm->func->page;
while (page->shift != vma->node->type)
page++;
nvkm_vmm_ptes_put(vm, page, vma->node->offset << 12,
vma->node->length << 12);
goto done;
}
nvkm_vm_unmap_pgt(vm, vma->node->type != mmu->func->spg_shift, fpde, lpde);
done:
nvkm_mm_free(&vm->mm, &vma->node);
mutex_unlock(&vm->mutex);
...@@ -569,6 +673,9 @@ nvkm_vm_boot(struct nvkm_vm *vm, u64 size)
struct nvkm_memory *pgt;
int ret;
if (vm->func->page->desc->func->unmap)
return nvkm_vmm_boot(vm);
ret = nvkm_memory_new(mmu->subdev.device, NVKM_MEM_TARGET_INST,
(size >> mmu->func->spg_shift) * 8, 0x1000, true, &pgt);
if (ret == 0) {
...@@ -660,7 +767,7 @@ nvkm_vm_ref(struct nvkm_vm *ref, struct nvkm_vm **ptr, struct nvkm_memory *inst)
if (ret)
return ret;
if (!ref->func->page->desc->func->unmap && ref->mmu->func->map_pgt) {
for (i = ref->fpde; i <= ref->lpde; i++)
ref->mmu->func->map_pgt(ref, i, ref->pgt[i - ref->fpde].mem);
}
...@@ -672,8 +779,12 @@ nvkm_vm_ref(struct nvkm_vm *ref, struct nvkm_vm **ptr, struct nvkm_memory *inst)
if (*ptr) {
if ((*ptr)->func->part && inst)
(*ptr)->func->part(*ptr, inst);
if ((*ptr)->bootstrapped && inst) {
if (!(*ptr)->func->page->desc->func->unmap) {
nvkm_memory_unref(&(*ptr)->pgt[0].mem[0]);
(*ptr)->bootstrapped = false;
}
}
kref_put(&(*ptr)->refcount, nvkm_vm_del);
}
...
...@@ -2,6 +2,7 @@
#define __NVKM_VMM_H__
#include "priv.h"
#include <core/memory.h>
enum nvkm_memory_target;
struct nvkm_vmm_pt {
/* Some GPUs have a mapping level with a dual page tables to
...@@ -49,7 +50,23 @@ struct nvkm_vmm_pt {
u8 pte[];
};
typedef void (*nvkm_vmm_pxe_func)(struct nvkm_vmm *,
struct nvkm_mmu_pt *, u32 ptei, u32 ptes);
typedef void (*nvkm_vmm_pde_func)(struct nvkm_vmm *,
struct nvkm_vmm_pt *, u32 pdei);
typedef void (*nvkm_vmm_pte_func)(struct nvkm_vmm *, struct nvkm_mmu_pt *,
u32 ptei, u32 ptes, struct nvkm_vmm_map *);
struct nvkm_vmm_desc_func {
nvkm_vmm_pxe_func invalid;
nvkm_vmm_pxe_func unmap;
nvkm_vmm_pxe_func sparse;
nvkm_vmm_pde_func pde;
nvkm_vmm_pte_func mem;
nvkm_vmm_pte_func dma;
nvkm_vmm_pte_func sgl;
};
extern const struct nvkm_vmm_desc_func gf100_vmm_pgd;
...@@ -106,6 +123,11 @@ struct nvkm_vmm_func {
int (*join)(struct nvkm_vmm *, struct nvkm_memory *inst);
void (*part)(struct nvkm_vmm *, struct nvkm_memory *inst);
int (*aper)(enum nvkm_memory_target);
int (*valid)(struct nvkm_vmm *, void *argv, u32 argc,
struct nvkm_vmm_map *);
void (*flush)(struct nvkm_vmm *, int depth);
u64 page_block;
const struct nvkm_vmm_page page[];
};
...@@ -122,6 +144,15 @@ int nvkm_vmm_ctor(const struct nvkm_vmm_func *, struct nvkm_mmu *,
u32 pd_header, u64 addr, u64 size, struct lock_class_key *,
const char *name, struct nvkm_vmm *);
void nvkm_vmm_dtor(struct nvkm_vmm *);
void nvkm_vmm_ptes_put(struct nvkm_vmm *, const struct nvkm_vmm_page *,
u64 addr, u64 size);
int nvkm_vmm_ptes_get(struct nvkm_vmm *, const struct nvkm_vmm_page *,
u64 addr, u64 size);
void nvkm_vmm_ptes_map(struct nvkm_vmm *, const struct nvkm_vmm_page *,
u64 addr, u64 size, struct nvkm_vmm_map *,
nvkm_vmm_pte_func);
void nvkm_vmm_ptes_unmap(struct nvkm_vmm *, const struct nvkm_vmm_page *,
u64 addr, u64 size, bool sparse);
int nv04_vmm_new_(const struct nvkm_vmm_func *, struct nvkm_mmu *, u32,
u64, u64, void *, u32, struct lock_class_key *,
...@@ -176,4 +207,85 @@ int gp100_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32,
int gp10b_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32,
struct lock_class_key *, const char *,
struct nvkm_vmm **);
#define VMM_PRINT(l,v,p,f,a...) do { \
struct nvkm_vmm *_vmm = (v); \
if (CONFIG_NOUVEAU_DEBUG >= (l) && _vmm->debug >= (l)) { \
nvkm_printk_(&_vmm->mmu->subdev, 0, p, "%s: "f"\n", \
_vmm->name, ##a); \
} \
} while(0)
#define VMM_DEBUG(v,f,a...) VMM_PRINT(NV_DBG_DEBUG, (v), info, f, ##a)
#define VMM_TRACE(v,f,a...) VMM_PRINT(NV_DBG_TRACE, (v), info, f, ##a)
#define VMM_SPAM(v,f,a...) VMM_PRINT(NV_DBG_SPAM , (v), dbg, f, ##a)
#define VMM_MAP_ITER(VMM,PT,PTEI,PTEN,MAP,FILL,BASE,SIZE,NEXT) do { \
nvkm_kmap((PT)->memory); \
while (PTEN) { \
u64 _ptes = ((SIZE) - MAP->off) >> MAP->page->shift; \
u64 _addr = ((BASE) + MAP->off); \
\
if (_ptes > PTEN) { \
MAP->off += PTEN << MAP->page->shift; \
_ptes = PTEN; \
} else { \
MAP->off = 0; \
NEXT; \
} \
\
VMM_SPAM(VMM, "ITER %08x %08x PTE(s)", PTEI, (u32)_ptes); \
\
FILL(VMM, PT, PTEI, _ptes, MAP, _addr); \
PTEI += _ptes; \
PTEN -= _ptes; \
}; \
nvkm_done((PT)->memory); \
} while(0)
#define VMM_MAP_ITER_MEM(VMM,PT,PTEI,PTEN,MAP,FILL) \
VMM_MAP_ITER(VMM,PT,PTEI,PTEN,MAP,FILL, \
((u64)MAP->mem->offset << NVKM_RAM_MM_SHIFT), \
((u64)MAP->mem->length << NVKM_RAM_MM_SHIFT), \
(MAP->mem = MAP->mem->next))
#define VMM_MAP_ITER_DMA(VMM,PT,PTEI,PTEN,MAP,FILL) \
VMM_MAP_ITER(VMM,PT,PTEI,PTEN,MAP,FILL, \
*MAP->dma, PAGE_SIZE, MAP->dma++)
#define VMM_MAP_ITER_SGL(VMM,PT,PTEI,PTEN,MAP,FILL) \
VMM_MAP_ITER(VMM,PT,PTEI,PTEN,MAP,FILL, \
sg_dma_address(MAP->sgl), sg_dma_len(MAP->sgl), \
(MAP->sgl = sg_next(MAP->sgl)))
#define VMM_FO(m,o,d,c,b) nvkm_fo##b((m)->memory, (o), (d), (c))
#define VMM_WO(m,o,d,c,b) nvkm_wo##b((m)->memory, (o), (d))
#define VMM_XO(m,v,o,d,c,b,fn,f,a...) do { \
const u32 _pteo = (o); u##b _data = (d); \
VMM_SPAM((v), " %010llx "f, (m)->addr + _pteo, _data, ##a); \
VMM_##fn((m), (m)->base + _pteo, _data, (c), b); \
} while(0)
#define VMM_WO032(m,v,o,d) VMM_XO((m),(v),(o),(d), 1, 32, WO, "%08x")
#define VMM_FO032(m,v,o,d,c) \
VMM_XO((m),(v),(o),(d),(c), 32, FO, "%08x %08x", (c))
#define VMM_WO064(m,v,o,d) VMM_XO((m),(v),(o),(d), 1, 64, WO, "%016llx")
#define VMM_FO064(m,v,o,d,c) \
VMM_XO((m),(v),(o),(d),(c), 64, FO, "%016llx %08x", (c))
#define VMM_XO128(m,v,o,lo,hi,c,f,a...) do { \
u32 _pteo = (o), _ptes = (c); \
const u64 _addr = (m)->addr + _pteo; \
VMM_SPAM((v), " %010llx %016llx%016llx"f, _addr, (hi), (lo), ##a); \
while (_ptes--) { \
nvkm_wo64((m)->memory, (m)->base + _pteo + 0, (lo)); \
nvkm_wo64((m)->memory, (m)->base + _pteo + 8, (hi)); \
_pteo += 0x10; \
} \
} while(0)
#define VMM_WO128(m,v,o,lo,hi) VMM_XO128((m),(v),(o),(lo),(hi), 1, "")
#define VMM_FO128(m,v,o,lo,hi,c) do { \
nvkm_kmap((m)->memory); \
VMM_XO128((m),(v),(o),(lo),(hi),(c), " %08x", (c)); \
nvkm_done((m)->memory); \
} while(0)
#endif