Commit 9202d732 authored by Ben Skeggs

drm/nouveau/imem/nv50-: use new interfaces for vmm operations

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
parent 6f4dc18c
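The change in outline: the gk20a and nv50 instmem backends move off the legacy nvkm_vm_get()/nvkm_vm_map_at()/nvkm_vm_put() calls, which operate on a caller-embedded struct nvkm_vma and a struct nvkm_mem, onto the new nvkm_vmm_get()/nvkm_vmm_map()/nvkm_vmm_put() calls, which hand back a struct nvkm_vma pointer owned by the VMM and describe backing storage with a struct nvkm_vmm_map. A schematic before/after distilled from the hunks below, not a compilable excerpt (note nvkm_vmm_get() takes the page shift before the size):

	/* before: caller embeds the VMA and owns its lifetime */
	struct nvkm_vma bar = {};
	ret = nvkm_vm_get(vmm, size, 12, NV_MEM_ACCESS_RW, &bar);
	nvkm_vm_map_at(&bar, 0, &node->mem);
	/* ... use the mapping ... */
	nvkm_vm_put(&bar);

	/* after: the VMM allocates the VMA; caller holds a pointer */
	struct nvkm_vma *bar = NULL;
	ret = nvkm_vmm_get(vmm, 12, size, &bar);
	ret = nvkm_vmm_map(vmm, bar, argv, argc, &map);
	/* ... use the mapping ... */
	nvkm_vmm_put(vmm, &bar);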
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
@@ -52,7 +52,7 @@
 struct gk20a_instobj {
 	struct nvkm_memory memory;
-	struct nvkm_mem mem;
+	struct nvkm_mm_node *mn;
 	struct gk20a_instmem *imem;
 
 	/* CPU mapping */
@@ -129,13 +129,13 @@ gk20a_instobj_page(struct nvkm_memory *memory)
 static u64
 gk20a_instobj_addr(struct nvkm_memory *memory)
 {
-	return gk20a_instobj(memory)->mem.offset;
+	return (u64)gk20a_instobj(memory)->mn->offset << 12;
 }
 
 static u64
 gk20a_instobj_size(struct nvkm_memory *memory)
 {
-	return (u64)gk20a_instobj(memory)->mem.size << 12;
+	return (u64)gk20a_instobj(memory)->mn->length << 12;
 }
 
 /*
@@ -284,8 +284,22 @@ gk20a_instobj_map(struct nvkm_memory *memory, u64 offset, struct nvkm_vmm *vmm,
 		  struct nvkm_vma *vma, void *argv, u32 argc)
 {
 	struct gk20a_instobj *node = gk20a_instobj(memory);
-	nvkm_vm_map_at(vma, 0, &node->mem);
-	return 0;
+	struct nvkm_vmm_map map = {
+		.memory = &node->memory,
+		.offset = offset,
+		.mem = node->mn,
+	};
+
+	if (vma->vm) {
+		struct nvkm_mem mem = {
+			.mem = node->mn,
+			.memory = &node->memory,
+		};
+		nvkm_vm_map_at(vma, 0, &mem);
+		return 0;
+	}
+
+	return nvkm_vmm_map(vmm, vma, argv, argc, &map);
 }
 
 static void *
@@ -298,8 +312,8 @@ gk20a_instobj_dtor_dma(struct nvkm_memory *memory)
 	if (unlikely(!node->base.vaddr))
 		goto out;
 
-	dma_free_attrs(dev, node->base.mem.size << PAGE_SHIFT, node->base.vaddr,
-		       node->handle, imem->attrs);
+	dma_free_attrs(dev, (u64)node->base.mn->length << PAGE_SHIFT,
+		       node->base.vaddr, node->handle, imem->attrs);
 
 out:
 	return node;
@@ -311,7 +325,7 @@ gk20a_instobj_dtor_iommu(struct nvkm_memory *memory)
 	struct gk20a_instobj_iommu *node = gk20a_instobj_iommu(memory);
 	struct gk20a_instmem *imem = node->base.imem;
 	struct device *dev = imem->base.subdev.device->dev;
-	struct nvkm_mm_node *r = node->base.mem.mem;
+	struct nvkm_mm_node *r = node->base.mn;
 	int i;
 
 	if (unlikely(!r))
@@ -329,7 +343,7 @@ gk20a_instobj_dtor_iommu(struct nvkm_memory *memory)
 	r->offset &= ~BIT(imem->iommu_bit - imem->iommu_pgshift);
 
 	/* Unmap pages from GPU address space and free them */
-	for (i = 0; i < node->base.mem.size; i++) {
+	for (i = 0; i < node->base.mn->length; i++) {
 		iommu_unmap(imem->domain,
 			    (r->offset + i) << imem->iommu_pgshift, PAGE_SIZE);
 		dma_unmap_page(dev, node->dma_addrs[i], PAGE_SIZE,
@@ -410,8 +424,7 @@ gk20a_instobj_ctor_dma(struct gk20a_instmem *imem, u32 npages, u32 align,
 	node->r.offset = node->handle >> 12;
 	node->r.length = (npages << PAGE_SHIFT) >> 12;
 
-	node->base.mem.offset = node->handle;
-	node->base.mem.mem = &node->r;
+	node->base.mn = &node->r;
 
 	return 0;
 }
@@ -488,8 +501,7 @@ gk20a_instobj_ctor_iommu(struct gk20a_instmem *imem, u32 npages, u32 align,
 	/* IOMMU bit tells that an address is to be resolved through the IOMMU */
 	r->offset |= BIT(imem->iommu_bit - imem->iommu_pgshift);
 
-	node->base.mem.offset = ((u64)r->offset) << imem->iommu_pgshift;
-	node->base.mem.mem = r;
+	node->base.mn = r;
 
 	return 0;
 
 release_area:
@@ -537,13 +549,8 @@ gk20a_instobj_new(struct nvkm_instmem *base, u32 size, u32 align, bool zero,
 	node->imem = imem;
 
-	/* present memory for being mapped using small pages */
-	node->mem.size = size >> 12;
-	node->mem.memtype = 0;
-	node->mem.memory = &node->memory;
-
 	nvkm_debug(subdev, "alloc size: 0x%x, align: 0x%x, gaddr: 0x%llx\n",
-		   size, align, node->mem.offset);
+		   size, align, (u64)node->mn->offset << 12);
 
 	return 0;
 }
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
@@ -46,7 +46,7 @@ struct nv50_instobj {
 	struct nvkm_instobj base;
 	struct nv50_instmem *imem;
 	struct nvkm_memory *ram;
-	struct nvkm_vma bar;
+	struct nvkm_vma *bar;
 	refcount_t maps;
 	void *map;
 	struct list_head lru;
@@ -124,7 +124,7 @@ nv50_instobj_kmap(struct nv50_instobj *iobj, struct nvkm_vmm *vmm)
 	struct nvkm_memory *memory = &iobj->base.memory;
 	struct nvkm_subdev *subdev = &imem->base.subdev;
 	struct nvkm_device *device = subdev->device;
-	struct nvkm_vma bar = {}, ebar;
+	struct nvkm_vma *bar = NULL, *ebar;
 	u64 size = nvkm_memory_size(memory);
 	void *emap;
 	int ret;
@@ -134,7 +134,7 @@ nv50_instobj_kmap(struct nv50_instobj *iobj, struct nvkm_vmm *vmm)
 	 * to the possibility of recursion for page table allocation.
 	 */
 	mutex_unlock(&subdev->mutex);
-	while ((ret = nvkm_vm_get(vmm, size, 12, NV_MEM_ACCESS_RW, &bar))) {
+	while ((ret = nvkm_vmm_get(vmm, 12, size, &bar))) {
 		/* Evict unused mappings, and keep retrying until we either
 		 * succeed,or there's no more objects left on the LRU.
 		 */
@@ -144,10 +144,10 @@ nv50_instobj_kmap(struct nv50_instobj *iobj, struct nvkm_vmm *vmm)
 			nvkm_debug(subdev, "evict %016llx %016llx @ %016llx\n",
 				   nvkm_memory_addr(&eobj->base.memory),
 				   nvkm_memory_size(&eobj->base.memory),
-				   eobj->bar.offset);
+				   eobj->bar->addr);
 			list_del_init(&eobj->lru);
 			ebar = eobj->bar;
-			eobj->bar.node = NULL;
+			eobj->bar = NULL;
 			emap = eobj->map;
 			eobj->map = NULL;
 		}
@@ -155,16 +155,16 @@ nv50_instobj_kmap(struct nv50_instobj *iobj, struct nvkm_vmm *vmm)
 		if (!eobj)
 			break;
 		iounmap(emap);
-		nvkm_vm_put(&ebar);
+		nvkm_vmm_put(vmm, &ebar);
 	}
 
 	if (ret == 0)
-		ret = nvkm_memory_map(memory, 0, vmm, &bar, NULL, 0);
+		ret = nvkm_memory_map(memory, 0, vmm, bar, NULL, 0);
 	mutex_lock(&subdev->mutex);
-	if (ret || iobj->bar.node) {
+	if (ret || iobj->bar) {
 		/* We either failed, or another thread beat us. */
 		mutex_unlock(&subdev->mutex);
-		nvkm_vm_put(&bar);
+		nvkm_vmm_put(vmm, &bar);
 		mutex_lock(&subdev->mutex);
 		return;
 	}
@@ -172,10 +172,10 @@ nv50_instobj_kmap(struct nv50_instobj *iobj, struct nvkm_vmm *vmm)
 
 	/* Make the mapping visible to the host. */
 	iobj->bar = bar;
 	iobj->map = ioremap_wc(device->func->resource_addr(device, 3) +
-			       (u32)iobj->bar.offset, size);
+			       (u32)iobj->bar->addr, size);
 	if (!iobj->map) {
 		nvkm_warn(subdev, "PRAMIN ioremap failed\n");
-		nvkm_vm_put(&iobj->bar);
+		nvkm_vmm_put(vmm, &iobj->bar);
 	}
 }
@@ -299,7 +299,7 @@ nv50_instobj_dtor(struct nvkm_memory *memory)
 {
 	struct nv50_instobj *iobj = nv50_instobj(memory);
 	struct nvkm_instmem *imem = &iobj->imem->base;
-	struct nvkm_vma bar;
+	struct nvkm_vma *bar;
 	void *map = map;
 
 	mutex_lock(&imem->subdev.mutex);
@@ -310,8 +310,10 @@ nv50_instobj_dtor(struct nvkm_memory *memory)
 	mutex_unlock(&imem->subdev.mutex);
 
 	if (map) {
+		struct nvkm_vmm *vmm = nvkm_bar_bar2_vmm(imem->subdev.device);
 		iounmap(map);
-		nvkm_vm_put(&bar);
+		if (likely(vmm)) /* Can be NULL during BAR destructor. */
+			nvkm_vmm_put(vmm, &bar);
 	}
 
 	nvkm_memory_unref(&iobj->ram);
...
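On the nv50 side, turning struct nvkm_vma into a VMM-owned pointer makes the eviction hand-off in nv50_instobj_kmap() explicit: the victim's bar pointer is stolen and cleared while the subdev mutex is held, and the mapping is only unmapped and returned with nvkm_vmm_put() after the mutex is dropped, matching the code's own comment about recursion during page-table allocation. A minimal user-space sketch of that detach-under-lock, free-outside-lock pattern (hypothetical types, a pthread mutex standing in for the subdev mutex):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct mapping {
	unsigned long addr;
};

struct object {
	struct mapping *bar;	/* NULL once evicted */
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static void evict(struct object *victim)
{
	struct mapping *ebar;

	pthread_mutex_lock(&lock);
	ebar = victim->bar;	/* steal the pointer... */
	victim->bar = NULL;	/* ...so nobody else frees it */
	pthread_mutex_unlock(&lock);

	free(ebar);		/* release outside the lock */
}

int main(void)
{
	struct object obj = { .bar = malloc(sizeof(*obj.bar)) };

	evict(&obj);
	printf("bar after evict: %p\n", (void *)obj.bar);	/* (nil) */
	return 0;
}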