Commit 03edf1b3 authored by Ben Skeggs

drm/nouveau/imem/nv50: support eviction of BAR2 mappings

A good deal of the structures we map into here aren't accessed very often
at all, and Fedora 26 has exposed an issue where after creating a heap of
channels, BAR2 space would run out, and we'd need to make use of the slow
path while accessing important structures like page tables.

This implements an LRU on BAR2 space, which allows eviction of mappings
that aren't currently needed, to make space for other objects.
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
parent 69b136f2
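
For readers skimming the diff below: the new code is a retry loop over an LRU list. When a BAR2 allocation fails, the least-recently-used idle mapping is torn down and the allocation is retried, until it either succeeds or the LRU runs dry. Here is a minimal user-space sketch of that pattern; BAR_SIZE, alloc_space(), release_space() and map_with_eviction() are illustrative stand-ins, not nouveau's API.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Toy stand-ins: BAR_SIZE models the BAR2 aperture, alloc_space() models
 * nvkm_vm_get(), release_space() models iounmap() + nvkm_vm_put().
 */
#define BAR_SIZE 8

struct mapping { int id, size; };

static int bar_used;                  /* space currently handed out  */
static struct mapping lru[16];        /* idle mappings, oldest first */
static int lru_len;

static bool alloc_space(int size)
{
	if (bar_used + size > BAR_SIZE)
		return false;
	bar_used += size;
	return true;
}

static void release_space(int size)
{
	bar_used -= size;
}

/* The commit's pattern: retry the allocation, evicting the oldest idle
 * mapping after each failure, and give up only once the LRU is empty.
 */
static bool map_with_eviction(int size)
{
	while (!alloc_space(size)) {
		if (lru_len == 0)
			return false;   /* nothing left to evict */
		struct mapping victim = lru[0];
		memmove(&lru[0], &lru[1], --lru_len * sizeof(lru[0]));
		printf("evict mapping %d (%d units)\n", victim.id, victim.size);
		release_space(victim.size);
	}
	return true;
}

int main(void)
{
	/* Three idle 2-unit mappings fill most of the "BAR"... */
	for (int i = 0; i < 3; i++) {
		alloc_space(2);
		lru[lru_len++] = (struct mapping){ .id = i, .size = 2 };
	}
	/* ...so a 5-unit request forces two evictions before it fits. */
	printf("mapped: %s\n", map_with_eviction(5) ? "yes" : "no");
	return 0;
}

In the driver itself, the LRU manipulation additionally happens under subdev->mutex, and objects are pulled off the LRU while mapped so an in-use mapping can never be chosen as a victim.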
@@ -32,6 +32,9 @@
 struct nv50_instmem {
 	struct nvkm_instmem base;
 	u64 addr;
+
+	/* Mappings that can be evicted when BAR2 space has been exhausted. */
+	struct list_head lru;
 };
 
 /******************************************************************************
@@ -46,6 +49,7 @@ struct nv50_instobj {
 	struct nvkm_vma bar;
 	refcount_t maps;
 	void *map;
+	struct list_head lru;
 };
 
 static void
@@ -116,11 +120,13 @@ static void
 nv50_instobj_kmap(struct nv50_instobj *iobj, struct nvkm_vmm *vmm)
 {
 	struct nv50_instmem *imem = iobj->imem;
+	struct nv50_instobj *eobj;
 	struct nvkm_memory *memory = &iobj->base.memory;
 	struct nvkm_subdev *subdev = &imem->base.subdev;
 	struct nvkm_device *device = subdev->device;
-	struct nvkm_vma bar = {};
+	struct nvkm_vma bar = {}, ebar;
 	u64 size = nvkm_memory_size(memory);
+	void *emap;
 	int ret;
 
 	/* Attempt to allocate BAR2 address-space and map the object
@@ -128,7 +134,30 @@ nv50_instobj_kmap(struct nv50_instobj *iobj, struct nvkm_vmm *vmm)
 	 * to the possibility of recursion for page table allocation.
 	 */
 	mutex_unlock(&subdev->mutex);
-	ret = nvkm_vm_get(vmm, size, 12, NV_MEM_ACCESS_RW, &bar);
+	while ((ret = nvkm_vm_get(vmm, size, 12, NV_MEM_ACCESS_RW, &bar))) {
+		/* Evict unused mappings, and keep retrying until we either
+		 * succeed, or there are no more objects left on the LRU.
+		 */
+		mutex_lock(&subdev->mutex);
+		eobj = list_first_entry_or_null(&imem->lru, typeof(*eobj), lru);
+		if (eobj) {
+			nvkm_debug(subdev, "evict %016llx %016llx @ %016llx\n",
+				   nvkm_memory_addr(&eobj->base.memory),
+				   nvkm_memory_size(&eobj->base.memory),
+				   eobj->bar.offset);
+			list_del_init(&eobj->lru);
+			ebar = eobj->bar;
+			eobj->bar.node = NULL;
+			emap = eobj->map;
+			eobj->map = NULL;
+		}
+		mutex_unlock(&subdev->mutex);
+		if (!eobj)
+			break;
+		iounmap(emap);
+		nvkm_vm_put(&ebar);
+	}
+
 	if (ret == 0)
 		nvkm_memory_map(memory, &bar, 0);
 	mutex_lock(&subdev->mutex);
@@ -168,6 +197,14 @@ nv50_instobj_release(struct nvkm_memory *memory)
 	nvkm_bar_flush(subdev->device->bar);
 
 	if (refcount_dec_and_mutex_lock(&iobj->maps, &subdev->mutex)) {
+		/* Add the now-unused mapping to the LRU instead of directly
+		 * unmapping it here, in case we need to map it again later.
+		 */
+		if (likely(iobj->lru.next) && iobj->map) {
+			BUG_ON(!list_empty(&iobj->lru));
+			list_add_tail(&iobj->lru, &imem->lru);
+		}
+
 		/* Switch back to NULL accessors when last map is gone. */
 		iobj->base.memory.ptrs = &nv50_instobj_slow;
 		mutex_unlock(&subdev->mutex);
@@ -203,6 +240,10 @@ nv50_instobj_acquire(struct nvkm_memory *memory)
 	}
 
 	if (!refcount_inc_not_zero(&iobj->maps)) {
+		/* Exclude object from eviction while it's being accessed. */
+		if (likely(iobj->lru.next))
+			list_del_init(&iobj->lru);
+
 		if (map)
 			iobj->base.memory.ptrs = &nv50_instobj_fast;
 		else
@@ -220,7 +261,15 @@ nv50_instobj_boot(struct nvkm_memory *memory, struct nvkm_vmm *vmm)
 	struct nv50_instobj *iobj = nv50_instobj(memory);
 	struct nvkm_instmem *imem = &iobj->imem->base;
 
+	/* Exclude bootstrapped objects (ie. the page tables for the
+	 * instmem BAR itself) from eviction.
+	 */
 	mutex_lock(&imem->subdev.mutex);
+	if (likely(iobj->lru.next)) {
+		list_del_init(&iobj->lru);
+		iobj->lru.next = NULL;
+	}
+
 	nv50_instobj_kmap(iobj, vmm);
 	mutex_unlock(&imem->subdev.mutex);
 }
@@ -249,10 +298,21 @@ nv50_instobj_dtor(struct nvkm_memory *memory)
 	struct nv50_instobj *iobj = nv50_instobj(memory);
 	struct nvkm_instmem *imem = &iobj->imem->base;
 	struct nvkm_ram *ram = imem->subdev.device->fb->ram;
+	struct nvkm_vma bar;
+	void *map = map;
+
+	mutex_lock(&imem->subdev.mutex);
+	if (likely(iobj->lru.next))
+		list_del(&iobj->lru);
+	map = iobj->map;
+	bar = iobj->bar;
+	mutex_unlock(&imem->subdev.mutex);
 
-	if (iobj->map) {
-		iounmap(iobj->map);
-		nvkm_vm_put(&iobj->bar);
+	if (map) {
+		iounmap(map);
+		nvkm_vm_put(&bar);
 	}
+
 	ram->func->put(ram, &iobj->mem);
 	nvkm_instobj_dtor(imem, &iobj->base);
 	return iobj;
@@ -287,6 +347,7 @@ nv50_instobj_new(struct nvkm_instmem *base, u32 size, u32 align, bool zero,
 	iobj->base.memory.ptrs = &nv50_instobj_slow;
 	iobj->imem = imem;
 	refcount_set(&iobj->maps, 0);
+	INIT_LIST_HEAD(&iobj->lru);
 
 	size  = max((size  + 4095) & ~4095, (u32)4096);
 	align = max((align + 4095) & ~4095, (u32)4096);
@@ -326,6 +387,7 @@ nv50_instmem_new(struct nvkm_device *device, int index,
 	if (!(imem = kzalloc(sizeof(*imem), GFP_KERNEL)))
 		return -ENOMEM;
 	nvkm_instmem_ctor(&nv50_instmem, device, index, &imem->base);
+	INIT_LIST_HEAD(&imem->lru);
 	*pimem = &imem->base;
 	return 0;
 }
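
A pattern worth noting in both the eviction loop and the reworked nv50_instobj_dtor() above: the mapping is detached from its object while holding subdev->mutex, but the actual iounmap()/nvkm_vm_put() run only after the lock is dropped (the kmap path's own comment cites possible recursion during page table allocation as the reason the lock can't be held across those calls). Below is a generic sketch of that detach-under-lock, tear-down-outside-lock shape, using pthreads and illustrative names rather than the kernel's mutex API.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

struct object { void *map; };

static void destroy_mapping(void *map)
{
	/* The slow teardown (iounmap()/nvkm_vm_put() in the driver) runs
	 * here, after the lock has been dropped, so it can neither stall
	 * other lock holders nor recurse into the same lock.
	 */
	free(map);
}

static void object_unmap(struct object *obj)
{
	void *map;

	pthread_mutex_lock(&lock);
	map = obj->map;		/* take ownership of the mapping...      */
	obj->map = NULL;	/* ...so no one else can see or reuse it */
	pthread_mutex_unlock(&lock);

	if (map)
		destroy_mapping(map);
}

int main(void)
{
	struct object obj = { .map = malloc(64) };
	object_unmap(&obj);
	puts("mapping torn down outside the lock");
	return 0;
}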