Commit e5bf9a5c authored by Ben Skeggs

drm/nouveau/instmem: protect mm/lru with private mutex

nvkm_subdev.mutex is going away.
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
Reviewed-by: Lyude Paul <lyude@redhat.com>
parent 0e65ec75
@@ -13,6 +13,11 @@ struct nvkm_instmem {
 	struct list_head boot;
 	u32 reserved;
 
+	/* <=nv4x: protects NV_PRAMIN/BAR2 MM
+	 * >=nv50: protects BAR2 MM & LRU
+	 */
+	struct mutex mutex;
+
 	struct nvkm_memory *vbios;
 	struct nvkm_ramht *ramht;
 	struct nvkm_memory *ramro;
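Note: the comment on the new field is the whole locking contract. A userspace sketch of the shape this leaves behind, with a pthread mutex standing in for the kernel's struct mutex (field names follow the patch; the rest is illustrative):

#include <pthread.h>

/* Illustrative stand-in for struct nvkm_instmem after this patch.
 * The private lock guards exactly the state named above: the
 * PRAMIN/BAR2 suballocator on <=nv4x, and the BAR2 suballocator
 * plus the mapping LRU on >=nv50. */
struct instmem {
	pthread_mutex_t mutex;	/* take before touching heap or lru */
	/* heap: suballocator state (struct nvkm_mm in the kernel) */
	/* lru:  idle BAR2 mappings, oldest first (>=nv50 only)    */
};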
......
@@ -218,9 +218,11 @@ static void *
 nvkm_instmem_dtor(struct nvkm_subdev *subdev)
 {
 	struct nvkm_instmem *imem = nvkm_instmem(subdev);
+	void *data = imem;
 	if (imem->func->dtor)
-		return imem->func->dtor(imem);
-	return imem;
+		data = imem->func->dtor(imem);
+	mutex_destroy(&imem->mutex);
+	return data;
 }
 
 static const struct nvkm_subdev_func
@@ -241,4 +243,5 @@ nvkm_instmem_ctor(const struct nvkm_instmem_func *func,
 	spin_lock_init(&imem->lock);
 	INIT_LIST_HEAD(&imem->list);
 	INIT_LIST_HEAD(&imem->boot);
+	mutex_init(&imem->mutex);
 }
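These two hunks tie the lock's lifetime to the object's: mutex_init() in the common constructor, mutex_destroy() in the destructor. The destructor is restructured so the destroy always runs, even when an implementation-specific dtor returns a different pointer to free. A hedged userspace rendering of that ordering (all names invented for the sketch):

#include <pthread.h>

struct instmem {
	pthread_mutex_t mutex;
	void *(*impl_dtor)(struct instmem *);	/* may return a different allocation */
};

static void instmem_ctor(struct instmem *imem)
{
	pthread_mutex_init(&imem->mutex, NULL);
}

static void *instmem_dtor(struct instmem *imem)
{
	void *data = imem;

	if (imem->impl_dtor)
		data = imem->impl_dtor(imem);	/* subclass teardown first */
	pthread_mutex_destroy(&imem->mutex);	/* then retire the private lock */
	return data;				/* caller frees whatever this is */
}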
@@ -99,9 +99,9 @@ static void *
 nv04_instobj_dtor(struct nvkm_memory *memory)
 {
 	struct nv04_instobj *iobj = nv04_instobj(memory);
-	mutex_lock(&iobj->imem->base.subdev.mutex);
+	mutex_lock(&iobj->imem->base.mutex);
 	nvkm_mm_free(&iobj->imem->heap, &iobj->node);
-	mutex_unlock(&iobj->imem->base.subdev.mutex);
+	mutex_unlock(&iobj->imem->base.mutex);
 	nvkm_instobj_dtor(&iobj->imem->base, &iobj->base);
 	return iobj;
 }
@@ -132,10 +132,9 @@ nv04_instobj_new(struct nvkm_instmem *base, u32 size, u32 align, bool zero,
 	iobj->base.memory.ptrs = &nv04_instobj_ptrs;
 	iobj->imem = imem;
 
-	mutex_lock(&imem->base.subdev.mutex);
-	ret = nvkm_mm_head(&imem->heap, 0, 1, size, size,
-			   align ? align : 1, &iobj->node);
-	mutex_unlock(&imem->base.subdev.mutex);
+	mutex_lock(&imem->base.mutex);
+	ret = nvkm_mm_head(&imem->heap, 0, 1, size, size, align ? align : 1, &iobj->node);
+	mutex_unlock(&imem->base.mutex);
 	return ret;
 }
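In the nv04/nv40 paths (this hunk and the matching nv40 hunks below) the critical section is kept to exactly the allocator calls: nvkm_mm_head() to carve out a range, nvkm_mm_free() to return it. A self-contained analogue with a toy bump allocator standing in for nvkm_mm (hypothetical names; power-of-two alignment assumed):

#include <pthread.h>
#include <stdint.h>

struct heap {
	pthread_mutex_t mutex;
	uint32_t next, size;	/* bump pointer over a PRAMIN-like window */
};

static int heap_alloc(struct heap *h, uint32_t size, uint32_t align,
		      uint32_t *offset)
{
	int ret = -1;
	uint32_t base;

	if (!align)
		align = 1;	/* mirrors "align ? align : 1" above */
	pthread_mutex_lock(&h->mutex);
	base = (h->next + align - 1) & ~(align - 1);
	if (base + size <= h->size) {	/* enough room left in the window? */
		*offset = base;
		h->next = base + size;
		ret = 0;
	}
	pthread_mutex_unlock(&h->mutex);
	return ret;
}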
......
@@ -99,9 +99,9 @@ static void *
 nv40_instobj_dtor(struct nvkm_memory *memory)
 {
 	struct nv40_instobj *iobj = nv40_instobj(memory);
-	mutex_lock(&iobj->imem->base.subdev.mutex);
+	mutex_lock(&iobj->imem->base.mutex);
 	nvkm_mm_free(&iobj->imem->heap, &iobj->node);
-	mutex_unlock(&iobj->imem->base.subdev.mutex);
+	mutex_unlock(&iobj->imem->base.mutex);
 	nvkm_instobj_dtor(&iobj->imem->base, &iobj->base);
 	return iobj;
 }
@@ -132,10 +132,9 @@ nv40_instobj_new(struct nvkm_instmem *base, u32 size, u32 align, bool zero,
 	iobj->base.memory.ptrs = &nv40_instobj_ptrs;
 	iobj->imem = imem;
 
-	mutex_lock(&imem->base.subdev.mutex);
-	ret = nvkm_mm_head(&imem->heap, 0, 1, size, size,
-			   align ? align : 1, &iobj->node);
-	mutex_unlock(&imem->base.subdev.mutex);
+	mutex_lock(&imem->base.mutex);
+	ret = nvkm_mm_head(&imem->heap, 0, 1, size, size, align ? align : 1, &iobj->node);
+	mutex_unlock(&imem->base.mutex);
 	return ret;
 }
......
@@ -133,12 +133,12 @@ nv50_instobj_kmap(struct nv50_instobj *iobj, struct nvkm_vmm *vmm)
 	 * into it. The lock has to be dropped while doing this due
 	 * to the possibility of recursion for page table allocation.
 	 */
-	mutex_unlock(&subdev->mutex);
+	mutex_unlock(&imem->base.mutex);
 	while ((ret = nvkm_vmm_get(vmm, 12, size, &bar))) {
 		/* Evict unused mappings, and keep retrying until we either
 		 * succeed, or there's no more objects left on the LRU.
 		 */
-		mutex_lock(&subdev->mutex);
+		mutex_lock(&imem->base.mutex);
 		eobj = list_first_entry_or_null(&imem->lru, typeof(*eobj), lru);
 		if (eobj) {
 			nvkm_debug(subdev, "evict %016llx %016llx @ %016llx\n",
@@ -151,7 +151,7 @@ nv50_instobj_kmap(struct nv50_instobj *iobj, struct nvkm_vmm *vmm)
 			emap = eobj->map;
 			eobj->map = NULL;
 		}
-		mutex_unlock(&subdev->mutex);
+		mutex_unlock(&imem->base.mutex);
 		if (!eobj)
 			break;
 		iounmap(emap);
@@ -160,12 +160,12 @@ nv50_instobj_kmap(struct nv50_instobj *iobj, struct nvkm_vmm *vmm)
 	if (ret == 0)
 		ret = nvkm_memory_map(memory, 0, vmm, bar, NULL, 0);
 
-	mutex_lock(&subdev->mutex);
+	mutex_lock(&imem->base.mutex);
 	if (ret || iobj->bar) {
 		/* We either failed, or another thread beat us. */
-		mutex_unlock(&subdev->mutex);
+		mutex_unlock(&imem->base.mutex);
 		nvkm_vmm_put(vmm, &bar);
-		mutex_lock(&subdev->mutex);
+		mutex_lock(&imem->base.mutex);
 		return;
 	}
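These three hunks are the subtle part of the conversion. nvkm_vmm_get() can recurse back into instmem for page-table allocations, so the private lock must be dropped around it; on failure, one LRU victim is evicted under the lock and the allocation retried until it succeeds or the LRU runs dry. A compilable miniature of that drop-lock/evict/retry shape (stub types and functions, all names hypothetical):

#include <pthread.h>
#include <stddef.h>

struct mapping { struct mapping *next; };

struct lru {
	pthread_mutex_t mutex;	/* the instmem-private lock */
	struct mapping *head;	/* idle mappings, oldest first */
};

/* Dummy stand-ins for the allocation that may recurse and the unmap. */
static int  vmm_get(size_t size) { (void)size; return 0; }
static void unmap_one(struct mapping *m) { (void)m; }

/* Called with lru->mutex held; returns with it held, as above. */
static int alloc_with_eviction(struct lru *lru, size_t size)
{
	int ret;

	/* Drop the lock: vmm_get() may re-enter this allocator. */
	pthread_mutex_unlock(&lru->mutex);
	while ((ret = vmm_get(size))) {
		struct mapping *victim;

		/* Evict one idle mapping under the lock, then retry. */
		pthread_mutex_lock(&lru->mutex);
		victim = lru->head;
		if (victim)
			lru->head = victim->next;
		pthread_mutex_unlock(&lru->mutex);

		if (!victim)
			break;		/* nothing left to evict: give up */
		unmap_one(victim);	/* heavyweight work outside the lock */
	}
	pthread_mutex_lock(&lru->mutex);
	return ret;
}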
@@ -197,7 +197,7 @@ nv50_instobj_release(struct nvkm_memory *memory)
 	wmb();
 	nvkm_bar_flush(subdev->device->bar);
 
-	if (refcount_dec_and_mutex_lock(&iobj->maps, &subdev->mutex)) {
+	if (refcount_dec_and_mutex_lock(&iobj->maps, &imem->base.mutex)) {
 		/* Add the now-unused mapping to the LRU instead of directly
 		 * unmapping it here, in case we need to map it again later.
 		 */
@@ -208,7 +208,7 @@ nv50_instobj_release(struct nvkm_memory *memory)
 		/* Switch back to NULL accessors when last map is gone. */
 		iobj->base.memory.ptrs = NULL;
-		mutex_unlock(&subdev->mutex);
+		mutex_unlock(&imem->base.mutex);
 	}
 }
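refcount_dec_and_mutex_lock() is what keeps this release path race-free: it returns true, with the mutex held, only for the thread that dropped the final map reference, so exactly one thread moves the mapping onto the LRU and clears the accessors. An approximation of its semantics in C11 atomics (the kernel primitive also saturates on misuse, which this sketch ignores):

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

/* Decrement *ref; iff it reaches zero, return true with lock held. */
static bool dec_and_mutex_lock(atomic_int *ref, pthread_mutex_t *lock)
{
	int old = atomic_load(ref);

	/* Fast path: provably not the last reference, no lock needed. */
	while (old > 1) {
		if (atomic_compare_exchange_weak(ref, &old, old - 1))
			return false;
	}
	/* Possibly the last reference: decide under the lock so the
	 * acquire path's inc-not-zero serializes against teardown. */
	pthread_mutex_lock(lock);
	if (atomic_fetch_sub(ref, 1) == 1)
		return true;	/* caller holds the lock; do teardown */
	pthread_mutex_unlock(lock);
	return false;
}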
@@ -227,9 +227,9 @@ nv50_instobj_acquire(struct nvkm_memory *memory)
 	/* Take the lock, and re-check that another thread hasn't
 	 * already mapped the object in the meantime.
 	 */
-	mutex_lock(&imem->subdev.mutex);
+	mutex_lock(&imem->mutex);
 	if (refcount_inc_not_zero(&iobj->maps)) {
-		mutex_unlock(&imem->subdev.mutex);
+		mutex_unlock(&imem->mutex);
 		return iobj->map;
 	}
@@ -252,7 +252,7 @@ nv50_instobj_acquire(struct nvkm_memory *memory)
 		refcount_set(&iobj->maps, 1);
 	}
 
-	mutex_unlock(&imem->subdev.mutex);
+	mutex_unlock(&imem->mutex);
 	return map;
 }
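The acquire side is the matching double-checked pattern: try refcount_inc_not_zero() without the lock, and only on failure take the mutex, re-check, and do the expensive mapping. A sketch with the same userspace primitives (map_object() is a dummy stand-in for the BAR2 kmap work):

#include <pthread.h>
#include <stdatomic.h>

struct object {
	atomic_int maps;	/* 0 means "not currently mapped" */
	void *map;		/* valid while maps > 0 */
};

/* Increment *ref unless it is zero; returns nonzero on success. */
static int inc_not_zero(atomic_int *ref)
{
	int old = atomic_load(ref);

	while (old != 0) {
		if (atomic_compare_exchange_weak(ref, &old, old + 1))
			return 1;
	}
	return 0;
}

static void *map_object(struct object *obj) { return obj; }	/* dummy */

static void *acquire(struct object *obj, pthread_mutex_t *lock)
{
	/* Fast path: piggyback on an existing mapping, lock-free. */
	if (inc_not_zero(&obj->maps))
		return obj->map;

	/* Slow path: lock and re-check; another thread may have
	 * created the mapping in the meantime. */
	pthread_mutex_lock(lock);
	if (inc_not_zero(&obj->maps)) {
		pthread_mutex_unlock(lock);
		return obj->map;
	}
	obj->map = map_object(obj);	/* expensive BAR2 map in the kernel */
	atomic_store(&obj->maps, 1);
	pthread_mutex_unlock(lock);
	return obj->map;
}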
@@ -265,7 +265,7 @@ nv50_instobj_boot(struct nvkm_memory *memory, struct nvkm_vmm *vmm)
 	/* Exclude bootstrapped objects (ie. the page tables for the
 	 * instmem BAR itself) from eviction.
 	 */
-	mutex_lock(&imem->subdev.mutex);
+	mutex_lock(&imem->mutex);
 	if (likely(iobj->lru.next)) {
 		list_del_init(&iobj->lru);
 		iobj->lru.next = NULL;
@@ -273,7 +273,7 @@ nv50_instobj_boot(struct nvkm_memory *memory, struct nvkm_vmm *vmm)
 	nv50_instobj_kmap(iobj, vmm);
 	nvkm_instmem_boot(imem);
-	mutex_unlock(&imem->subdev.mutex);
+	mutex_unlock(&imem->mutex);
 }
 
 static u64
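The boot path also shows how LRU membership doubles as object state: linked into the LRU means idle and evictable, an empty (self-linked) list head means mapped and in use, and a NULLed lru.next is the permanent "never evict" marker that both the eviction loop and the destructor test with likely(iobj->lru.next). A hand-rolled miniature of those three states (the kernel uses list.h for this):

#include <stddef.h>

struct node { struct node *next, *prev; };

static void node_init(struct node *n) { n->next = n->prev = n; }

static void node_unlink(struct node *n)
{
	n->next->prev = n->prev;
	n->prev->next = n->next;
	node_init(n);			/* like list_del_init() */
}

/* Three states for an object's lru node, mirroring the code above:
 *   linked into the LRU  -> idle, may be evicted
 *   self-linked (empty)  -> mapped and in use
 *   next == NULL         -> pinned forever (bootstrapped object) */
static void pin(struct node *lru)
{
	if (lru->next) {		/* the likely(iobj->lru.next) test */
		node_unlink(lru);
		lru->next = NULL;	/* permanent "never evict" marker */
	}
}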
@@ -315,12 +315,12 @@ nv50_instobj_dtor(struct nvkm_memory *memory)
 	struct nvkm_vma *bar;
 	void *map = map;
 
-	mutex_lock(&imem->subdev.mutex);
+	mutex_lock(&imem->mutex);
 	if (likely(iobj->lru.next))
 		list_del(&iobj->lru);
 	map = iobj->map;
 	bar = iobj->bar;
-	mutex_unlock(&imem->subdev.mutex);
+	mutex_unlock(&imem->mutex);
 
 	if (map) {
 		struct nvkm_vmm *vmm = nvkm_bar_bar2_vmm(imem->subdev.device);
......