Commit af515ec8 authored by Ben Skeggs

drm/nouveau/imem/nv50: move slow-path locking into rd/wr functions

This is to simplify upcoming changes.  The slow-path is something that
currently occurs during bootstrap of the BAR2 VMM, while backing up an
object during suspend/resume, or when BAR2 address space runs out.

The latter is a real problem that can happen at runtime, and occurs in
Fedora 26 already (due to some change that causes a lot of channels to
be created at login), so ideally we'd prefer not to make it any slower.

We'd also like suspend/resume speed to not suffer.

Upcoming commits will solve those problems in a better way, making the
extra overhead of moving the locking here a non-issue.
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
parent f584bde6
...@@ -31,8 +31,6 @@ ...@@ -31,8 +31,6 @@
struct nv50_instmem { struct nv50_instmem {
struct nvkm_instmem base; struct nvkm_instmem base;
unsigned long lock_flags;
spinlock_t lock;
u64 addr; u64 addr;
}; };
...@@ -57,12 +55,15 @@ nv50_instobj_wr32_slow(struct nvkm_memory *memory, u64 offset, u32 data) ...@@ -57,12 +55,15 @@ nv50_instobj_wr32_slow(struct nvkm_memory *memory, u64 offset, u32 data)
struct nvkm_device *device = imem->base.subdev.device; struct nvkm_device *device = imem->base.subdev.device;
u64 base = (iobj->mem->offset + offset) & 0xffffff00000ULL; u64 base = (iobj->mem->offset + offset) & 0xffffff00000ULL;
u64 addr = (iobj->mem->offset + offset) & 0x000000fffffULL; u64 addr = (iobj->mem->offset + offset) & 0x000000fffffULL;
unsigned long flags;
spin_lock_irqsave(&imem->base.lock, flags);
if (unlikely(imem->addr != base)) { if (unlikely(imem->addr != base)) {
nvkm_wr32(device, 0x001700, base >> 16); nvkm_wr32(device, 0x001700, base >> 16);
imem->addr = base; imem->addr = base;
} }
nvkm_wr32(device, 0x700000 + addr, data); nvkm_wr32(device, 0x700000 + addr, data);
spin_unlock_irqrestore(&imem->base.lock, flags);
} }
static u32 static u32
...@@ -74,12 +75,15 @@ nv50_instobj_rd32_slow(struct nvkm_memory *memory, u64 offset) ...@@ -74,12 +75,15 @@ nv50_instobj_rd32_slow(struct nvkm_memory *memory, u64 offset)
u64 base = (iobj->mem->offset + offset) & 0xffffff00000ULL; u64 base = (iobj->mem->offset + offset) & 0xffffff00000ULL;
u64 addr = (iobj->mem->offset + offset) & 0x000000fffffULL; u64 addr = (iobj->mem->offset + offset) & 0x000000fffffULL;
u32 data; u32 data;
unsigned long flags;
spin_lock_irqsave(&imem->base.lock, flags);
if (unlikely(imem->addr != base)) { if (unlikely(imem->addr != base)) {
nvkm_wr32(device, 0x001700, base >> 16); nvkm_wr32(device, 0x001700, base >> 16);
imem->addr = base; imem->addr = base;
} }
data = nvkm_rd32(device, 0x700000 + addr); data = nvkm_rd32(device, 0x700000 + addr);
spin_unlock_irqrestore(&imem->base.lock, flags);
return data; return data;
} }
...@@ -127,8 +131,6 @@ nv50_instobj_map(struct nvkm_memory *memory, struct nvkm_vma *vma, u64 offset) ...@@ -127,8 +131,6 @@ nv50_instobj_map(struct nvkm_memory *memory, struct nvkm_vma *vma, u64 offset)
static void static void
nv50_instobj_release(struct nvkm_memory *memory) nv50_instobj_release(struct nvkm_memory *memory)
{ {
struct nv50_instmem *imem = nv50_instobj(memory)->imem;
spin_unlock_irqrestore(&imem->lock, imem->lock_flags);
} }
static void __iomem * static void __iomem *
...@@ -137,15 +139,12 @@ nv50_instobj_acquire(struct nvkm_memory *memory) ...@@ -137,15 +139,12 @@ nv50_instobj_acquire(struct nvkm_memory *memory)
struct nv50_instobj *iobj = nv50_instobj(memory); struct nv50_instobj *iobj = nv50_instobj(memory);
struct nv50_instmem *imem = iobj->imem; struct nv50_instmem *imem = iobj->imem;
struct nvkm_vm *vm; struct nvkm_vm *vm;
unsigned long flags;
if (!iobj->map && (vm = nvkm_bar_bar2_vmm(imem->base.subdev.device))) if (!iobj->map && (vm = nvkm_bar_bar2_vmm(imem->base.subdev.device)))
nv50_instobj_kmap(iobj, vm); nv50_instobj_kmap(iobj, vm);
if (!IS_ERR_OR_NULL(iobj->map)) if (!IS_ERR_OR_NULL(iobj->map))
return iobj->map; return iobj->map;
spin_lock_irqsave(&imem->lock, flags);
imem->lock_flags = flags;
return NULL; return NULL;
} }
...@@ -254,7 +253,6 @@ nv50_instmem_new(struct nvkm_device *device, int index, ...@@ -254,7 +253,6 @@ nv50_instmem_new(struct nvkm_device *device, int index,
if (!(imem = kzalloc(sizeof(*imem), GFP_KERNEL))) if (!(imem = kzalloc(sizeof(*imem), GFP_KERNEL)))
return -ENOMEM; return -ENOMEM;
nvkm_instmem_ctor(&nv50_instmem, device, index, &imem->base); nvkm_instmem_ctor(&nv50_instmem, device, index, &imem->base);
spin_lock_init(&imem->lock);
*pimem = &imem->base; *pimem = &imem->base;
return 0; return 0;
} }
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment