Commit f027f491 authored by Ben Skeggs

drm/nouveau/gpuobj: separate allocation from nvkm_object

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
parent 227c95d9
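
The reworked API in one place: nvkm_gpuobj_new() now takes the device directly instead of a pair of nvkm_objects, a signed alignment (the new nvkm_gpuobj_ctor() below uses nvkm_mm_head() for align >= 0 and nvkm_mm_tail() for negative values), a zero-fill bool in place of NVOBJ_FLAG_ZERO_ALLOC, and an optional parent gpuobj to sub-allocate from; nvkm_gpuobj_del() replaces nvkm_gpuobj_ref(NULL, ...). Below is a minimal usage sketch, distilled from the nv50_bar_ctor() hunk further down; it is illustrative only, and assumes `device` is a valid struct nvkm_device pointer:

	/* illustrative only: mirrors the allocation pattern of nv50_bar_ctor() */
	static int example_alloc(struct nvkm_device *device)
	{
		struct nvkm_gpuobj *mem = NULL, *pgd = NULL;
		int ret;

		/* 128KiB object allocated straight from instmem (no parent) */
		ret = nvkm_gpuobj_new(device, 0x20000, 0, false, NULL, &mem);
		if (ret)
			return ret;

		/* 16KiB zero-filled sub-allocation from mem's heap */
		ret = nvkm_gpuobj_new(device, 0x4000, 0, true, mem, &pgd);
		if (ret) {
			nvkm_gpuobj_del(&mem);
			return ret;
		}

		/* ... use pgd/mem ... */

		nvkm_gpuobj_del(&pgd);	/* frees the node and NULLs the pointer */
		nvkm_gpuobj_del(&mem);
		return 0;
	}
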
@@ -7,30 +7,33 @@ struct nvkm_vma;
 struct nvkm_vm;
 
 #define NVOBJ_FLAG_ZERO_ALLOC 0x00000001
-#define NVOBJ_FLAG_ZERO_FREE 0x00000002
 #define NVOBJ_FLAG_HEAP 0x00000004
 
 struct nvkm_gpuobj {
 	struct nvkm_object object;
-	struct nvkm_memory *memory;
+	const struct nvkm_gpuobj_func *func;
 	struct nvkm_gpuobj *parent;
+	struct nvkm_memory *memory;
 	struct nvkm_mm_node *node;
-	struct nvkm_mm heap;
-
-	u32 flags;
 	u64 addr;
 	u32 size;
+	struct nvkm_mm heap;
 
-	const struct nvkm_gpuobj_func *func;
+	void __iomem *map;
 };
 
 struct nvkm_gpuobj_func {
-	void (*acquire)(struct nvkm_gpuobj *);
+	void *(*acquire)(struct nvkm_gpuobj *);
 	void (*release)(struct nvkm_gpuobj *);
 	u32 (*rd32)(struct nvkm_gpuobj *, u32 offset);
 	void (*wr32)(struct nvkm_gpuobj *, u32 offset, u32 data);
 };
 
+int nvkm_gpuobj_new(struct nvkm_device *, u32 size, int align, bool zero,
+		    struct nvkm_gpuobj *parent, struct nvkm_gpuobj **);
+void nvkm_gpuobj_del(struct nvkm_gpuobj **);
+
 static inline struct nvkm_gpuobj *
 nv_gpuobj(void *obj)
 {
@@ -51,11 +54,8 @@ int nvkm_gpuobj_create_(struct nvkm_object *, struct nvkm_object *,
 			u32 flags, int length, void **);
 void nvkm_gpuobj_destroy(struct nvkm_gpuobj *);
 
-int nvkm_gpuobj_new(struct nvkm_object *, struct nvkm_object *, u32 size,
-		    u32 align, u32 flags, struct nvkm_gpuobj **);
-int nvkm_gpuobj_dup(struct nvkm_object *, struct nvkm_memory *,
-		    struct nvkm_gpuobj **);
-int nvkm_gpuobj_map_vm(struct nvkm_gpuobj *, struct nvkm_vm *, u32 access,
+int nvkm_gpuobj_wrap(struct nvkm_memory *, struct nvkm_gpuobj **);
+int nvkm_gpuobj_map(struct nvkm_gpuobj *, struct nvkm_vm *, u32 access,
 		    struct nvkm_vma *);
 void nvkm_gpuobj_unmap(struct nvkm_vma *);
...
@@ -15,7 +15,7 @@ struct nvkm_dmaeng {
 	struct nvkm_engine engine;
 
 	/* creates a "physical" dma object from a struct nvkm_dmaobj */
-	int (*bind)(struct nvkm_dmaobj *dmaobj, struct nvkm_object *parent,
+	int (*bind)(struct nvkm_dmaobj *dmaobj, struct nvkm_gpuobj *parent,
 		    struct nvkm_gpuobj **);
 };
...
@@ -141,7 +141,7 @@ nouveau_accel_fini(struct nouveau_drm *drm)
 {
 	nouveau_channel_del(&drm->channel);
 	nvif_object_fini(&drm->ntfy);
-	nvkm_gpuobj_ref(NULL, &drm->notify);
+	nvkm_gpuobj_del(&drm->notify);
 	nvif_object_fini(&drm->nvsw);
 	nouveau_channel_del(&drm->cechan);
 	nvif_object_fini(&drm->ttm.copy);
@@ -264,8 +264,8 @@ nouveau_accel_init(struct nouveau_drm *drm)
 	}
 
 	if (device->info.family < NV_DEVICE_INFO_V0_FERMI) {
-		ret = nvkm_gpuobj_new(nvxx_object(&drm->device.object), NULL, 32,
-				      0, 0, &drm->notify);
+		ret = nvkm_gpuobj_new(nvxx_device(&drm->device), 32, 0, false,
+				      NULL, &drm->notify);
 		if (ret) {
 			NV_ERROR(drm, "failed to allocate notifier, %d\n", ret);
 			nouveau_accel_fini(drm);
...
@@ -28,74 +28,209 @@
 #include <subdev/bar.h>
 #include <subdev/mmu.h>
 
+/* fast-path, where backend is able to provide direct pointer to memory */
+static u32
+nvkm_gpuobj_rd32_fast(struct nvkm_gpuobj *gpuobj, u32 offset)
+{
+	return ioread32_native(gpuobj->map + offset);
+}
+
 static void
-nvkm_gpuobj_release(struct nvkm_gpuobj *gpuobj)
+nvkm_gpuobj_wr32_fast(struct nvkm_gpuobj *gpuobj, u32 offset, u32 data)
 {
-	if (gpuobj->node) {
-		nvkm_done(gpuobj->parent);
-		return;
-	}
+	iowrite32_native(data, gpuobj->map + offset);
+}
 
-	nvkm_done(gpuobj->memory);
+/* accessor functions for gpuobjs allocated directly from instmem */
+static u32
+nvkm_gpuobj_heap_rd32(struct nvkm_gpuobj *gpuobj, u32 offset)
+{
+	return nvkm_ro32(gpuobj->memory, offset);
 }
 
 static void
-nvkm_gpuobj_acquire(struct nvkm_gpuobj *gpuobj)
+nvkm_gpuobj_heap_wr32(struct nvkm_gpuobj *gpuobj, u32 offset, u32 data)
 {
-	if (gpuobj->node) {
-		nvkm_kmap(gpuobj->parent);
-		return;
-	}
+	nvkm_wo32(gpuobj->memory, offset, data);
+}
 
-	nvkm_kmap(gpuobj->memory);
+static const struct nvkm_gpuobj_func nvkm_gpuobj_heap;
+static void
+nvkm_gpuobj_heap_release(struct nvkm_gpuobj *gpuobj)
+{
+	gpuobj->func = &nvkm_gpuobj_heap;
+	nvkm_done(gpuobj->memory);
+}
+
+static const struct nvkm_gpuobj_func
+nvkm_gpuobj_heap_fast = {
+	.release = nvkm_gpuobj_heap_release,
+	.rd32 = nvkm_gpuobj_rd32_fast,
+	.wr32 = nvkm_gpuobj_wr32_fast,
+};
+
+static const struct nvkm_gpuobj_func
+nvkm_gpuobj_heap_slow = {
+	.release = nvkm_gpuobj_heap_release,
+	.rd32 = nvkm_gpuobj_heap_rd32,
+	.wr32 = nvkm_gpuobj_heap_wr32,
+};
+
+static void *
+nvkm_gpuobj_heap_acquire(struct nvkm_gpuobj *gpuobj)
+{
+	gpuobj->map = nvkm_kmap(gpuobj->memory);
+	if (likely(gpuobj->map))
+		gpuobj->func = &nvkm_gpuobj_heap_fast;
+	else
+		gpuobj->func = &nvkm_gpuobj_heap_slow;
+	return gpuobj->map;
 }
 
+static const struct nvkm_gpuobj_func
+nvkm_gpuobj_heap = {
+	.acquire = nvkm_gpuobj_heap_acquire,
+};
+
+/* accessor functions for gpuobjs sub-allocated from a parent gpuobj */
 static u32
 nvkm_gpuobj_rd32(struct nvkm_gpuobj *gpuobj, u32 offset)
 {
-	if (gpuobj->node)
-		return nvkm_ro32(gpuobj->parent, gpuobj->node->offset + offset);
-	return nvkm_ro32(gpuobj->memory, offset);
+	return nvkm_ro32(gpuobj->parent, gpuobj->node->offset + offset);
 }
 
 static void
 nvkm_gpuobj_wr32(struct nvkm_gpuobj *gpuobj, u32 offset, u32 data)
 {
-	if (gpuobj->node) {
-		nvkm_wo32(gpuobj->parent, gpuobj->node->offset + offset, data);
-		return;
-	}
+	nvkm_wo32(gpuobj->parent, gpuobj->node->offset + offset, data);
+}
 
-	nvkm_wo32(gpuobj->memory, offset, data);
+static const struct nvkm_gpuobj_func nvkm_gpuobj_func;
+static void
+nvkm_gpuobj_release(struct nvkm_gpuobj *gpuobj)
+{
+	gpuobj->func = &nvkm_gpuobj_func;
+	nvkm_done(gpuobj->parent);
+}
+
+static const struct nvkm_gpuobj_func
+nvkm_gpuobj_fast = {
+	.release = nvkm_gpuobj_release,
+	.rd32 = nvkm_gpuobj_rd32_fast,
+	.wr32 = nvkm_gpuobj_wr32_fast,
+};
+
+static const struct nvkm_gpuobj_func
+nvkm_gpuobj_slow = {
+	.release = nvkm_gpuobj_release,
+	.rd32 = nvkm_gpuobj_rd32,
+	.wr32 = nvkm_gpuobj_wr32,
+};
+
+static void *
+nvkm_gpuobj_acquire(struct nvkm_gpuobj *gpuobj)
+{
+	gpuobj->map = nvkm_kmap(gpuobj->parent);
+	if (likely(gpuobj->map)) {
+		gpuobj->map = (u8 *)gpuobj->map + gpuobj->node->offset;
+		gpuobj->func = &nvkm_gpuobj_fast;
+	} else {
+		gpuobj->func = &nvkm_gpuobj_slow;
+	}
+	return gpuobj->map;
 }
 
-void
-nvkm_gpuobj_destroy(struct nvkm_gpuobj *gpuobj)
+static const struct nvkm_gpuobj_func
+nvkm_gpuobj_func = {
+	.acquire = nvkm_gpuobj_acquire,
+};
+
+static int
+nvkm_gpuobj_ctor(struct nvkm_device *device, u32 size, int align, bool zero,
+		 struct nvkm_gpuobj *parent, struct nvkm_gpuobj *gpuobj)
 {
-	int i;
+	u32 offset;
+	int ret;
+
+	if (parent) {
+		if (align >= 0) {
+			ret = nvkm_mm_head(&parent->heap, 0, 1, size, size,
+					   max(align, 1), &gpuobj->node);
+		} else {
+			ret = nvkm_mm_tail(&parent->heap, 0, 1, size, size,
+					   -align, &gpuobj->node);
+		}
+		if (ret)
+			return ret;
 
-	if (gpuobj->flags & NVOBJ_FLAG_ZERO_FREE) {
-		nvkm_kmap(gpuobj);
-		for (i = 0; i < gpuobj->size; i += 4)
-			nvkm_wo32(gpuobj, i, 0x00000000);
-		nvkm_done(gpuobj);
-	}
+		gpuobj->parent = parent;
+		gpuobj->func = &nvkm_gpuobj_func;
+		gpuobj->addr = parent->addr + gpuobj->node->offset;
+		gpuobj->size = gpuobj->node->length;
+
+		if (zero) {
+			nvkm_kmap(gpuobj);
+			for (offset = 0; offset < gpuobj->size; offset += 4)
+				nvkm_wo32(gpuobj, offset, 0x00000000);
+			nvkm_done(gpuobj);
+		}
+	} else {
+		ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, size,
+				      abs(align), zero, &gpuobj->memory);
+		if (ret)
+			return ret;
+
+		gpuobj->func = &nvkm_gpuobj_heap;
+		gpuobj->addr = nvkm_memory_addr(gpuobj->memory);
+		gpuobj->size = nvkm_memory_size(gpuobj->memory);
+	}
+
+	return nvkm_mm_init(&gpuobj->heap, 0, gpuobj->size, 1);
+}
+
+void
+nvkm_gpuobj_del(struct nvkm_gpuobj **pgpuobj)
+{
+	struct nvkm_gpuobj *gpuobj = *pgpuobj;
+	if (gpuobj) {
+		if (gpuobj->parent)
+			nvkm_mm_free(&gpuobj->parent->heap, &gpuobj->node);
+		nvkm_mm_fini(&gpuobj->heap);
+		nvkm_memory_del(&gpuobj->memory);
+		kfree(*pgpuobj);
+		*pgpuobj = NULL;
+	}
+}
+
+int
+nvkm_gpuobj_new(struct nvkm_device *device, u32 size, int align, bool zero,
+		struct nvkm_gpuobj *parent, struct nvkm_gpuobj **pgpuobj)
+{
+	struct nvkm_gpuobj *gpuobj;
+	int ret;
+
+	if (!(gpuobj = *pgpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL)))
+		return -ENOMEM;
+
+	ret = nvkm_gpuobj_ctor(device, size, align, zero, parent, gpuobj);
+	if (ret)
+		nvkm_gpuobj_del(pgpuobj);
+	return ret;
+}
 
+void
+nvkm_gpuobj_destroy(struct nvkm_gpuobj *gpuobj)
+{
 	if (gpuobj->node)
-		nvkm_mm_free(&nv_gpuobj(gpuobj->parent)->heap, &gpuobj->node);
+		nvkm_mm_free(&gpuobj->parent->heap, &gpuobj->node);
 
-	if (gpuobj->heap.block_size)
-		nvkm_mm_fini(&gpuobj->heap);
+	gpuobj->heap.block_size = 1;
+	nvkm_mm_fini(&gpuobj->heap);
 
 	nvkm_memory_del(&gpuobj->memory);
 	nvkm_object_destroy(&gpuobj->object);
 }
 
-static const struct nvkm_gpuobj_func
-nvkm_gpuobj_func = {
-	.acquire = nvkm_gpuobj_acquire,
-	.release = nvkm_gpuobj_release,
-	.rd32 = nvkm_gpuobj_rd32,
-	.wr32 = nvkm_gpuobj_wr32,
-};
-
 int
 nvkm_gpuobj_create_(struct nvkm_object *parent, struct nvkm_object *engine,
 		    struct nvkm_oclass *oclass, u32 pclass,
@@ -103,12 +238,10 @@ nvkm_gpuobj_create_(struct nvkm_object *parent, struct nvkm_object *engine,
 		    int length, void **pobject)
 {
 	struct nvkm_device *device = nv_device(parent);
-	struct nvkm_memory *memory = NULL;
 	struct nvkm_gpuobj *pargpu = NULL;
 	struct nvkm_gpuobj *gpuobj;
-	struct nvkm_mm *heap = NULL;
-	int ret, i;
-	u64 addr;
+	const bool zero = (flags & NVOBJ_FLAG_ZERO_ALLOC);
+	int ret;
 
 	*pobject = NULL;
@@ -122,85 +255,20 @@ nvkm_gpuobj_create_(struct nvkm_object *parent, struct nvkm_object *engine,
 		if (WARN_ON(objgpu == NULL))
 			return -EINVAL;
 		pargpu = nv_gpuobj(objgpu);
-
-		addr = pargpu->addr;
-		heap = &pargpu->heap;
-	} else {
-		ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
-				      size, align, false, &memory);
-		if (ret)
-			return ret;
-
-		addr = nvkm_memory_addr(memory);
-		size = nvkm_memory_size(memory);
 	}
 
 	ret = nvkm_object_create_(parent, engine, oclass, pclass |
 				  NV_GPUOBJ_CLASS, length, pobject);
 	gpuobj = *pobject;
-	if (ret) {
-		nvkm_memory_del(&memory);
-		return ret;
-	}
-
-	gpuobj->func = &nvkm_gpuobj_func;
-	gpuobj->memory = memory;
-	gpuobj->parent = pargpu;
-	gpuobj->flags = flags;
-	gpuobj->addr = addr;
-	gpuobj->size = size;
-
-	if (heap) {
-		ret = nvkm_mm_head(heap, 0, 1, size, size, max(align, (u32)1),
-				   &gpuobj->node);
-		if (ret)
-			return ret;
-
-		gpuobj->addr += gpuobj->node->offset;
-	}
-
-	if (gpuobj->flags & NVOBJ_FLAG_HEAP) {
-		ret = nvkm_mm_init(&gpuobj->heap, 0, gpuobj->size, 1);
-		if (ret)
-			return ret;
-	}
-
-	if (flags & NVOBJ_FLAG_ZERO_ALLOC) {
-		nvkm_kmap(gpuobj);
-		for (i = 0; i < gpuobj->size; i += 4)
-			nvkm_wo32(gpuobj, i, 0x00000000);
-		nvkm_done(gpuobj);
-	}
-
+	if (ret)
+		return ret;
+
+	ret = nvkm_gpuobj_ctor(device, size, align, zero, pargpu, gpuobj);
+	if (!(flags & NVOBJ_FLAG_HEAP))
+		gpuobj->heap.block_size = 0;
 	return ret;
 }
 
-struct nvkm_gpuobj_class {
-	struct nvkm_object *pargpu;
-	u64 size;
-	u32 align;
-	u32 flags;
-};
-
-static int
-_nvkm_gpuobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		  struct nvkm_oclass *oclass, void *data, u32 size,
-		  struct nvkm_object **pobject)
-{
-	struct nvkm_gpuobj_class *args = data;
-	struct nvkm_gpuobj *object;
-	int ret;
-
-	ret = nvkm_gpuobj_create(parent, engine, oclass, 0, args->pargpu,
-				 args->size, args->align, args->flags,
-				 &object);
-	*pobject = nv_object(object);
-	if (ret)
-		return ret;
-
-	return 0;
-}
-
 void
 _nvkm_gpuobj_dtor(struct nvkm_object *object)
 {
@@ -233,38 +301,8 @@ _nvkm_gpuobj_wr32(struct nvkm_object *object, u64 addr, u32 data)
 	nvkm_wo32(gpuobj, addr, data);
 }
 
-static struct nvkm_oclass
-_nvkm_gpuobj_oclass = {
-	.handle = 0x00000000,
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = _nvkm_gpuobj_ctor,
-		.dtor = _nvkm_gpuobj_dtor,
-		.init = _nvkm_gpuobj_init,
-		.fini = _nvkm_gpuobj_fini,
-		.rd32 = _nvkm_gpuobj_rd32,
-		.wr32 = _nvkm_gpuobj_wr32,
-	},
-};
-
-int
-nvkm_gpuobj_new(struct nvkm_object *parent, struct nvkm_object *pargpu,
-		u32 size, u32 align, u32 flags,
-		struct nvkm_gpuobj **pgpuobj)
-{
-	struct nvkm_gpuobj_class args = {
-		.pargpu = pargpu,
-		.size = size,
-		.align = align,
-		.flags = flags,
-	};
-
-	return nvkm_object_old(parent, &parent->engine->subdev.object,
-			       &_nvkm_gpuobj_oclass, &args, sizeof(args),
-			       (struct nvkm_object **)pgpuobj);
-}
-
 int
-nvkm_gpuobj_map_vm(struct nvkm_gpuobj *gpuobj, struct nvkm_vm *vm,
-		   u32 access, struct nvkm_vma *vma)
+nvkm_gpuobj_map(struct nvkm_gpuobj *gpuobj, struct nvkm_vm *vm,
+		u32 access, struct nvkm_vma *vma)
 {
 	struct nvkm_memory *memory = gpuobj->memory;
@@ -288,37 +326,13 @@ nvkm_gpuobj_unmap(struct nvkm_vma *vma)
  * anywhere else.
  */
 
-static void
-nvkm_gpudup_dtor(struct nvkm_object *object)
-{
-	struct nvkm_gpuobj *gpuobj = (void *)object;
-	nvkm_object_destroy(&gpuobj->object);
-}
-
-static struct nvkm_oclass
-nvkm_gpudup_oclass = {
-	.handle = NV_GPUOBJ_CLASS,
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.dtor = nvkm_gpudup_dtor,
-		.init = _nvkm_object_init,
-		.fini = _nvkm_object_fini,
-	},
-};
-
 int
-nvkm_gpuobj_dup(struct nvkm_object *parent, struct nvkm_memory *base,
-		struct nvkm_gpuobj **pgpuobj)
+nvkm_gpuobj_wrap(struct nvkm_memory *memory, struct nvkm_gpuobj **pgpuobj)
 {
-	struct nvkm_gpuobj *gpuobj;
-	int ret;
-
-	ret = nvkm_object_create(parent, &parent->engine->subdev.object,
-				 &nvkm_gpudup_oclass, 0, &gpuobj);
-	*pgpuobj = gpuobj;
-	if (ret)
-		return ret;
+	if (!(*pgpuobj = kzalloc(sizeof(**pgpuobj), GFP_KERNEL)))
+		return -ENOMEM;
 
-	gpuobj->addr = nvkm_memory_addr(base);
-	gpuobj->size = nvkm_memory_size(base);
+	(*pgpuobj)->addr = nvkm_memory_addr(memory);
+	(*pgpuobj)->size = nvkm_memory_size(memory);
 	return 0;
 }
@@ -24,32 +24,74 @@
 #include "priv.h"
 
 #include <core/client.h>
+#include <core/gpuobj.h>
 #include <subdev/fb.h>
 #include <subdev/instmem.h>
 
 #include <nvif/class.h>
 #include <nvif/unpack.h>
 
+struct hack {
+	struct nvkm_gpuobj object;
+	struct nvkm_gpuobj *parent;
+};
+
+static void
+dtor(struct nvkm_object *object)
+{
+	struct hack *hack = (void *)object;
+	nvkm_gpuobj_del(&hack->parent);
+	nvkm_object_destroy(&hack->object.object);
+}
+
+static struct nvkm_oclass
+hack = {
+	.handle = NV_GPUOBJ_CLASS,
+	.ofuncs = &(struct nvkm_ofuncs) {
+		.dtor = dtor,
+		.init = _nvkm_object_init,
+		.fini = _nvkm_object_fini,
+	},
+};
+
 static int
-nvkm_dmaobj_bind(struct nvkm_dmaobj *dmaobj, struct nvkm_object *parent,
+nvkm_dmaobj_bind(struct nvkm_dmaobj *dmaobj, struct nvkm_gpuobj *pargpu,
 		 struct nvkm_gpuobj **pgpuobj)
 {
 	const struct nvkm_dmaeng_impl *impl = (void *)
 		nv_oclass(nv_object(dmaobj)->engine);
 	int ret = 0;
 
-	if (nv_object(dmaobj) == parent) { /* ctor bind */
+	if (&dmaobj->base == &pargpu->object) { /* ctor bind */
+		struct nvkm_object *parent = (void *)pargpu;
+		struct hack *object;
+
 		if (nv_mclass(parent->parent) == NV_DEVICE) {
 			/* delayed, or no, binding */
 			return 0;
 		}
-		ret = impl->bind(dmaobj, parent, pgpuobj);
-		if (ret == 0)
+
+		pargpu = (void *)nv_pclass((void *)pargpu, NV_GPUOBJ_CLASS);
+
+		ret = nvkm_object_create(parent, NULL, &hack, NV_GPUOBJ_CLASS, &object);
+		if (ret == 0) {
 			nvkm_object_ref(NULL, &parent);
+			*pgpuobj = &object->object;
+
+			ret = impl->bind(dmaobj, pargpu, &object->parent);
+			if (ret)
+				return ret;
+
+			object->object.node = object->parent->node;
+			object->object.addr = object->parent->addr;
+			object->object.size = object->parent->size;
+			return 0;
+		}
+
 		return ret;
 	}
 
-	return impl->bind(dmaobj, parent, pgpuobj);
+	return impl->bind(dmaobj, pargpu, pgpuobj);
 }
 
 int
...
@@ -37,25 +37,14 @@ struct gf100_dmaobj {
 };
 
 static int
-gf100_dmaobj_bind(struct nvkm_dmaobj *obj, struct nvkm_object *parent,
+gf100_dmaobj_bind(struct nvkm_dmaobj *obj, struct nvkm_gpuobj *parent,
 		  struct nvkm_gpuobj **pgpuobj)
 {
 	struct gf100_dmaobj *dmaobj = container_of(obj, typeof(*dmaobj), base);
+	struct nvkm_device *device = dmaobj->base.base.engine->subdev.device;
 	int ret;
 
-	if (!nv_iclass(parent, NV_ENGCTX_CLASS)) {
-		switch (nv_mclass(parent->parent)) {
-		case GT214_DISP_CORE_CHANNEL_DMA:
-		case GT214_DISP_BASE_CHANNEL_DMA:
-		case GT214_DISP_OVERLAY_CHANNEL_DMA:
-			break;
-		default:
-			return -EINVAL;
-		}
-	} else
-		return 0;
-
-	ret = nvkm_gpuobj_new(parent, parent, 24, 32, 0, pgpuobj);
+	ret = nvkm_gpuobj_new(device, 24, 32, false, parent, pgpuobj);
 	if (ret == 0) {
 		nvkm_kmap(*pgpuobj);
 		nvkm_wo32(*pgpuobj, 0x00, dmaobj->flags0 | nv_mclass(dmaobj));
@@ -146,7 +135,7 @@ gf100_dmaobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 		break;
 	}
 
-	return dmaeng->bind(&dmaobj->base, nv_object(dmaobj), (void *)pobject);
+	return dmaeng->bind(&dmaobj->base, (void *)dmaobj, (void *)pobject);
 }
 
 static struct nvkm_ofuncs
...
@@ -36,32 +36,14 @@ struct gf110_dmaobj {
 };
 
 static int
-gf110_dmaobj_bind(struct nvkm_dmaobj *obj, struct nvkm_object *parent,
+gf110_dmaobj_bind(struct nvkm_dmaobj *obj, struct nvkm_gpuobj *parent,
 		  struct nvkm_gpuobj **pgpuobj)
 {
 	struct gf110_dmaobj *dmaobj = container_of(obj, typeof(*dmaobj), base);
+	struct nvkm_device *device = dmaobj->base.base.engine->subdev.device;
 	int ret;
 
-	if (!nv_iclass(parent, NV_ENGCTX_CLASS)) {
-		switch (nv_mclass(parent->parent)) {
-		case GF110_DISP_CORE_CHANNEL_DMA:
-		case GK104_DISP_CORE_CHANNEL_DMA:
-		case GK110_DISP_CORE_CHANNEL_DMA:
-		case GM107_DISP_CORE_CHANNEL_DMA:
-		case GM204_DISP_CORE_CHANNEL_DMA:
-		case GF110_DISP_BASE_CHANNEL_DMA:
-		case GK104_DISP_BASE_CHANNEL_DMA:
-		case GK110_DISP_BASE_CHANNEL_DMA:
-		case GF110_DISP_OVERLAY_CONTROL_DMA:
-		case GK104_DISP_OVERLAY_CONTROL_DMA:
-			break;
-		default:
-			return -EINVAL;
-		}
-	} else
-		return 0;
-
-	ret = nvkm_gpuobj_new(parent, parent, 24, 32, 0, pgpuobj);
+	ret = nvkm_gpuobj_new(device, 24, 32, false, parent, pgpuobj);
 	if (ret == 0) {
 		nvkm_kmap(*pgpuobj);
 		nvkm_wo32(*pgpuobj, 0x00, dmaobj->flags0);
@@ -135,7 +117,7 @@ gf110_dmaobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 		return -EINVAL;
 	}
 
-	return dmaeng->bind(&dmaobj->base, nv_object(dmaobj), (void *)pobject);
+	return dmaeng->bind(&dmaobj->base, (void *)dmaobj, (void *)pobject);
 }
 
 static struct nvkm_ofuncs
...
@@ -37,41 +37,28 @@ struct nv04_dmaobj {
 };
 
 static int
-nv04_dmaobj_bind(struct nvkm_dmaobj *obj, struct nvkm_object *parent,
+nv04_dmaobj_bind(struct nvkm_dmaobj *obj, struct nvkm_gpuobj *parent,
 		 struct nvkm_gpuobj **pgpuobj)
 {
 	struct nv04_dmaobj *dmaobj = container_of(obj, typeof(*dmaobj), base);
-	struct nvkm_gpuobj *gpuobj;
+	struct nvkm_device *device = dmaobj->base.base.engine->subdev.device;
 	u64 offset = dmaobj->base.start & 0xfffff000;
 	u64 adjust = dmaobj->base.start & 0x00000fff;
 	u32 length = dmaobj->base.limit - dmaobj->base.start;
 	int ret;
 
-	if (!nv_iclass(parent, NV_ENGCTX_CLASS)) {
-		switch (nv_mclass(parent->parent)) {
-		case NV03_CHANNEL_DMA:
-		case NV10_CHANNEL_DMA:
-		case NV17_CHANNEL_DMA:
-		case NV40_CHANNEL_DMA:
-			break;
-		default:
-			return -EINVAL;
-		}
-	}
-
 	if (dmaobj->clone) {
 		struct nv04_mmu *mmu = nv04_mmu(dmaobj);
 		struct nvkm_memory *pgt = mmu->vm->pgt[0].mem[0];
 		if (!dmaobj->base.start)
-			return nvkm_gpuobj_dup(parent, pgt, pgpuobj);
+			return nvkm_gpuobj_wrap(pgt, pgpuobj);
 		nvkm_kmap(pgt);
 		offset = nvkm_ro32(pgt, 8 + (offset >> 10));
 		offset &= 0xfffff000;
 		nvkm_done(pgt);
 	}
 
-	ret = nvkm_gpuobj_new(parent, parent, 16, 16, 0, &gpuobj);
-	*pgpuobj = gpuobj;
+	ret = nvkm_gpuobj_new(device, 16, 16, false, parent, pgpuobj);
 	if (ret == 0) {
 		nvkm_kmap(*pgpuobj);
 		nvkm_wo32(*pgpuobj, 0x00, dmaobj->flags0 | (adjust << 20));
@@ -134,7 +121,7 @@ nv04_dmaobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 		return -EINVAL;
 	}
 
-	return dmaeng->bind(&dmaobj->base, nv_object(dmaobj), (void *)pobject);
+	return dmaeng->bind(&dmaobj->base, (void *)dmaobj, (void *)pobject);
 }
 
 static struct nvkm_ofuncs
...
@@ -37,37 +37,14 @@ struct nv50_dmaobj {
 };
 
 static int
-nv50_dmaobj_bind(struct nvkm_dmaobj *obj, struct nvkm_object *parent,
+nv50_dmaobj_bind(struct nvkm_dmaobj *obj, struct nvkm_gpuobj *parent,
 		 struct nvkm_gpuobj **pgpuobj)
 {
 	struct nv50_dmaobj *dmaobj = container_of(obj, typeof(*dmaobj), base);
+	struct nvkm_device *device = dmaobj->base.base.engine->subdev.device;
 	int ret;
 
-	if (!nv_iclass(parent, NV_ENGCTX_CLASS)) {
-		switch (nv_mclass(parent->parent)) {
-		case NV40_CHANNEL_DMA:
-		case NV50_CHANNEL_GPFIFO:
-		case G82_CHANNEL_GPFIFO:
-		case NV50_DISP_CORE_CHANNEL_DMA:
-		case G82_DISP_CORE_CHANNEL_DMA:
-		case GT206_DISP_CORE_CHANNEL_DMA:
-		case GT200_DISP_CORE_CHANNEL_DMA:
-		case GT214_DISP_CORE_CHANNEL_DMA:
-		case NV50_DISP_BASE_CHANNEL_DMA:
-		case G82_DISP_BASE_CHANNEL_DMA:
-		case GT200_DISP_BASE_CHANNEL_DMA:
-		case GT214_DISP_BASE_CHANNEL_DMA:
-		case NV50_DISP_OVERLAY_CHANNEL_DMA:
-		case G82_DISP_OVERLAY_CHANNEL_DMA:
-		case GT200_DISP_OVERLAY_CHANNEL_DMA:
-		case GT214_DISP_OVERLAY_CHANNEL_DMA:
-			break;
-		default:
-			return -EINVAL;
-		}
-	}
-
-	ret = nvkm_gpuobj_new(parent, parent, 24, 32, 0, pgpuobj);
+	ret = nvkm_gpuobj_new(device, 24, 32, false, parent, pgpuobj);
 	if (ret == 0) {
 		nvkm_kmap(*pgpuobj);
 		nvkm_wo32(*pgpuobj, 0x00, dmaobj->flags0 | nv_mclass(dmaobj));
@@ -164,7 +141,7 @@ nv50_dmaobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 		return -EINVAL;
 	}
 
-	return dmaeng->bind(&dmaobj->base, nv_object(dmaobj), (void *)pobject);
+	return dmaeng->bind(&dmaobj->base, (void *)dmaobj, (void *)pobject);
 }
 
 static struct nvkm_ofuncs
...
@@ -22,7 +22,7 @@ int _nvkm_dmaeng_ctor(struct nvkm_object *, struct nvkm_object *,
 struct nvkm_dmaeng_impl {
 	struct nvkm_oclass base;
 	struct nvkm_oclass *sclass;
-	int (*bind)(struct nvkm_dmaobj *, struct nvkm_object *,
+	int (*bind)(struct nvkm_dmaobj *, struct nvkm_gpuobj *,
 		    struct nvkm_gpuobj **);
 };
 #endif
@@ -61,6 +61,7 @@ nvkm_fifo_channel_create_(struct nvkm_object *parent,
 	struct nvkm_handle *handle;
 	struct nvkm_dmaobj *dmaobj;
 	struct nvkm_fifo *fifo = (void *)engine;
+	struct nvkm_fifo_base *base = (void *)parent;
 	struct nvkm_fifo_chan *chan;
 	struct nvkm_dmaeng *dmaeng;
 	struct nvkm_subdev *subdev = &fifo->engine.subdev;
@@ -91,7 +92,7 @@ nvkm_fifo_channel_create_(struct nvkm_object *parent,
 			return -EINVAL;
 		}
 
-		ret = dmaeng->bind(dmaobj, parent, &chan->pushgpu);
+		ret = dmaeng->bind(dmaobj, &base->gpuobj, &chan->pushgpu);
 		if (ret)
 			return ret;
 	}
@@ -131,7 +132,7 @@ nvkm_fifo_channel_destroy(struct nvkm_fifo_chan *chan)
 	fifo->channel[chan->chid] = NULL;
 	spin_unlock_irqrestore(&fifo->lock, flags);
 
-	nvkm_gpuobj_ref(NULL, &chan->pushgpu);
+	nvkm_gpuobj_del(&chan->pushgpu);
 	nvkm_namedb_destroy(&chan->namedb);
 }
...
@@ -140,6 +140,7 @@ g84_fifo_object_attach(struct nvkm_object *parent,
 	else
 		context = 0x00000004; /* just non-zero */
 
+	if (object->engine) {
 	switch (nv_engidx(object->engine)) {
 	case NVDEV_ENGINE_DMAOBJ:
 	case NVDEV_ENGINE_SW    : context |= 0x00000000; break;
@@ -158,6 +159,7 @@ g84_fifo_object_attach(struct nvkm_object *parent,
 	default:
 		return -EINVAL;
 	}
+	}
 
 	return nvkm_ramht_insert(chan->ramht, 0, handle, context);
 }
@@ -374,6 +376,7 @@ g84_fifo_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 		      struct nvkm_oclass *oclass, void *data, u32 size,
 		      struct nvkm_object **pobject)
 {
+	struct nvkm_device *device = nv_engine(engine)->subdev.device;
 	struct nv50_fifo_base *base;
 	int ret;
 
@@ -383,13 +386,13 @@ g84_fifo_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 	if (ret)
 		return ret;
 
-	ret = nvkm_gpuobj_new(nv_object(base), nv_object(base), 0x0200, 0,
-			      NVOBJ_FLAG_ZERO_ALLOC, &base->eng);
+	ret = nvkm_gpuobj_new(device, 0x0200, 0, true, &base->base.gpuobj,
+			      &base->eng);
 	if (ret)
 		return ret;
 
-	ret = nvkm_gpuobj_new(nv_object(base), nv_object(base), 0x4000, 0,
-			      0, &base->pgd);
+	ret = nvkm_gpuobj_new(device, 0x4000, 0, false, &base->base.gpuobj,
+			      &base->pgd);
 	if (ret)
 		return ret;
 
@@ -397,13 +400,13 @@ g84_fifo_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 	if (ret)
 		return ret;
 
-	ret = nvkm_gpuobj_new(nv_object(base), nv_object(base), 0x1000,
-			      0x400, NVOBJ_FLAG_ZERO_ALLOC, &base->cache);
+	ret = nvkm_gpuobj_new(device, 0x1000, 0x400, true, &base->base.gpuobj,
+			      &base->cache);
 	if (ret)
 		return ret;
 
-	ret = nvkm_gpuobj_new(nv_object(base), nv_object(base), 0x0100,
-			      0x100, NVOBJ_FLAG_ZERO_ALLOC, &base->ramfc);
+	ret = nvkm_gpuobj_new(device, 0x100, 0x100, true, &base->base.gpuobj,
+			      &base->ramfc);
 	if (ret)
 		return ret;
...
@@ -130,7 +130,7 @@ gf100_fifo_context_attach(struct nvkm_object *parent,
 	}
 
 	if (!ectx->vma.node) {
-		ret = nvkm_gpuobj_map_vm(nv_gpuobj(ectx), base->vm,
-					 NV_MEM_ACCESS_RW, &ectx->vma);
+		ret = nvkm_gpuobj_map(nv_gpuobj(ectx), base->vm,
+				      NV_MEM_ACCESS_RW, &ectx->vma);
 		if (ret)
 			return ret;
@@ -334,6 +334,7 @@ gf100_fifo_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 			struct nvkm_oclass *oclass, void *data, u32 size,
 			struct nvkm_object **pobject)
 {
+	struct nvkm_device *device = nv_engine(engine)->subdev.device;
 	struct gf100_fifo_base *base;
 	int ret;
 
@@ -344,8 +345,7 @@ gf100_fifo_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 	if (ret)
 		return ret;
 
-	ret = nvkm_gpuobj_new(nv_object(base), NULL, 0x10000, 0x1000, 0,
-			      &base->pgd);
+	ret = nvkm_gpuobj_new(device, 0x10000, 0x1000, false, NULL, &base->pgd);
 	if (ret)
 		return ret;
 
@@ -368,7 +368,7 @@ gf100_fifo_context_dtor(struct nvkm_object *object)
 {
 	struct gf100_fifo_base *base = (void *)object;
 	nvkm_vm_ref(NULL, &base->vm, base->pgd);
-	nvkm_gpuobj_ref(NULL, &base->pgd);
+	nvkm_gpuobj_del(&base->pgd);
 	nvkm_fifo_context_destroy(&base->base);
 }
...
@@ -154,7 +154,7 @@ gk104_fifo_context_attach(struct nvkm_object *parent,
 	}
 
 	if (!ectx->vma.node) {
-		ret = nvkm_gpuobj_map_vm(nv_gpuobj(ectx), base->vm,
-					 NV_MEM_ACCESS_RW, &ectx->vma);
+		ret = nvkm_gpuobj_map(nv_gpuobj(ectx), base->vm,
+				      NV_MEM_ACCESS_RW, &ectx->vma);
 		if (ret)
 			return ret;
@@ -388,6 +388,7 @@ gk104_fifo_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 			struct nvkm_oclass *oclass, void *data, u32 size,
 			struct nvkm_object **pobject)
 {
+	struct nvkm_device *device = nv_engine(engine)->subdev.device;
 	struct gk104_fifo_base *base;
 	int ret;
 
@@ -397,8 +398,7 @@ gk104_fifo_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 	if (ret)
 		return ret;
 
-	ret = nvkm_gpuobj_new(nv_object(base), NULL, 0x10000, 0x1000, 0,
-			      &base->pgd);
+	ret = nvkm_gpuobj_new(device, 0x10000, 0x1000, false, NULL, &base->pgd);
 	if (ret)
 		return ret;
 
@@ -421,7 +421,7 @@ gk104_fifo_context_dtor(struct nvkm_object *object)
 {
 	struct gk104_fifo_base *base = (void *)object;
 	nvkm_vm_ref(NULL, &base->vm, base->pgd);
-	nvkm_gpuobj_ref(NULL, &base->pgd);
+	nvkm_gpuobj_del(&base->pgd);
 	nvkm_fifo_context_destroy(&base->base);
 }
...
@@ -65,6 +65,7 @@ nv04_fifo_object_attach(struct nvkm_object *parent,
 	else
 		context = 0x00000004; /* just non-zero */
 
+	if (object->engine) {
 	switch (nv_engidx(object->engine)) {
 	case NVDEV_ENGINE_DMAOBJ:
 	case NVDEV_ENGINE_SW:
@@ -79,6 +80,7 @@ nv04_fifo_object_attach(struct nvkm_object *parent,
 	default:
 		return -EINVAL;
 	}
+	}
 
 	context |= 0x80000000; /* valid */
 	context |= chid << 24;
...
@@ -78,6 +78,7 @@ nv40_fifo_object_attach(struct nvkm_object *parent,
 	else
 		context = 0x00000004; /* just non-zero */
 
+	if (object->engine) {
 	switch (nv_engidx(object->engine)) {
 	case NVDEV_ENGINE_DMAOBJ:
 	case NVDEV_ENGINE_SW:
@@ -92,6 +93,7 @@ nv40_fifo_object_attach(struct nvkm_object *parent,
 	default:
 		return -EINVAL;
 	}
+	}
 
 	context |= chid << 23;
...
@@ -171,6 +171,7 @@ nv50_fifo_object_attach(struct nvkm_object *parent,
 	else
 		context = 0x00000004; /* just non-zero */
 
+	if (object->engine) {
 	switch (nv_engidx(object->engine)) {
 	case NVDEV_ENGINE_DMAOBJ:
 	case NVDEV_ENGINE_SW    : context |= 0x00000000; break;
@@ -179,6 +180,7 @@ nv50_fifo_object_attach(struct nvkm_object *parent,
 	default:
 		return -EINVAL;
 	}
+	}
 
 	return nvkm_ramht_insert(chan->ramht, 0, handle, context);
 }
@@ -402,6 +404,7 @@ nv50_fifo_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 		       struct nvkm_oclass *oclass, void *data, u32 size,
 		       struct nvkm_object **pobject)
 {
+	struct nvkm_device *device = nv_engine(engine)->subdev.device;
 	struct nv50_fifo_base *base;
 	int ret;
 
@@ -411,17 +414,17 @@ nv50_fifo_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 	if (ret)
 		return ret;
 
-	ret = nvkm_gpuobj_new(nv_object(base), nv_object(base), 0x0200,
-			      0x1000, NVOBJ_FLAG_ZERO_ALLOC, &base->ramfc);
+	ret = nvkm_gpuobj_new(device, 0x0200, 0x1000, true, &base->base.gpuobj,
+			      &base->ramfc);
 	if (ret)
 		return ret;
 
-	ret = nvkm_gpuobj_new(nv_object(base), nv_object(base), 0x1200, 0,
-			      NVOBJ_FLAG_ZERO_ALLOC, &base->eng);
+	ret = nvkm_gpuobj_new(device, 0x1200, 0, true, &base->base.gpuobj,
+			      &base->eng);
 	if (ret)
 		return ret;
 
-	ret = nvkm_gpuobj_new(nv_object(base), nv_object(base), 0x4000, 0, 0,
-			      &base->pgd);
+	ret = nvkm_gpuobj_new(device, 0x4000, 0, false, &base->base.gpuobj,
+			      &base->pgd);
 	if (ret)
 		return ret;
 
@@ -438,10 +441,10 @@ nv50_fifo_context_dtor(struct nvkm_object *object)
 {
 	struct nv50_fifo_base *base = (void *)object;
 	nvkm_vm_ref(NULL, &base->vm, base->pgd);
-	nvkm_gpuobj_ref(NULL, &base->pgd);
-	nvkm_gpuobj_ref(NULL, &base->eng);
-	nvkm_gpuobj_ref(NULL, &base->ramfc);
-	nvkm_gpuobj_ref(NULL, &base->cache);
+	nvkm_gpuobj_del(&base->pgd);
+	nvkm_gpuobj_del(&base->eng);
+	nvkm_gpuobj_del(&base->ramfc);
+	nvkm_gpuobj_del(&base->cache);
 	nvkm_fifo_context_destroy(&base->base);
 }
...
@@ -75,8 +75,7 @@ gf100_bar_ctor_vm(struct gf100_bar *bar, struct gf100_bar_vm *bar_vm,
 	if (ret)
 		return ret;
 
-	ret = nvkm_gpuobj_new(nv_object(bar), NULL, 0x8000, 0, 0,
-			      &bar_vm->pgd);
+	ret = nvkm_gpuobj_new(device, 0x8000, 0, false, NULL, &bar_vm->pgd);
 	if (ret)
 		return ret;
 
@@ -157,14 +156,14 @@ gf100_bar_dtor(struct nvkm_object *object)
 	struct gf100_bar *bar = (void *)object;
 
 	nvkm_vm_ref(NULL, &bar->bar[1].vm, bar->bar[1].pgd);
-	nvkm_gpuobj_ref(NULL, &bar->bar[1].pgd);
+	nvkm_gpuobj_del(&bar->bar[1].pgd);
 	nvkm_memory_del(&bar->bar[1].mem);
 
 	if (bar->bar[0].vm) {
 		nvkm_memory_del(&bar->bar[0].vm->pgt[0].mem[0]);
 		nvkm_vm_ref(NULL, &bar->bar[0].vm, bar->bar[0].pgd);
 	}
-	nvkm_gpuobj_ref(NULL, &bar->bar[0].pgd);
+	nvkm_gpuobj_del(&bar->bar[0].pgd);
 	nvkm_memory_del(&bar->bar[0].mem);
 
 	nvkm_bar_destroy(&bar->base);
...
@@ -99,7 +99,6 @@ nv50_bar_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 	static struct lock_class_key bar1_lock;
 	static struct lock_class_key bar3_lock;
 	struct nvkm_device *device = nv_device(parent);
-	struct nvkm_object *heap;
 	struct nvkm_vm *vm;
 	struct nv50_bar *bar;
 	u64 start, limit;
@@ -110,19 +109,17 @@ nv50_bar_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 	if (ret)
 		return ret;
 
-	ret = nvkm_gpuobj_new(nv_object(bar), NULL, 0x20000, 0,
-			      NVOBJ_FLAG_HEAP, &bar->mem);
-	heap = nv_object(bar->mem);
+	ret = nvkm_gpuobj_new(device, 0x20000, 0, false, NULL, &bar->mem);
 	if (ret)
 		return ret;
 
-	ret = nvkm_gpuobj_new(nv_object(bar), heap,
-			      (device->chipset == 0x50) ? 0x1400 : 0x0200,
-			      0, 0, &bar->pad);
+	ret = nvkm_gpuobj_new(device, (device->chipset == 0x50) ?
+			      0x1400 : 0x200, 0, false, bar->mem,
+			      &bar->pad);
 	if (ret)
 		return ret;
 
-	ret = nvkm_gpuobj_new(nv_object(bar), heap, 0x4000, 0, 0, &bar->pgd);
+	ret = nvkm_gpuobj_new(device, 0x4000, 0, false, bar->mem, &bar->pgd);
 	if (ret)
 		return ret;
 
@@ -145,7 +142,7 @@ nv50_bar_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 	if (ret)
 		return ret;
 
-	ret = nvkm_gpuobj_new(nv_object(bar), heap, 24, 16, 0, &bar->bar3);
+	ret = nvkm_gpuobj_new(device, 24, 16, false, bar->mem, &bar->bar3);
 	if (ret)
 		return ret;
 
@@ -174,7 +171,7 @@ nv50_bar_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 	if (ret)
 		return ret;
 
-	ret = nvkm_gpuobj_new(nv_object(bar), heap, 24, 16, 0, &bar->bar1);
+	ret = nvkm_gpuobj_new(device, 24, 16, false, bar->mem, &bar->bar1);
 	if (ret)
 		return ret;
 
@@ -203,16 +200,16 @@ static void
 nv50_bar_dtor(struct nvkm_object *object)
 {
 	struct nv50_bar *bar = (void *)object;
-	nvkm_gpuobj_ref(NULL, &bar->bar1);
+	nvkm_gpuobj_del(&bar->bar1);
 	nvkm_vm_ref(NULL, &bar->bar1_vm, bar->pgd);
-	nvkm_gpuobj_ref(NULL, &bar->bar3);
+	nvkm_gpuobj_del(&bar->bar3);
 	if (bar->bar3_vm) {
 		nvkm_memory_del(&bar->bar3_vm->pgt[0].mem[0]);
 		nvkm_vm_ref(NULL, &bar->bar3_vm, bar->pgd);
 	}
-	nvkm_gpuobj_ref(NULL, &bar->pgd);
-	nvkm_gpuobj_ref(NULL, &bar->pad);
-	nvkm_gpuobj_ref(NULL, &bar->mem);
+	nvkm_gpuobj_del(&bar->pgd);
+	nvkm_gpuobj_del(&bar->pad);
+	nvkm_gpuobj_del(&bar->mem);
 	nvkm_bar_destroy(&bar->base);
 }
...
@@ -420,7 +420,7 @@ nvkm_vm_link(struct nvkm_vm *vm, struct nvkm_gpuobj *pgd)
 	if (!vpgd)
 		return -ENOMEM;
 
-	nvkm_gpuobj_ref(pgd, &vpgd->obj);
+	vpgd->obj = pgd;
 
 	mutex_lock(&vm->mutex);
 	for (i = vm->fpde; i <= vm->lpde; i++)
@@ -434,7 +434,6 @@ static void
 nvkm_vm_unlink(struct nvkm_vm *vm, struct nvkm_gpuobj *mpgd)
 {
 	struct nvkm_vm_pgd *vpgd, *tmp;
-	struct nvkm_gpuobj *pgd = NULL;
 
 	if (!mpgd)
 		return;
@@ -442,15 +441,12 @@ nvkm_vm_unlink(struct nvkm_vm *vm, struct nvkm_gpuobj *mpgd)
 	mutex_lock(&vm->mutex);
 	list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) {
 		if (vpgd->obj == mpgd) {
-			pgd = vpgd->obj;
 			list_del(&vpgd->head);
 			kfree(vpgd);
 			break;
 		}
 	}
 	mutex_unlock(&vm->mutex);
-
-	nvkm_gpuobj_ref(NULL, &pgd);
 }
 
 static void
...
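
A note on the new accessor scheme in the core/gpuobj.c hunks above: acquire() asks the backing memory for a direct CPU mapping via nvkm_kmap() and, depending on whether one is available, swaps the object's function table to the "fast" (ioread32_native()/iowrite32_native() on gpuobj->map) or "slow" (nvkm_ro32()/nvkm_wo32() indirection) variant; release() swaps the idle table back. The hot rd32()/wr32() path therefore carries no per-access branch. A self-contained toy sketch of that table-swap pattern, in plain userspace C; every name below is illustrative and not part of the commit:

	#include <stdint.h>
	#include <stdio.h>

	struct obj;
	struct ops { uint32_t (*rd32)(struct obj *, uint32_t); };

	struct obj {
		const struct ops *func;
		uint32_t *map;          /* non-NULL once "acquired" */
		uint32_t backing[4];    /* stands in for nvkm_memory */
	};

	/* fast path: direct load, like ioread32_native(gpuobj->map + offset) */
	static uint32_t rd32_fast(struct obj *o, uint32_t offset)
	{
		return o->map[offset / 4];
	}

	/* slow path: indirect access, like nvkm_ro32(gpuobj->memory, offset) */
	static uint32_t rd32_slow(struct obj *o, uint32_t offset)
	{
		return o->backing[offset / 4];
	}

	static const struct ops fast = { .rd32 = rd32_fast };
	static const struct ops slow = { .rd32 = rd32_slow };

	/* acquire(): publish the mapping and pick the table once, up front */
	static void acquire(struct obj *o)
	{
		o->map = o->backing;    /* pretend the kmap succeeded */
		o->func = o->map ? &fast : &slow;
	}

	int main(void)
	{
		struct obj o = { .func = &slow, .backing = { 0xdeadbeef } };
		acquire(&o);
		printf("%08x\n", (unsigned)o.func->rd32(&o, 0)); /* via fast path */
		return 0;
	}
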