Commit 8f0649b5 authored by Ben Skeggs's avatar Ben Skeggs

drm/nouveau/fifo: convert user classes to new-style nvkm_object

Signed-off-by: default avatarBen Skeggs <bskeggs@redhat.com>
parent 9a65a38c
...@@ -64,6 +64,6 @@ u64 nvif_device_time(struct nvif_device *); ...@@ -64,6 +64,6 @@ u64 nvif_device_time(struct nvif_device *);
#include <engine/gr.h> #include <engine/gr.h>
#include <engine/sw.h> #include <engine/sw.h>
#define nvxx_fifo(a) nvkm_fifo(nvxx_device(a)) #define nvxx_fifo(a) nvxx_device(a)->fifo
#define nvxx_gr(a) nvkm_gr(nvxx_device(a)) #define nvxx_gr(a) nvkm_gr(nvxx_device(a))
#endif #endif
...@@ -45,7 +45,4 @@ int _nvkm_engctx_init(struct nvkm_object *); ...@@ -45,7 +45,4 @@ int _nvkm_engctx_init(struct nvkm_object *);
int _nvkm_engctx_fini(struct nvkm_object *, bool suspend); int _nvkm_engctx_fini(struct nvkm_object *, bool suspend);
#define _nvkm_engctx_rd32 _nvkm_gpuobj_rd32 #define _nvkm_engctx_rd32 _nvkm_gpuobj_rd32
#define _nvkm_engctx_wr32 _nvkm_gpuobj_wr32 #define _nvkm_engctx_wr32 _nvkm_gpuobj_wr32
struct nvkm_object *nvkm_engctx_get(struct nvkm_engine *, u64 addr);
void nvkm_engctx_put(struct nvkm_object *);
#endif #endif
...@@ -42,6 +42,7 @@ struct nvkm_engine_func { ...@@ -42,6 +42,7 @@ struct nvkm_engine_func {
int (*sclass)(struct nvkm_oclass *, int index); int (*sclass)(struct nvkm_oclass *, int index);
} fifo; } fifo;
const struct nvkm_object_func *cclass;
struct nvkm_sclass sclass[]; struct nvkm_sclass sclass[];
}; };
......
...@@ -4,7 +4,6 @@ ...@@ -4,7 +4,6 @@
struct nvkm_object; struct nvkm_object;
struct nvkm_handle { struct nvkm_handle {
struct nvkm_namedb *namedb;
struct list_head node; struct list_head node;
struct list_head head; struct list_head head;
......
#ifndef __NVKM_NAMEDB_H__
#define __NVKM_NAMEDB_H__
#include <core/parent.h>
struct nvkm_handle;

/* A namedb is a parent object that additionally keeps an rwlock-protected
 * list of named handles, so child objects can be looked up by handle name,
 * object class, or gpuobj instance address/offset.
 */
struct nvkm_namedb {
	struct nvkm_parent parent;
	rwlock_t lock;		/* protects 'list' */
	struct list_head list;	/* struct nvkm_handle, linked via handle->node */
};

/* Downcast to namedb; BUG()s on class mismatch in paranoia builds. */
static inline struct nvkm_namedb *
nv_namedb(void *obj)
{
#if CONFIG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA
	BUG_ON(!nv_iclass(obj, NV_NAMEDB_CLASS));
#endif
	return obj;
}

/* Convenience wrapper: allocates sizeof(**d) and returns it via *d. */
#define nvkm_namedb_create(p,e,c,v,s,m,d)                                      \
	nvkm_namedb_create_((p), (e), (c), (v), (s), (m),                      \
			    sizeof(**d), (void **)d)
/* init/fini/destroy just forward to the embedded parent object. */
#define nvkm_namedb_init(p)                                                    \
	nvkm_parent_init(&(p)->parent)
#define nvkm_namedb_fini(p,s)                                                  \
	nvkm_parent_fini(&(p)->parent, (s))
#define nvkm_namedb_destroy(p)                                                 \
	nvkm_parent_destroy(&(p)->parent)

int nvkm_namedb_create_(struct nvkm_object *, struct nvkm_object *,
			struct nvkm_oclass *, u32 pclass,
			struct nvkm_oclass *, u64 engcls,
			int size, void **);
int _nvkm_namedb_ctor(struct nvkm_object *, struct nvkm_object *,
		      struct nvkm_oclass *, void *, u32,
		      struct nvkm_object **);
#define _nvkm_namedb_dtor _nvkm_parent_dtor
#define _nvkm_namedb_init _nvkm_parent_init
#define _nvkm_namedb_fini _nvkm_parent_fini

/* Insert takes a reference on the object; remove drops it. */
int nvkm_namedb_insert(struct nvkm_namedb *, u32 name, struct nvkm_object *,
		       struct nvkm_handle *);
void nvkm_namedb_remove(struct nvkm_handle *);

/* The get_* functions return with the read lock held on success;
 * release it with nvkm_namedb_put().
 */
struct nvkm_handle *nvkm_namedb_get(struct nvkm_namedb *, u32);
struct nvkm_handle *nvkm_namedb_get_class(struct nvkm_namedb *, s32);
struct nvkm_handle *nvkm_namedb_get_vinst(struct nvkm_namedb *, u64);
struct nvkm_handle *nvkm_namedb_get_cinst(struct nvkm_namedb *, u32);
void nvkm_namedb_put(struct nvkm_handle *);
#endif
#ifndef __NVKM_FIFO_H__ #ifndef __NVKM_FIFO_H__
#define __NVKM_FIFO_H__ #define __NVKM_FIFO_H__
#include <core/namedb.h> #define nvkm_fifo_chan(p) container_of((p), struct nvkm_fifo_chan, object)
#define nvkm_fifo(p) container_of((p), struct nvkm_fifo, engine)
#include <core/engine.h>
#include <core/event.h>
#define NVKM_FIFO_CHID_NR 4096
struct nvkm_fifo_engn {
struct nvkm_object *object;
int refcount;
int usecount;
};
struct nvkm_fifo_chan { struct nvkm_fifo_chan {
struct nvkm_namedb namedb; const struct nvkm_fifo_chan_func *func;
struct nvkm_gpuobj *pushgpu; struct nvkm_fifo *fifo;
u64 engines;
struct nvkm_object object;
struct list_head head;
u16 chid;
struct nvkm_gpuobj *inst;
struct nvkm_gpuobj *push;
struct nvkm_vm *vm;
void __iomem *user; void __iomem *user;
u64 addr; u64 addr;
u32 size; u32 size;
u16 chid;
u64 inst; struct nvkm_fifo_engn engn[NVDEV_SUBDEV_NR];
}; };
static inline struct nvkm_fifo_chan * extern const struct nvkm_object_func nvkm_fifo_chan_func;
nvkm_fifo_chan(void *obj)
{
return (void *)nv_namedb(obj);
}
#include <core/gpuobj.h> #include <core/gpuobj.h>
struct nvkm_fifo_base { struct nvkm_fifo_base {
struct nvkm_gpuobj gpuobj; struct nvkm_gpuobj gpuobj;
}; };
...@@ -39,25 +53,27 @@ struct nvkm_fifo_base { ...@@ -39,25 +53,27 @@ struct nvkm_fifo_base {
#define _nvkm_fifo_context_rd32 _nvkm_gpuobj_rd32 #define _nvkm_fifo_context_rd32 _nvkm_gpuobj_rd32
#define _nvkm_fifo_context_wr32 _nvkm_gpuobj_wr32 #define _nvkm_fifo_context_wr32 _nvkm_gpuobj_wr32
#include <core/engine.h>
#include <core/event.h>
struct nvkm_fifo { struct nvkm_fifo {
struct nvkm_engine engine; struct nvkm_engine engine;
const struct nvkm_fifo_func *func;
struct nvkm_event cevent; /* channel creation event */ struct nvkm_event cevent; /* channel creation event */
struct nvkm_event uevent; /* async user trigger */ struct nvkm_event uevent; /* async user trigger */
struct nvkm_object **channel; DECLARE_BITMAP(mask, NVKM_FIFO_CHID_NR);
int nr;
struct list_head chan;
spinlock_t lock; spinlock_t lock;
u16 min;
u16 max;
int (*chid)(struct nvkm_fifo *, struct nvkm_object *);
void (*pause)(struct nvkm_fifo *, unsigned long *); void (*pause)(struct nvkm_fifo *, unsigned long *);
void (*start)(struct nvkm_fifo *, unsigned long *); void (*start)(struct nvkm_fifo *, unsigned long *);
}; };
struct nvkm_fifo_func {
void *(*dtor)(struct nvkm_fifo *);
const struct nvkm_fifo_chan_oclass *chan[];
};
void nvkm_fifo_chan_put(struct nvkm_fifo *, unsigned long flags, void nvkm_fifo_chan_put(struct nvkm_fifo *, unsigned long flags,
struct nvkm_fifo_chan **); struct nvkm_fifo_chan **);
struct nvkm_fifo_chan * struct nvkm_fifo_chan *
...@@ -65,12 +81,6 @@ nvkm_fifo_chan_inst(struct nvkm_fifo *, u64 inst, unsigned long *flags); ...@@ -65,12 +81,6 @@ nvkm_fifo_chan_inst(struct nvkm_fifo *, u64 inst, unsigned long *flags);
struct nvkm_fifo_chan * struct nvkm_fifo_chan *
nvkm_fifo_chan_chid(struct nvkm_fifo *, int chid, unsigned long *flags); nvkm_fifo_chan_chid(struct nvkm_fifo *, int chid, unsigned long *flags);
static inline struct nvkm_fifo *
nvkm_fifo(void *obj)
{
return (void *)nvkm_engine(obj, NVDEV_ENGINE_FIFO);
}
#define nvkm_fifo_create(o,e,c,fc,lc,d) \ #define nvkm_fifo_create(o,e,c,fc,lc,d) \
nvkm_fifo_create_((o), (e), (c), (fc), (lc), sizeof(**d), (void **)d) nvkm_fifo_create_((o), (e), (c), (fc), (lc), sizeof(**d), (void **)d)
#define nvkm_fifo_init(p) \ #define nvkm_fifo_init(p) \
...@@ -82,8 +92,6 @@ int nvkm_fifo_create_(struct nvkm_object *, struct nvkm_object *, ...@@ -82,8 +92,6 @@ int nvkm_fifo_create_(struct nvkm_object *, struct nvkm_object *,
struct nvkm_oclass *, int min, int max, struct nvkm_oclass *, int min, int max,
int size, void **); int size, void **);
void nvkm_fifo_destroy(struct nvkm_fifo *); void nvkm_fifo_destroy(struct nvkm_fifo *);
const char *
nvkm_client_name_for_fifo_chid(struct nvkm_fifo *fifo, u32 chid);
#define _nvkm_fifo_init _nvkm_engine_init #define _nvkm_fifo_init _nvkm_engine_init
#define _nvkm_fifo_fini _nvkm_engine_fini #define _nvkm_fifo_fini _nvkm_engine_fini
......
...@@ -228,7 +228,7 @@ nv84_fence_create(struct nouveau_drm *drm) ...@@ -228,7 +228,7 @@ nv84_fence_create(struct nouveau_drm *drm)
priv->base.context_new = nv84_fence_context_new; priv->base.context_new = nv84_fence_context_new;
priv->base.context_del = nv84_fence_context_del; priv->base.context_del = nv84_fence_context_del;
priv->base.contexts = fifo->max + 1; priv->base.contexts = fifo->nr;
priv->base.context_base = fence_context_alloc(priv->base.contexts); priv->base.context_base = fence_context_alloc(priv->base.contexts);
priv->base.uevent = true; priv->base.uevent = true;
......
...@@ -8,7 +8,6 @@ nvkm-y += nvkm/core/handle.o ...@@ -8,7 +8,6 @@ nvkm-y += nvkm/core/handle.o
nvkm-y += nvkm/core/ioctl.o nvkm-y += nvkm/core/ioctl.o
nvkm-y += nvkm/core/memory.o nvkm-y += nvkm/core/memory.o
nvkm-y += nvkm/core/mm.o nvkm-y += nvkm/core/mm.o
nvkm-y += nvkm/core/namedb.o
nvkm-y += nvkm/core/notify.o nvkm-y += nvkm/core/notify.o
nvkm-y += nvkm/core/object.o nvkm-y += nvkm/core/object.o
nvkm-y += nvkm/core/oproxy.o nvkm-y += nvkm/core/oproxy.o
......
...@@ -124,58 +124,12 @@ nvkm_engctx_destroy(struct nvkm_engctx *engctx) ...@@ -124,58 +124,12 @@ nvkm_engctx_destroy(struct nvkm_engctx *engctx)
int int
nvkm_engctx_init(struct nvkm_engctx *engctx) nvkm_engctx_init(struct nvkm_engctx *engctx)
{ {
struct nvkm_object *object = nv_object(engctx); return nvkm_gpuobj_init(&engctx->gpuobj);
struct nvkm_subdev *subdev = nv_subdev(object->engine);
struct nvkm_object *parent;
struct nvkm_subdev *pardev;
int ret;
ret = nvkm_gpuobj_init(&engctx->gpuobj);
if (ret)
return ret;
parent = nv_pclass(object->parent, NV_PARENT_CLASS);
pardev = nv_subdev(parent->engine);
if (nv_parent(parent)->context_attach) {
mutex_lock(&pardev->mutex);
ret = nv_parent(parent)->context_attach(parent, object);
mutex_unlock(&pardev->mutex);
}
if (ret) {
nvkm_error(pardev, "failed to attach %s context, %d\n",
nvkm_subdev_name[subdev->index], ret);
return ret;
}
nvkm_trace(pardev, "attached %s context\n", nvkm_subdev_name[subdev->index]);
return 0;
} }
int int
nvkm_engctx_fini(struct nvkm_engctx *engctx, bool suspend) nvkm_engctx_fini(struct nvkm_engctx *engctx, bool suspend)
{ {
struct nvkm_object *object = nv_object(engctx);
struct nvkm_subdev *subdev = nv_subdev(object->engine);
struct nvkm_object *parent;
struct nvkm_subdev *pardev;
int ret = 0;
parent = nv_pclass(object->parent, NV_PARENT_CLASS);
pardev = nv_subdev(parent->engine);
if (nv_parent(parent)->context_detach) {
mutex_lock(&pardev->mutex);
ret = nv_parent(parent)->context_detach(parent, suspend, object);
mutex_unlock(&pardev->mutex);
}
if (ret) {
nvkm_error(pardev, "failed to detach %s context, %d\n",
nvkm_subdev_name[subdev->index], ret);
return ret;
}
nvkm_trace(pardev, "detached %s context\n", nvkm_subdev_name[subdev->index]);
return nvkm_gpuobj_fini(&engctx->gpuobj, suspend); return nvkm_gpuobj_fini(&engctx->gpuobj, suspend);
} }
...@@ -210,30 +164,3 @@ _nvkm_engctx_fini(struct nvkm_object *object, bool suspend) ...@@ -210,30 +164,3 @@ _nvkm_engctx_fini(struct nvkm_object *object, bool suspend)
{ {
return nvkm_engctx_fini(nv_engctx(object), suspend); return nvkm_engctx_fini(nv_engctx(object), suspend);
} }
struct nvkm_object *
nvkm_engctx_get(struct nvkm_engine *engine, u64 addr)
{
struct nvkm_engctx *engctx;
unsigned long flags;
spin_lock_irqsave(&engine->lock, flags);
list_for_each_entry(engctx, &engine->contexts, head) {
if (engctx->addr == addr) {
engctx->save = flags;
return nv_object(engctx);
}
}
spin_unlock_irqrestore(&engine->lock, flags);
return NULL;
}
void
nvkm_engctx_put(struct nvkm_object *object)
{
if (object) {
struct nvkm_engine *engine = nv_engine(object->engine);
struct nvkm_engctx *engctx = nv_engctx(object);
spin_unlock_irqrestore(&engine->lock, engctx->save);
}
}
...@@ -231,6 +231,8 @@ nvkm_gpuobj_destroy(struct nvkm_gpuobj *gpuobj) ...@@ -231,6 +231,8 @@ nvkm_gpuobj_destroy(struct nvkm_gpuobj *gpuobj)
nvkm_object_destroy(&gpuobj->object); nvkm_object_destroy(&gpuobj->object);
} }
#include <engine/fifo.h>
int int
nvkm_gpuobj_create_(struct nvkm_object *parent, struct nvkm_object *engine, nvkm_gpuobj_create_(struct nvkm_object *parent, struct nvkm_object *engine,
struct nvkm_oclass *oclass, u32 pclass, struct nvkm_oclass *oclass, u32 pclass,
...@@ -240,11 +242,19 @@ nvkm_gpuobj_create_(struct nvkm_object *parent, struct nvkm_object *engine, ...@@ -240,11 +242,19 @@ nvkm_gpuobj_create_(struct nvkm_object *parent, struct nvkm_object *engine,
struct nvkm_device *device = nv_device(parent); struct nvkm_device *device = nv_device(parent);
struct nvkm_gpuobj *pargpu = NULL; struct nvkm_gpuobj *pargpu = NULL;
struct nvkm_gpuobj *gpuobj; struct nvkm_gpuobj *gpuobj;
struct nvkm_object *object = objgpu;
const bool zero = (flags & NVOBJ_FLAG_ZERO_ALLOC); const bool zero = (flags & NVOBJ_FLAG_ZERO_ALLOC);
int ret; int ret;
*pobject = NULL; *pobject = NULL;
while (object && object->func != &nvkm_fifo_chan_func)
object = object->parent;
if (object) {
struct nvkm_fifo_chan *chan = nvkm_fifo_chan(object);
pargpu = chan->inst;
} else
if (objgpu) { if (objgpu) {
while ((objgpu = nv_pclass(objgpu, NV_GPUOBJ_CLASS))) { while ((objgpu = nv_pclass(objgpu, NV_GPUOBJ_CLASS))) {
if (nv_gpuobj(objgpu)->heap.block_size) if (nv_gpuobj(objgpu)->heap.block_size)
......
...@@ -23,7 +23,7 @@ ...@@ -23,7 +23,7 @@
*/ */
#include <core/handle.h> #include <core/handle.h>
#include <core/client.h> #include <core/client.h>
#include <core/namedb.h> #include <core/parent.h>
#define hprintk(h,l,f,a...) do { \ #define hprintk(h,l,f,a...) do { \
struct nvkm_handle *p = (h)->parent; u32 n = p ? p->name : ~0; \ struct nvkm_handle *p = (h)->parent; u32 n = p ? p->name : ~0; \
...@@ -98,14 +98,9 @@ int ...@@ -98,14 +98,9 @@ int
nvkm_handle_create(struct nvkm_handle *parent, u32 _handle, nvkm_handle_create(struct nvkm_handle *parent, u32 _handle,
struct nvkm_object *object, struct nvkm_handle **phandle) struct nvkm_object *object, struct nvkm_handle **phandle)
{ {
struct nvkm_object *namedb;
struct nvkm_handle *handle; struct nvkm_handle *handle;
int ret; int ret;
namedb = parent ? parent->object : NULL;
while (namedb && !nv_iclass(namedb, NV_NAMEDB_CLASS))
namedb = namedb->parent;
handle = kzalloc(sizeof(*handle), GFP_KERNEL); handle = kzalloc(sizeof(*handle), GFP_KERNEL);
if (!handle) if (!handle)
return -ENOMEM; return -ENOMEM;
...@@ -118,15 +113,6 @@ nvkm_handle_create(struct nvkm_handle *parent, u32 _handle, ...@@ -118,15 +113,6 @@ nvkm_handle_create(struct nvkm_handle *parent, u32 _handle,
handle->parent = parent; handle->parent = parent;
nvkm_object_ref(object, &handle->object); nvkm_object_ref(object, &handle->object);
if (namedb) {
ret = nvkm_namedb_insert(nv_namedb(namedb), _handle,
object, handle);
if (ret) {
kfree(handle);
return ret;
}
}
if (parent) { if (parent) {
if (nv_iclass(parent->object, NV_PARENT_CLASS) && if (nv_iclass(parent->object, NV_PARENT_CLASS) &&
nv_parent(parent->object)->object_attach) { nv_parent(parent->object)->object_attach) {
...@@ -168,40 +154,6 @@ nvkm_handle_destroy(struct nvkm_handle *handle) ...@@ -168,40 +154,6 @@ nvkm_handle_destroy(struct nvkm_handle *handle)
} }
hprintk(handle, TRACE, "destroy completed\n"); hprintk(handle, TRACE, "destroy completed\n");
nvkm_namedb_remove(handle); nvkm_object_ref(NULL, &handle->object);
kfree(handle); kfree(handle);
} }
struct nvkm_handle *
nvkm_handle_get_class(struct nvkm_object *engctx, u16 oclass)
{
struct nvkm_namedb *namedb;
if (engctx && (namedb = (void *)nv_pclass(engctx, NV_NAMEDB_CLASS)))
return nvkm_namedb_get_class(namedb, oclass);
return NULL;
}
struct nvkm_handle *
nvkm_handle_get_vinst(struct nvkm_object *engctx, u64 vinst)
{
struct nvkm_namedb *namedb;
if (engctx && (namedb = (void *)nv_pclass(engctx, NV_NAMEDB_CLASS)))
return nvkm_namedb_get_vinst(namedb, vinst);
return NULL;
}
struct nvkm_handle *
nvkm_handle_get_cinst(struct nvkm_object *engctx, u32 cinst)
{
struct nvkm_namedb *namedb;
if (engctx && (namedb = (void *)nv_pclass(engctx, NV_NAMEDB_CLASS)))
return nvkm_namedb_get_cinst(namedb, cinst);
return NULL;
}
void
nvkm_handle_put(struct nvkm_handle *handle)
{
if (handle)
nvkm_namedb_put(handle);
}
...@@ -25,7 +25,7 @@ ...@@ -25,7 +25,7 @@
#include <core/client.h> #include <core/client.h>
#include <core/engine.h> #include <core/engine.h>
#include <core/handle.h> #include <core/handle.h>
#include <core/namedb.h> #include <core/parent.h>
#include <nvif/unpack.h> #include <nvif/unpack.h>
#include <nvif/ioctl.h> #include <nvif/ioctl.h>
......
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include <core/namedb.h>
#include <core/gpuobj.h>
#include <core/handle.h>
/* Find the handle registered under 'name', or NULL if absent.
 * Caller must hold namedb->lock (read or write).
 */
static struct nvkm_handle *
nvkm_namedb_lookup(struct nvkm_namedb *namedb, u32 name)
{
	struct nvkm_handle *h;

	list_for_each_entry(h, &namedb->list, node) {
		if (h->name == name)
			return h;
	}

	return NULL;
}
/* Find the first handle whose object has class 'oclass', or NULL.
 * Caller must hold namedb->lock (read or write).
 */
static struct nvkm_handle *
nvkm_namedb_lookup_class(struct nvkm_namedb *namedb, s32 oclass)
{
	struct nvkm_handle *h;

	list_for_each_entry(h, &namedb->list, node) {
		if (nv_mclass(h->object) == oclass)
			return h;
	}

	return NULL;
}
/* Find the first gpuobj-classed handle whose address matches 'vinst',
 * or NULL.  Caller must hold namedb->lock (read or write).
 */
static struct nvkm_handle *
nvkm_namedb_lookup_vinst(struct nvkm_namedb *namedb, u64 vinst)
{
	struct nvkm_handle *h;

	list_for_each_entry(h, &namedb->list, node) {
		if (nv_iclass(h->object, NV_GPUOBJ_CLASS) &&
		    nv_gpuobj(h->object)->addr == vinst)
			return h;
	}

	return NULL;
}
/* Find the first gpuobj-classed handle whose heap-node offset matches
 * 'cinst', or NULL.  Caller must hold namedb->lock (read or write).
 */
static struct nvkm_handle *
nvkm_namedb_lookup_cinst(struct nvkm_namedb *namedb, u32 cinst)
{
	struct nvkm_handle *h;

	list_for_each_entry(h, &namedb->list, node) {
		if (nv_iclass(h->object, NV_GPUOBJ_CLASS) &&
		    nv_gpuobj(h->object)->node &&
		    nv_gpuobj(h->object)->node->offset == cinst)
			return h;
	}

	return NULL;
}
/* Register 'handle' under 'name' in the namedb.
 *
 * On success, takes a reference on 'object' (stored in handle->object),
 * records the owning namedb, and links the handle into the list.
 * Returns -EEXIST if the name is already in use.
 */
int
nvkm_namedb_insert(struct nvkm_namedb *namedb, u32 name,
		   struct nvkm_object *object,
		   struct nvkm_handle *handle)
{
	int ret = -EEXIST;
	write_lock_irq(&namedb->lock);
	if (!nvkm_namedb_lookup(namedb, name)) {
		nvkm_object_ref(object, &handle->object);
		handle->namedb = namedb;
		list_add(&handle->node, &namedb->list);
		ret = 0;
	}
	write_unlock_irq(&namedb->lock);
	return ret;
}
/* Unlink a handle from its namedb (if it was ever inserted) and drop
 * the handle's object reference, if any.
 */
void
nvkm_namedb_remove(struct nvkm_handle *handle)
{
	struct nvkm_namedb *namedb = handle->namedb;
	struct nvkm_object *object = handle->object;
	/* namedb is only set by nvkm_namedb_insert() */
	if (handle->namedb) {
		write_lock_irq(&namedb->lock);
		list_del(&handle->node);
		write_unlock_irq(&namedb->lock);
	}
	/* releases the reference taken at insert time (no-op if NULL) */
	nvkm_object_ref(NULL, &object);
}
/* Look up a handle by name.
 *
 * On success the namedb read lock is intentionally left held and must
 * be released with nvkm_namedb_put(); on failure the lock is dropped
 * here and NULL is returned.
 */
struct nvkm_handle *
nvkm_namedb_get(struct nvkm_namedb *namedb, u32 name)
{
	struct nvkm_handle *handle;
	read_lock(&namedb->lock);
	handle = nvkm_namedb_lookup(namedb, name);
	if (handle == NULL)
		read_unlock(&namedb->lock);
	return handle;
}
/* Look up a handle by object class.  Same locking contract as
 * nvkm_namedb_get(): read lock held on success, dropped on failure.
 */
struct nvkm_handle *
nvkm_namedb_get_class(struct nvkm_namedb *namedb, s32 oclass)
{
	struct nvkm_handle *handle;
	read_lock(&namedb->lock);
	handle = nvkm_namedb_lookup_class(namedb, oclass);
	if (handle == NULL)
		read_unlock(&namedb->lock);
	return handle;
}
/* Look up a handle by gpuobj instance address.  Same locking contract
 * as nvkm_namedb_get(): read lock held on success, dropped on failure.
 */
struct nvkm_handle *
nvkm_namedb_get_vinst(struct nvkm_namedb *namedb, u64 vinst)
{
	struct nvkm_handle *handle;
	read_lock(&namedb->lock);
	handle = nvkm_namedb_lookup_vinst(namedb, vinst);
	if (handle == NULL)
		read_unlock(&namedb->lock);
	return handle;
}
/* Look up a handle by gpuobj heap-node offset.  Same locking contract
 * as nvkm_namedb_get(): read lock held on success, dropped on failure.
 */
struct nvkm_handle *
nvkm_namedb_get_cinst(struct nvkm_namedb *namedb, u32 cinst)
{
	struct nvkm_handle *handle;
	read_lock(&namedb->lock);
	handle = nvkm_namedb_lookup_cinst(namedb, cinst);
	if (handle == NULL)
		read_unlock(&namedb->lock);
	return handle;
}
/* Release the read lock left held by a successful nvkm_namedb_get*()
 * call.  Safe to call with NULL (no-op).
 */
void
nvkm_namedb_put(struct nvkm_handle *handle)
{
	if (handle)
		read_unlock(&handle->namedb->lock);
}
/* Base constructor for namedb objects: creates the underlying parent
 * object with NV_NAMEDB_CLASS folded into pclass, then initialises the
 * handle list and the rwlock that protects it.
 */
int
nvkm_namedb_create_(struct nvkm_object *parent, struct nvkm_object *engine,
		    struct nvkm_oclass *oclass, u32 pclass,
		    struct nvkm_oclass *sclass, u64 engcls,
		    int length, void **pobject)
{
	struct nvkm_namedb *namedb;
	int ret;

	ret = nvkm_parent_create_(parent, engine, oclass, pclass |
				  NV_NAMEDB_CLASS, sclass, engcls,
				  length, pobject);
	/* *pobject is assigned even on failure; only used after the check */
	namedb = *pobject;
	if (ret)
		return ret;

	rwlock_init(&namedb->lock);
	INIT_LIST_HEAD(&namedb->list);
	return 0;
}
/* Generic object-tree ctor wrapper around nvkm_namedb_create() with no
 * subclass list and no engine mask.
 */
int
_nvkm_namedb_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
		  struct nvkm_oclass *oclass, void *data, u32 size,
		  struct nvkm_object **pobject)
{
	struct nvkm_namedb *object;
	int ret;

	ret = nvkm_namedb_create(parent, engine, oclass, 0, NULL, 0, &object);
	*pobject = nv_object(object);
	if (ret)
		return ret;
	return 0;
}
...@@ -85,7 +85,8 @@ gt215_ce_intr(struct nvkm_falcon *ce, struct nvkm_fifo_chan *chan) ...@@ -85,7 +85,8 @@ gt215_ce_intr(struct nvkm_falcon *ce, struct nvkm_fifo_chan *chan)
nvkm_error(subdev, "DISPATCH_ERROR %04x [%s] ch %d [%010llx %s] " nvkm_error(subdev, "DISPATCH_ERROR %04x [%s] ch %d [%010llx %s] "
"subc %d mthd %04x data %08x\n", ssta, "subc %d mthd %04x data %08x\n", ssta,
en ? en->name : "", chan ? chan->chid : -1, en ? en->name : "", chan ? chan->chid : -1,
chan ? chan->inst : 0, nvkm_client_name(chan), chan ? chan->inst->addr : 0,
chan ? chan->object.client->name : "unknown",
subc, mthd, data); subc, mthd, data);
} }
......
...@@ -121,9 +121,10 @@ g84_cipher_intr(struct nvkm_subdev *subdev) ...@@ -121,9 +121,10 @@ g84_cipher_intr(struct nvkm_subdev *subdev)
if (stat) { if (stat) {
nvkm_snprintbf(msg, sizeof(msg), g84_cipher_intr_mask, stat); nvkm_snprintbf(msg, sizeof(msg), g84_cipher_intr_mask, stat);
nvkm_error(subdev, "%08x [%s] ch %d [%010llx %s] " nvkm_error(subdev, "%08x [%s] ch %d [%010llx %s] "
"mthd %04x data %08x\n", "mthd %04x data %08x\n", stat, msg,
stat, msg, chan ? chan->chid : -1, (u64)inst << 12, chan ? chan->chid : -1, (u64)inst << 12,
nvkm_client_name(chan), mthd, data); chan ? chan->object.client->name : "unknown",
mthd, data);
} }
nvkm_fifo_chan_put(fifo, flags, &chan); nvkm_fifo_chan_put(fifo, flags, &chan);
......
...@@ -21,7 +21,7 @@ ...@@ -21,7 +21,7 @@
* *
* Authors: Ben Skeggs * Authors: Ben Skeggs
*/ */
#define nv04_disp_root(p) container_of((p), struct nv04_disp_root, object); #define nv04_disp_root(p) container_of((p), struct nv04_disp_root, object)
#include "priv.h" #include "priv.h"
#include <core/client.h> #include <core/client.h>
......
...@@ -24,6 +24,7 @@ ...@@ -24,6 +24,7 @@
#include "priv.h" #include "priv.h"
#include <core/client.h> #include <core/client.h>
#include <engine/fifo.h>
#include <nvif/class.h> #include <nvif/class.h>
...@@ -88,11 +89,19 @@ nvkm_dma_oclass_base = { ...@@ -88,11 +89,19 @@ nvkm_dma_oclass_base = {
.ctor = nvkm_dma_oclass_new, .ctor = nvkm_dma_oclass_new,
}; };
/* Fifo-channel path into DMA object creation: forwards to
 * nvkm_dma_oclass_new() with the device derived from the owning engine.
 */
static int
nvkm_dma_oclass_fifo_new(const struct nvkm_oclass *oclass, void *data, u32 size,
			 struct nvkm_object **pobject)
{
	return nvkm_dma_oclass_new(oclass->engine->subdev.device,
				   oclass, data, size, pobject);
}
static const struct nvkm_sclass static const struct nvkm_sclass
nvkm_dma_sclass[] = { nvkm_dma_sclass[] = {
{ 0, 0, NV_DMA_FROM_MEMORY }, { 0, 0, NV_DMA_FROM_MEMORY, NULL, nvkm_dma_oclass_fifo_new },
{ 0, 0, NV_DMA_TO_MEMORY }, { 0, 0, NV_DMA_TO_MEMORY, NULL, nvkm_dma_oclass_fifo_new },
{ 0, 0, NV_DMA_IN_MEMORY }, { 0, 0, NV_DMA_IN_MEMORY, NULL, nvkm_dma_oclass_fifo_new },
}; };
static int static int
...@@ -110,89 +119,21 @@ nvkm_dma_oclass_base_get(struct nvkm_oclass *sclass, int index, ...@@ -110,89 +119,21 @@ nvkm_dma_oclass_base_get(struct nvkm_oclass *sclass, int index,
return count; return count;
} }
static const struct nvkm_engine_func
nvkm_dma = {
.base.sclass = nvkm_dma_oclass_base_get,
};
#include <core/gpuobj.h>
static struct nvkm_oclass empty = {
.ofuncs = &(struct nvkm_ofuncs) {
.dtor = nvkm_object_destroy,
.init = _nvkm_object_init,
.fini = _nvkm_object_fini,
},
};
static int static int
nvkm_dmaobj_compat_ctor(struct nvkm_object *parent, struct nvkm_object *engine, nvkm_dma_oclass_fifo_get(struct nvkm_oclass *oclass, int index)
struct nvkm_oclass *oclass, void *data, u32 size,
struct nvkm_object **pobject)
{
struct nvkm_oclass hack = {
.base.oclass = oclass->handle,
.client = nvkm_client(parent),
.parent = parent,
.engine = nv_engine(engine),
};
struct nvkm_dma *dma = (void *)engine;
struct nvkm_dma_impl *impl = (void *)dma->engine.subdev.object.oclass;
struct nvkm_dmaobj *dmaobj = NULL;
struct nvkm_gpuobj *gpuobj;
int ret;
ret = impl->class_new(dma, &hack, data, size, &dmaobj);
if (dmaobj)
*pobject = &dmaobj->object;
if (ret)
return ret;
gpuobj = (void *)nv_pclass(parent, NV_GPUOBJ_CLASS);
ret = dmaobj->func->bind(dmaobj, gpuobj, 16, &gpuobj);
nvkm_object_ref(NULL, pobject);
if (ret)
return ret;
ret = nvkm_object_create(parent, engine, &empty, 0, pobject);
if (ret)
return ret;
gpuobj->object.parent = *pobject;
gpuobj->object.engine = &dma->engine;
gpuobj->object.oclass = oclass;
gpuobj->object.pclass = NV_GPUOBJ_CLASS;
#if CONFIG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA
gpuobj->object._magic = NVKM_OBJECT_MAGIC;
#endif
*pobject = &gpuobj->object;
return 0;
}
static void
nvkm_dmaobj_compat_dtor(struct nvkm_object *object)
{ {
struct nvkm_object *parent = object->parent; const int count = ARRAY_SIZE(nvkm_dma_sclass);
struct nvkm_gpuobj *gpuobj = (void *)object; if (index < count) {
nvkm_gpuobj_del(&gpuobj); oclass->base = nvkm_dma_sclass[index];
nvkm_object_ref(NULL, &parent); return index;
}
return count;
} }
static struct nvkm_ofuncs static const struct nvkm_engine_func
nvkm_dmaobj_compat_ofuncs = { nvkm_dma = {
.ctor = nvkm_dmaobj_compat_ctor, .base.sclass = nvkm_dma_oclass_base_get,
.dtor = nvkm_dmaobj_compat_dtor, .fifo.sclass = nvkm_dma_oclass_fifo_get,
.init = _nvkm_object_init,
.fini = _nvkm_object_fini,
};
static struct nvkm_oclass
nvkm_dma_compat_sclass[] = {
{ NV_DMA_FROM_MEMORY, &nvkm_dmaobj_compat_ofuncs },
{ NV_DMA_TO_MEMORY, &nvkm_dmaobj_compat_ofuncs },
{ NV_DMA_IN_MEMORY, &nvkm_dmaobj_compat_ofuncs },
{}
}; };
int int
...@@ -209,7 +150,6 @@ _nvkm_dma_ctor(struct nvkm_object *parent, struct nvkm_object *engine, ...@@ -209,7 +150,6 @@ _nvkm_dma_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
if (ret) if (ret)
return ret; return ret;
dmaeng->engine.sclass = nvkm_dma_compat_sclass;
dmaeng->engine.func = &nvkm_dma; dmaeng->engine.func = &nvkm_dma;
return 0; return 0;
} }
...@@ -44,12 +44,13 @@ nvkm_fifo_chan_put(struct nvkm_fifo *fifo, unsigned long flags, ...@@ -44,12 +44,13 @@ nvkm_fifo_chan_put(struct nvkm_fifo *fifo, unsigned long flags,
struct nvkm_fifo_chan * struct nvkm_fifo_chan *
nvkm_fifo_chan_inst(struct nvkm_fifo *fifo, u64 inst, unsigned long *rflags) nvkm_fifo_chan_inst(struct nvkm_fifo *fifo, u64 inst, unsigned long *rflags)
{ {
struct nvkm_fifo_chan *chan;
unsigned long flags; unsigned long flags;
int i;
spin_lock_irqsave(&fifo->lock, flags); spin_lock_irqsave(&fifo->lock, flags);
for (i = fifo->min; i < fifo->max; i++) { list_for_each_entry(chan, &fifo->chan, head) {
struct nvkm_fifo_chan *chan = (void *)fifo->channel[i]; if (chan->inst->addr == inst) {
if (chan && chan->inst == inst) { list_del(&chan->head);
list_add(&chan->head, &fifo->chan);
*rflags = flags; *rflags = flags;
return chan; return chan;
} }
...@@ -61,45 +62,21 @@ nvkm_fifo_chan_inst(struct nvkm_fifo *fifo, u64 inst, unsigned long *rflags) ...@@ -61,45 +62,21 @@ nvkm_fifo_chan_inst(struct nvkm_fifo *fifo, u64 inst, unsigned long *rflags)
struct nvkm_fifo_chan * struct nvkm_fifo_chan *
nvkm_fifo_chan_chid(struct nvkm_fifo *fifo, int chid, unsigned long *rflags) nvkm_fifo_chan_chid(struct nvkm_fifo *fifo, int chid, unsigned long *rflags)
{ {
struct nvkm_fifo_chan *chan;
unsigned long flags; unsigned long flags;
spin_lock_irqsave(&fifo->lock, flags); spin_lock_irqsave(&fifo->lock, flags);
if (fifo->channel[chid]) { list_for_each_entry(chan, &fifo->chan, head) {
*rflags = flags; if (chan->chid == chid) {
return (void *)fifo->channel[chid]; list_del(&chan->head);
list_add(&chan->head, &fifo->chan);
*rflags = flags;
return chan;
}
} }
spin_unlock_irqrestore(&fifo->lock, flags); spin_unlock_irqrestore(&fifo->lock, flags);
return NULL; return NULL;
} }
/* Resolve the channel id owning 'object' by walking up its parent
 * chain until a parent that is an engine context of this fifo's engine
 * class is found; the object below that context is then treated as the
 * fifo channel and its chid returned, or -1 if none is found.
 * NOTE(review): assumes the direct child of a matching engctx is a
 * struct nvkm_fifo_chan -- confirm against callers.
 */
static int
nvkm_fifo_chid(struct nvkm_fifo *fifo, struct nvkm_object *object)
{
	int engidx = nv_hclass(fifo) & 0xff;

	while (object && object->parent) {
		if ( nv_iclass(object->parent, NV_ENGCTX_CLASS) &&
		    (nv_hclass(object->parent) & 0xff) == engidx)
			return nvkm_fifo_chan(object)->chid;
		object = object->parent;
	}

	return -1;
}
/* Return the client name owning channel 'chid', looked up under
 * fifo->lock.  Note 'max' is inclusive here.  If the chid is out of
 * range or unused, chan stays NULL and nvkm_client_name(NULL) decides
 * the fallback string.
 */
const char *
nvkm_client_name_for_fifo_chid(struct nvkm_fifo *fifo, u32 chid)
{
	struct nvkm_fifo_chan *chan = NULL;
	unsigned long flags;

	spin_lock_irqsave(&fifo->lock, flags);
	if (chid >= fifo->min && chid <= fifo->max)
		chan = (void *)fifo->channel[chid];
	spin_unlock_irqrestore(&fifo->lock, flags);

	return nvkm_client_name(chan);
}
static int static int
nvkm_fifo_event_ctor(struct nvkm_object *object, void *data, u32 size, nvkm_fifo_event_ctor(struct nvkm_object *object, void *data, u32 size,
struct nvkm_notify *notify) struct nvkm_notify *notify)
...@@ -144,21 +121,62 @@ nvkm_fifo_uevent(struct nvkm_fifo *fifo) ...@@ -144,21 +121,62 @@ nvkm_fifo_uevent(struct nvkm_fifo *fifo)
nvkm_event_send(&fifo->uevent, 1, 0, &rep, sizeof(rep)); nvkm_event_send(&fifo->uevent, 1, 0, &rep, sizeof(rep));
} }
/* Instantiate a fifo channel object: oclass->engn carries the
 * nvkm_fifo_chan_oclass selected during class enumeration, whose ctor
 * does the real work.
 */
static int
nvkm_fifo_class_new(struct nvkm_device *device,
		    const struct nvkm_oclass *oclass, void *data, u32 size,
		    struct nvkm_object **pobject)
{
	const struct nvkm_fifo_chan_oclass *sclass = oclass->engn;
	struct nvkm_fifo *fifo = nvkm_fifo(oclass->engine);
	return sclass->ctor(fifo, oclass, data, size, pobject);
}
/* Device-level oclass wrapper used for all fifo channel classes. */
static const struct nvkm_device_oclass
nvkm_fifo_class = {
	.ctor = nvkm_fifo_class_new,
};
/* Enumerate channel classes from the NULL-terminated
 * fifo->func->chan[] array.  On a match, fills in oclass (stashing the
 * chan oclass in ->engn for nvkm_fifo_class_new()) and returns the
 * index; otherwise returns the total class count.
 */
static int
nvkm_fifo_class_get(struct nvkm_oclass *oclass, int index,
		    const struct nvkm_device_oclass **class)
{
	struct nvkm_fifo *fifo = nvkm_fifo(oclass->engine);
	const struct nvkm_fifo_chan_oclass *sclass;
	int c = 0;

	while ((sclass = fifo->func->chan[c])) {
		if (c++ == index) {
			oclass->base = sclass->base;
			oclass->engn = sclass;
			*class = &nvkm_fifo_class;
			return 0;
		}
	}

	return c;
}
void void
nvkm_fifo_destroy(struct nvkm_fifo *fifo) nvkm_fifo_destroy(struct nvkm_fifo *fifo)
{ {
kfree(fifo->channel);
nvkm_event_fini(&fifo->uevent); nvkm_event_fini(&fifo->uevent);
nvkm_event_fini(&fifo->cevent); nvkm_event_fini(&fifo->cevent);
nvkm_engine_destroy(&fifo->engine); nvkm_engine_destroy(&fifo->engine);
} }
static const struct nvkm_engine_func
nvkm_fifo_func = {
.base.sclass = nvkm_fifo_class_get,
};
int int
nvkm_fifo_create_(struct nvkm_object *parent, struct nvkm_object *engine, nvkm_fifo_create_(struct nvkm_object *parent, struct nvkm_object *engine,
struct nvkm_oclass *oclass, struct nvkm_oclass *oclass,
int min, int max, int length, void **pobject) int min, int max, int length, void **pobject)
{ {
struct nvkm_fifo *fifo; struct nvkm_fifo *fifo;
int nr = max + 1;
int cnt = nr - min;
int ret; int ret;
ret = nvkm_engine_create_(parent, engine, oclass, true, "PFIFO", ret = nvkm_engine_create_(parent, engine, oclass, true, "PFIFO",
...@@ -167,17 +185,21 @@ nvkm_fifo_create_(struct nvkm_object *parent, struct nvkm_object *engine, ...@@ -167,17 +185,21 @@ nvkm_fifo_create_(struct nvkm_object *parent, struct nvkm_object *engine,
if (ret) if (ret)
return ret; return ret;
fifo->min = min; fifo->engine.func = &nvkm_fifo_func;
fifo->max = max; INIT_LIST_HEAD(&fifo->chan);
fifo->channel = kzalloc(sizeof(*fifo->channel) * (max + 1), GFP_KERNEL);
if (!fifo->channel) fifo->nr = nr;
return -ENOMEM; if (WARN_ON(fifo->nr > NVKM_FIFO_CHID_NR)) {
fifo->nr = NVKM_FIFO_CHID_NR;
cnt = fifo->nr - min;
}
bitmap_fill(fifo->mask, NVKM_FIFO_CHID_NR);
bitmap_clear(fifo->mask, min, cnt);
ret = nvkm_event_init(&nvkm_fifo_event_func, 1, 1, &fifo->cevent); ret = nvkm_event_init(&nvkm_fifo_event_func, 1, 1, &fifo->cevent);
if (ret) if (ret)
return ret; return ret;
fifo->chid = nvkm_fifo_chid;
spin_lock_init(&fifo->lock); spin_lock_init(&fifo->lock);
return 0; return 0;
} }
...@@ -2,27 +2,31 @@ ...@@ -2,27 +2,31 @@
#define __NVKM_FIFO_CHAN_H__ #define __NVKM_FIFO_CHAN_H__
#include "priv.h" #include "priv.h"
#define nvkm_fifo_channel_create(p,e,c,b,a,s,n,m,d) \ struct nvkm_fifo_chan_func {
nvkm_fifo_channel_create_((p), (e), (c), (b), (a), (s), (n), \ void *(*dtor)(struct nvkm_fifo_chan *);
(m), sizeof(**d), (void **)d) void (*init)(struct nvkm_fifo_chan *);
#define nvkm_fifo_channel_init(p) \ void (*fini)(struct nvkm_fifo_chan *);
nvkm_namedb_init(&(p)->namedb) int (*ntfy)(struct nvkm_fifo_chan *, u32 type, struct nvkm_event **);
#define nvkm_fifo_channel_fini(p,s) \ int (*engine_ctor)(struct nvkm_fifo_chan *, struct nvkm_engine *,
nvkm_namedb_fini(&(p)->namedb, (s)) struct nvkm_object *);
void (*engine_dtor)(struct nvkm_fifo_chan *, struct nvkm_engine *);
int (*engine_init)(struct nvkm_fifo_chan *, struct nvkm_engine *);
int (*engine_fini)(struct nvkm_fifo_chan *, struct nvkm_engine *,
bool suspend);
int (*object_ctor)(struct nvkm_fifo_chan *, struct nvkm_object *);
void (*object_dtor)(struct nvkm_fifo_chan *, int);
};
int nvkm_fifo_channel_create_(struct nvkm_object *, int nvkm_fifo_chan_ctor(const struct nvkm_fifo_chan_func *, struct nvkm_fifo *,
struct nvkm_object *, u32 size, u32 align, bool zero, u64 vm, u64 push,
struct nvkm_oclass *, u64 engines, int bar, u32 base, u32 user,
int bar, u32 addr, u32 size, u64 push, const struct nvkm_oclass *, struct nvkm_fifo_chan *);
u64 engmask, int len, void **);
void nvkm_fifo_channel_destroy(struct nvkm_fifo_chan *);
#define _nvkm_fifo_channel_init _nvkm_namedb_init struct nvkm_fifo_chan_oclass {
#define _nvkm_fifo_channel_fini _nvkm_namedb_fini int (*ctor)(struct nvkm_fifo *, const struct nvkm_oclass *,
void *data, u32 size, struct nvkm_object **);
struct nvkm_sclass base;
};
void _nvkm_fifo_channel_dtor(struct nvkm_object *); int g84_fifo_chan_ntfy(struct nvkm_fifo_chan *, u32, struct nvkm_event **);
int _nvkm_fifo_channel_map(struct nvkm_object *, u64 *, u32 *);
u32 _nvkm_fifo_channel_rd32(struct nvkm_object *, u64);
void _nvkm_fifo_channel_wr32(struct nvkm_object *, u64, u32);
int _nvkm_fifo_channel_ntfy(struct nvkm_object *, u32, struct nvkm_event **);
#endif #endif
#ifndef __GF100_FIFO_CHAN_H__ #ifndef __GF100_FIFO_CHAN_H__
#define __GF100_FIFO_CHAN_H__ #define __GF100_FIFO_CHAN_H__
#define gf100_fifo_chan(p) container_of((p), struct gf100_fifo_chan, base)
#include "chan.h" #include "chan.h"
#include "gf100.h" #include "gf100.h"
struct gf100_fifo_base { struct gf100_fifo_chan {
struct nvkm_fifo_base base; struct nvkm_fifo_chan base;
struct gf100_fifo *fifo;
struct list_head head;
bool killed;
struct nvkm_gpuobj *pgd; struct nvkm_gpuobj *pgd;
struct nvkm_vm *vm; struct nvkm_vm *vm;
};
struct gf100_fifo_chan { struct {
struct nvkm_fifo_chan base; struct nvkm_gpuobj *inst;
enum { struct nvkm_vma vma;
STOPPED, } engn[NVDEV_SUBDEV_NR];
RUNNING,
KILLED
} state;
}; };
extern struct nvkm_oclass gf100_fifo_cclass; extern const struct nvkm_fifo_chan_oclass gf100_fifo_gpfifo_oclass;
extern struct nvkm_oclass gf100_fifo_sclass[];
#endif #endif
#ifndef __GK104_FIFO_CHAN_H__ #ifndef __GK104_FIFO_CHAN_H__
#define __GK104_FIFO_CHAN_H__ #define __GK104_FIFO_CHAN_H__
#define gk104_fifo_chan(p) container_of((p), struct gk104_fifo_chan, base)
#include "chan.h" #include "chan.h"
#include "gk104.h" #include "gk104.h"
struct gk104_fifo_base { struct gk104_fifo_chan {
struct nvkm_fifo_base base; struct nvkm_fifo_chan base;
struct gk104_fifo *fifo;
int engine;
struct list_head head;
bool killed;
struct nvkm_gpuobj *pgd; struct nvkm_gpuobj *pgd;
struct nvkm_vm *vm; struct nvkm_vm *vm;
};
struct gk104_fifo_chan { struct {
struct nvkm_fifo_chan base; struct nvkm_gpuobj *inst;
u32 engine; struct nvkm_vma vma;
enum { } engn[NVDEV_SUBDEV_NR];
STOPPED,
RUNNING,
KILLED
} state;
}; };
extern struct nvkm_oclass gk104_fifo_cclass; int gk104_fifo_gpfifo_new(struct nvkm_fifo *, const struct nvkm_oclass *,
extern struct nvkm_oclass gk104_fifo_sclass[]; void *data, u32 size, struct nvkm_object **);
extern struct nvkm_ofuncs gk104_fifo_chan_ofuncs;
extern struct nvkm_oclass gm204_fifo_sclass[]; extern const struct nvkm_fifo_chan_oclass gk104_fifo_gpfifo_oclass;
extern const struct nvkm_fifo_chan_oclass gm204_fifo_gpfifo_oclass;
#endif #endif
#ifndef __NV04_FIFO_CHAN_H__ #ifndef __NV04_FIFO_CHAN_H__
#define __NV04_FIFO_CHAN_H__ #define __NV04_FIFO_CHAN_H__
#define nv04_fifo_chan(p) container_of((p), struct nv04_fifo_chan, base)
#include "chan.h" #include "chan.h"
#include "nv04.h" #include "nv04.h"
struct nv04_fifo_chan { struct nv04_fifo_chan {
struct nvkm_fifo_chan base; struct nvkm_fifo_chan base;
u32 subc[8]; struct nv04_fifo *fifo;
u32 ramfc; u32 ramfc;
struct nvkm_gpuobj *engn[NVDEV_SUBDEV_NR];
}; };
int nv04_fifo_object_attach(struct nvkm_object *, struct nvkm_object *, u32); extern const struct nvkm_fifo_chan_func nv04_fifo_dma_func;
void nv04_fifo_object_detach(struct nvkm_object *, int); void *nv04_fifo_dma_dtor(struct nvkm_fifo_chan *);
void nv04_fifo_dma_init(struct nvkm_fifo_chan *);
void nv04_fifo_dma_fini(struct nvkm_fifo_chan *);
void nv04_fifo_dma_object_dtor(struct nvkm_fifo_chan *, int);
void nv04_fifo_chan_dtor(struct nvkm_object *); extern const struct nvkm_fifo_chan_oclass nv04_fifo_dma_oclass;
int nv04_fifo_chan_init(struct nvkm_object *); extern const struct nvkm_fifo_chan_oclass nv10_fifo_dma_oclass;
int nv04_fifo_chan_fini(struct nvkm_object *, bool suspend); extern const struct nvkm_fifo_chan_oclass nv17_fifo_dma_oclass;
extern const struct nvkm_fifo_chan_oclass nv40_fifo_dma_oclass;
extern struct nvkm_oclass nv04_fifo_cclass;
extern struct nvkm_oclass nv04_fifo_sclass[];
extern struct nvkm_oclass nv10_fifo_sclass[];
extern struct nvkm_oclass nv17_fifo_sclass[];
extern struct nvkm_oclass nv40_fifo_sclass[];
#endif #endif
#ifndef __NV50_FIFO_CHAN_H__ #ifndef __NV50_FIFO_CHAN_H__
#define __NV50_FIFO_CHAN_H__ #define __NV50_FIFO_CHAN_H__
#define nv50_fifo_chan(p) container_of((p), struct nv50_fifo_chan, base)
#include "chan.h" #include "chan.h"
#include "nv50.h" #include "nv50.h"
struct nv50_fifo_base { struct nv50_fifo_chan {
struct nvkm_fifo_base base; struct nv50_fifo *fifo;
struct nvkm_fifo_chan base;
struct nvkm_gpuobj *ramfc; struct nvkm_gpuobj *ramfc;
struct nvkm_gpuobj *cache; struct nvkm_gpuobj *cache;
struct nvkm_gpuobj *eng; struct nvkm_gpuobj *eng;
struct nvkm_gpuobj *pgd; struct nvkm_gpuobj *pgd;
struct nvkm_ramht *ramht;
struct nvkm_vm *vm; struct nvkm_vm *vm;
};
struct nv50_fifo_chan { struct nvkm_gpuobj *engn[NVDEV_SUBDEV_NR];
struct nvkm_fifo_chan base;
u32 subc[8];
struct nvkm_ramht *ramht;
}; };
extern struct nvkm_oclass nv50_fifo_cclass; int nv50_fifo_chan_ctor(struct nv50_fifo *, u64 vm, u64 push,
extern struct nvkm_oclass nv50_fifo_sclass[]; const struct nvkm_oclass *, struct nv50_fifo_chan *);
void nv50_fifo_context_dtor(struct nvkm_object *); void *nv50_fifo_chan_dtor(struct nvkm_fifo_chan *);
void nv50_fifo_chan_dtor(struct nvkm_object *); void nv50_fifo_chan_fini(struct nvkm_fifo_chan *);
int nv50_fifo_chan_init(struct nvkm_object *); void nv50_fifo_chan_engine_dtor(struct nvkm_fifo_chan *, struct nvkm_engine *);
int nv50_fifo_chan_fini(struct nvkm_object *, bool); void nv50_fifo_chan_object_dtor(struct nvkm_fifo_chan *, int);
int nv50_fifo_context_attach(struct nvkm_object *, struct nvkm_object *);
int nv50_fifo_context_detach(struct nvkm_object *, bool, int g84_fifo_chan_ctor(struct nv50_fifo *, u64 vm, u64 push,
struct nvkm_object *); const struct nvkm_oclass *, struct nv50_fifo_chan *);
int nv50_fifo_object_attach(struct nvkm_object *, struct nvkm_object *, u32);
void nv50_fifo_object_detach(struct nvkm_object *, int);
extern struct nvkm_ofuncs nv50_fifo_ofuncs_ind;
extern struct nvkm_oclass g84_fifo_cclass; extern const struct nvkm_fifo_chan_oclass nv50_fifo_dma_oclass;
extern struct nvkm_oclass g84_fifo_sclass[]; extern const struct nvkm_fifo_chan_oclass nv50_fifo_gpfifo_oclass;
int g84_fifo_chan_init(struct nvkm_object *); extern const struct nvkm_fifo_chan_oclass g84_fifo_dma_oclass;
int g84_fifo_context_attach(struct nvkm_object *, struct nvkm_object *); extern const struct nvkm_fifo_chan_oclass g84_fifo_gpfifo_oclass;
int g84_fifo_context_detach(struct nvkm_object *, bool,
struct nvkm_object *);
int g84_fifo_object_attach(struct nvkm_object *, struct nvkm_object *, u32);
extern struct nvkm_ofuncs g84_fifo_ofuncs_ind;
#endif #endif
...@@ -30,15 +30,14 @@ ...@@ -30,15 +30,14 @@
#include <nvif/unpack.h> #include <nvif/unpack.h>
static int static int
g84_fifo_chan_ctor_dma(struct nvkm_object *parent, struct nvkm_object *engine, g84_fifo_dma_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
struct nvkm_oclass *oclass, void *data, u32 size, void *data, u32 size, struct nvkm_object **pobject)
struct nvkm_object **pobject)
{ {
struct nvkm_object *parent = oclass->parent;
union { union {
struct nv50_channel_dma_v0 v0; struct nv50_channel_dma_v0 v0;
} *args = data; } *args = data;
struct nvkm_device *device = parent->engine->subdev.device; struct nv50_fifo *fifo = nv50_fifo(base);
struct nv50_fifo_base *base = (void *)parent;
struct nv50_fifo_chan *chan; struct nv50_fifo_chan *chan;
int ret; int ret;
...@@ -48,80 +47,47 @@ g84_fifo_chan_ctor_dma(struct nvkm_object *parent, struct nvkm_object *engine, ...@@ -48,80 +47,47 @@ g84_fifo_chan_ctor_dma(struct nvkm_object *parent, struct nvkm_object *engine,
"pushbuf %llx offset %016llx\n", "pushbuf %llx offset %016llx\n",
args->v0.version, args->v0.vm, args->v0.pushbuf, args->v0.version, args->v0.vm, args->v0.pushbuf,
args->v0.offset); args->v0.offset);
if (args->v0.vm) if (!args->v0.pushbuf)
return -ENOENT; return -EINVAL;
} else } else
return ret; return ret;
ret = nvkm_fifo_channel_create(parent, engine, oclass, 0, 0xc00000, if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
0x2000, args->v0.pushbuf, return -ENOMEM;
(1ULL << NVDEV_ENGINE_DMAOBJ) | *pobject = &chan->base.object;
(1ULL << NVDEV_ENGINE_SW) |
(1ULL << NVDEV_ENGINE_GR) |
(1ULL << NVDEV_ENGINE_MPEG) |
(1ULL << NVDEV_ENGINE_ME) |
(1ULL << NVDEV_ENGINE_VP) |
(1ULL << NVDEV_ENGINE_CIPHER) |
(1ULL << NVDEV_ENGINE_SEC) |
(1ULL << NVDEV_ENGINE_BSP) |
(1ULL << NVDEV_ENGINE_MSVLD) |
(1ULL << NVDEV_ENGINE_MSPDEC) |
(1ULL << NVDEV_ENGINE_MSPPP) |
(1ULL << NVDEV_ENGINE_CE0) |
(1ULL << NVDEV_ENGINE_VIC), &chan);
*pobject = nv_object(chan);
if (ret)
return ret;
chan->base.inst = base->base.gpuobj.addr;
args->v0.chid = chan->base.chid;
ret = nvkm_ramht_new(device, 0x8000, 16, &base->base.gpuobj, ret = g84_fifo_chan_ctor(fifo, args->v0.vm, args->v0.pushbuf,
&chan->ramht); oclass, chan);
if (ret) if (ret)
return ret; return ret;
nv_parent(chan)->context_attach = g84_fifo_context_attach; args->v0.chid = chan->base.chid;
nv_parent(chan)->context_detach = g84_fifo_context_detach;
nv_parent(chan)->object_attach = g84_fifo_object_attach;
nv_parent(chan)->object_detach = nv50_fifo_object_detach;
nvkm_kmap(base->ramfc); nvkm_kmap(chan->ramfc);
nvkm_wo32(base->ramfc, 0x08, lower_32_bits(args->v0.offset)); nvkm_wo32(chan->ramfc, 0x08, lower_32_bits(args->v0.offset));
nvkm_wo32(base->ramfc, 0x0c, upper_32_bits(args->v0.offset)); nvkm_wo32(chan->ramfc, 0x0c, upper_32_bits(args->v0.offset));
nvkm_wo32(base->ramfc, 0x10, lower_32_bits(args->v0.offset)); nvkm_wo32(chan->ramfc, 0x10, lower_32_bits(args->v0.offset));
nvkm_wo32(base->ramfc, 0x14, upper_32_bits(args->v0.offset)); nvkm_wo32(chan->ramfc, 0x14, upper_32_bits(args->v0.offset));
nvkm_wo32(base->ramfc, 0x3c, 0x003f6078); nvkm_wo32(chan->ramfc, 0x3c, 0x003f6078);
nvkm_wo32(base->ramfc, 0x44, 0x01003fff); nvkm_wo32(chan->ramfc, 0x44, 0x01003fff);
nvkm_wo32(base->ramfc, 0x48, chan->base.pushgpu->node->offset >> 4); nvkm_wo32(chan->ramfc, 0x48, chan->base.push->node->offset >> 4);
nvkm_wo32(base->ramfc, 0x4c, 0xffffffff); nvkm_wo32(chan->ramfc, 0x4c, 0xffffffff);
nvkm_wo32(base->ramfc, 0x60, 0x7fffffff); nvkm_wo32(chan->ramfc, 0x60, 0x7fffffff);
nvkm_wo32(base->ramfc, 0x78, 0x00000000); nvkm_wo32(chan->ramfc, 0x78, 0x00000000);
nvkm_wo32(base->ramfc, 0x7c, 0x30000001); nvkm_wo32(chan->ramfc, 0x7c, 0x30000001);
nvkm_wo32(base->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) | nvkm_wo32(chan->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
(4 << 24) /* SEARCH_FULL */ | (4 << 24) /* SEARCH_FULL */ |
(chan->ramht->gpuobj->node->offset >> 4)); (chan->ramht->gpuobj->node->offset >> 4));
nvkm_wo32(base->ramfc, 0x88, base->cache->addr >> 10); nvkm_wo32(chan->ramfc, 0x88, chan->cache->addr >> 10);
nvkm_wo32(base->ramfc, 0x98, nv_gpuobj(base)->addr >> 12); nvkm_wo32(chan->ramfc, 0x98, chan->base.inst->addr >> 12);
nvkm_done(base->ramfc); nvkm_done(chan->ramfc);
return 0; return 0;
} }
static struct nvkm_ofuncs const struct nvkm_fifo_chan_oclass
g84_fifo_ofuncs_dma = { g84_fifo_dma_oclass = {
.ctor = g84_fifo_chan_ctor_dma, .base.oclass = G82_CHANNEL_DMA,
.dtor = nv50_fifo_chan_dtor, .base.minver = 0,
.init = g84_fifo_chan_init, .base.maxver = 0,
.fini = nv50_fifo_chan_fini, .ctor = g84_fifo_dma_new,
.map = _nvkm_fifo_channel_map,
.rd32 = _nvkm_fifo_channel_rd32,
.wr32 = _nvkm_fifo_channel_wr32,
.ntfy = _nvkm_fifo_channel_ntfy
};
struct nvkm_oclass
g84_fifo_sclass[] = {
{ G82_CHANNEL_DMA, &g84_fifo_ofuncs_dma },
{ G82_CHANNEL_GPFIFO, &g84_fifo_ofuncs_ind },
{}
}; };
...@@ -31,74 +31,51 @@ ...@@ -31,74 +31,51 @@
#include <nvif/class.h> #include <nvif/class.h>
#include <nvif/unpack.h> #include <nvif/unpack.h>
int
nv04_fifo_context_attach(struct nvkm_object *parent,
struct nvkm_object *object)
{
nv_engctx(object)->addr = nvkm_fifo_chan(parent)->chid;
return 0;
}
void void
nv04_fifo_object_detach(struct nvkm_object *parent, int cookie) nv04_fifo_dma_object_dtor(struct nvkm_fifo_chan *base, int cookie)
{ {
struct nv04_fifo *fifo = (void *)parent->engine; struct nv04_fifo_chan *chan = nv04_fifo_chan(base);
struct nvkm_instmem *imem = fifo->base.engine.subdev.device->imem; struct nvkm_instmem *imem = chan->fifo->base.engine.subdev.device->imem;
mutex_lock(&nv_subdev(fifo)->mutex);
nvkm_ramht_remove(imem->ramht, cookie); nvkm_ramht_remove(imem->ramht, cookie);
mutex_unlock(&nv_subdev(fifo)->mutex);
} }
int static int
nv04_fifo_object_attach(struct nvkm_object *parent, nv04_fifo_dma_object_ctor(struct nvkm_fifo_chan *base,
struct nvkm_object *object, u32 handle) struct nvkm_object *object)
{ {
struct nv04_fifo *fifo = (void *)parent->engine; struct nv04_fifo_chan *chan = nv04_fifo_chan(base);
struct nv04_fifo_chan *chan = (void *)parent; struct nvkm_instmem *imem = chan->fifo->base.engine.subdev.device->imem;
struct nvkm_instmem *imem = fifo->base.engine.subdev.device->imem; u32 context = 0x80000000 | chan->base.chid << 24;
u32 context, chid = chan->base.chid; u32 handle = object->handle;
int ret; int hash;
if (nv_iclass(object, NV_GPUOBJ_CLASS)) switch (object->engine->subdev.index) {
context = nv_gpuobj(object)->addr >> 4; case NVDEV_ENGINE_DMAOBJ:
else case NVDEV_ENGINE_SW : context |= 0x00000000; break;
context = 0x00000004; /* just non-zero */ case NVDEV_ENGINE_GR : context |= 0x00010000; break;
case NVDEV_ENGINE_MPEG : context |= 0x00020000; break;
if (object->engine) { default:
switch (nv_engidx(object->engine)) { WARN_ON(1);
case NVDEV_ENGINE_DMAOBJ: return -EINVAL;
case NVDEV_ENGINE_SW:
context |= 0x00000000;
break;
case NVDEV_ENGINE_GR:
context |= 0x00010000;
break;
case NVDEV_ENGINE_MPEG:
context |= 0x00020000;
break;
default:
return -EINVAL;
}
} }
context |= 0x80000000; /* valid */ mutex_lock(&chan->fifo->base.engine.subdev.mutex);
context |= chid << 24; hash = nvkm_ramht_insert(imem->ramht, object, chan->base.chid, 4,
handle, context);
mutex_lock(&nv_subdev(fifo)->mutex); mutex_unlock(&chan->fifo->base.engine.subdev.mutex);
ret = nvkm_ramht_insert(imem->ramht, NULL, chid, 0, handle, context); return hash;
mutex_unlock(&nv_subdev(fifo)->mutex);
return ret;
} }
int void
nv04_fifo_chan_fini(struct nvkm_object *object, bool suspend) nv04_fifo_dma_fini(struct nvkm_fifo_chan *base)
{ {
struct nv04_fifo *fifo = (void *)object->engine; struct nv04_fifo_chan *chan = nv04_fifo_chan(base);
struct nv04_fifo_chan *chan = (void *)object; struct nv04_fifo *fifo = chan->fifo;
struct nvkm_device *device = fifo->base.engine.subdev.device; struct nvkm_device *device = fifo->base.engine.subdev.device;
struct nvkm_memory *fctx = device->imem->ramfc; struct nvkm_memory *fctx = device->imem->ramfc;
struct ramfc_desc *c; struct ramfc_desc *c;
unsigned long flags; unsigned long flags;
u32 mask = fifo->base.nr - 1;
u32 data = chan->ramfc; u32 data = chan->ramfc;
u32 chid; u32 chid;
...@@ -107,7 +84,7 @@ nv04_fifo_chan_fini(struct nvkm_object *object, bool suspend) ...@@ -107,7 +84,7 @@ nv04_fifo_chan_fini(struct nvkm_object *object, bool suspend)
nvkm_wr32(device, NV03_PFIFO_CACHES, 0); nvkm_wr32(device, NV03_PFIFO_CACHES, 0);
/* if this channel is active, replace it with a null context */ /* if this channel is active, replace it with a null context */
chid = nvkm_rd32(device, NV03_PFIFO_CACHE1_PUSH1) & fifo->base.max; chid = nvkm_rd32(device, NV03_PFIFO_CACHE1_PUSH1) & mask;
if (chid == chan->base.chid) { if (chid == chan->base.chid) {
nvkm_mask(device, NV04_PFIFO_CACHE1_DMA_PUSH, 0x00000001, 0); nvkm_mask(device, NV04_PFIFO_CACHE1_DMA_PUSH, 0x00000001, 0);
nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0, 0); nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0, 0);
...@@ -129,7 +106,7 @@ nv04_fifo_chan_fini(struct nvkm_object *object, bool suspend) ...@@ -129,7 +106,7 @@ nv04_fifo_chan_fini(struct nvkm_object *object, bool suspend)
nvkm_wr32(device, NV03_PFIFO_CACHE1_GET, 0); nvkm_wr32(device, NV03_PFIFO_CACHE1_GET, 0);
nvkm_wr32(device, NV03_PFIFO_CACHE1_PUT, 0); nvkm_wr32(device, NV03_PFIFO_CACHE1_PUT, 0);
nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH1, fifo->base.max); nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH1, mask);
nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0, 1); nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0, 1);
nvkm_wr32(device, NV04_PFIFO_CACHE1_PULL0, 1); nvkm_wr32(device, NV04_PFIFO_CACHE1_PULL0, 1);
} }
...@@ -138,35 +115,26 @@ nv04_fifo_chan_fini(struct nvkm_object *object, bool suspend) ...@@ -138,35 +115,26 @@ nv04_fifo_chan_fini(struct nvkm_object *object, bool suspend)
nvkm_mask(device, NV04_PFIFO_MODE, 1 << chan->base.chid, 0); nvkm_mask(device, NV04_PFIFO_MODE, 1 << chan->base.chid, 0);
nvkm_wr32(device, NV03_PFIFO_CACHES, 1); nvkm_wr32(device, NV03_PFIFO_CACHES, 1);
spin_unlock_irqrestore(&fifo->base.lock, flags); spin_unlock_irqrestore(&fifo->base.lock, flags);
return nvkm_fifo_channel_fini(&chan->base, suspend);
} }
int void
nv04_fifo_chan_init(struct nvkm_object *object) nv04_fifo_dma_init(struct nvkm_fifo_chan *base)
{ {
struct nv04_fifo *fifo = (void *)object->engine; struct nv04_fifo_chan *chan = nv04_fifo_chan(base);
struct nv04_fifo_chan *chan = (void *)object; struct nv04_fifo *fifo = chan->fifo;
struct nvkm_device *device = fifo->base.engine.subdev.device; struct nvkm_device *device = fifo->base.engine.subdev.device;
u32 mask = 1 << chan->base.chid; u32 mask = 1 << chan->base.chid;
unsigned long flags; unsigned long flags;
int ret;
ret = nvkm_fifo_channel_init(&chan->base);
if (ret)
return ret;
spin_lock_irqsave(&fifo->base.lock, flags); spin_lock_irqsave(&fifo->base.lock, flags);
nvkm_mask(device, NV04_PFIFO_MODE, mask, mask); nvkm_mask(device, NV04_PFIFO_MODE, mask, mask);
spin_unlock_irqrestore(&fifo->base.lock, flags); spin_unlock_irqrestore(&fifo->base.lock, flags);
return 0;
} }
void void *
nv04_fifo_chan_dtor(struct nvkm_object *object) nv04_fifo_dma_dtor(struct nvkm_fifo_chan *base)
{ {
struct nv04_fifo *fifo = (void *)object->engine; struct nv04_fifo_chan *chan = nv04_fifo_chan(base);
struct nv04_fifo_chan *chan = (void *)object; struct nv04_fifo *fifo = chan->fifo;
struct nvkm_instmem *imem = fifo->base.engine.subdev.device->imem; struct nvkm_instmem *imem = fifo->base.engine.subdev.device->imem;
struct ramfc_desc *c = fifo->ramfc_desc; struct ramfc_desc *c = fifo->ramfc_desc;
...@@ -175,22 +143,30 @@ nv04_fifo_chan_dtor(struct nvkm_object *object) ...@@ -175,22 +143,30 @@ nv04_fifo_chan_dtor(struct nvkm_object *object)
nvkm_wo32(imem->ramfc, chan->ramfc + c->ctxp, 0x00000000); nvkm_wo32(imem->ramfc, chan->ramfc + c->ctxp, 0x00000000);
} while ((++c)->bits); } while ((++c)->bits);
nvkm_done(imem->ramfc); nvkm_done(imem->ramfc);
return chan;
nvkm_fifo_channel_destroy(&chan->base);
} }
const struct nvkm_fifo_chan_func
nv04_fifo_dma_func = {
.dtor = nv04_fifo_dma_dtor,
.init = nv04_fifo_dma_init,
.fini = nv04_fifo_dma_fini,
.object_ctor = nv04_fifo_dma_object_ctor,
.object_dtor = nv04_fifo_dma_object_dtor,
};
static int static int
nv04_fifo_chan_ctor(struct nvkm_object *parent, nv04_fifo_dma_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
struct nvkm_object *engine, void *data, u32 size, struct nvkm_object **pobject)
struct nvkm_oclass *oclass, void *data, u32 size,
struct nvkm_object **pobject)
{ {
struct nvkm_object *parent = oclass->parent;
union { union {
struct nv03_channel_dma_v0 v0; struct nv03_channel_dma_v0 v0;
} *args = data; } *args = data;
struct nv04_fifo *fifo = (void *)engine; struct nv04_fifo *fifo = nv04_fifo(base);
struct nvkm_instmem *imem = fifo->base.engine.subdev.device->imem; struct nv04_fifo_chan *chan = NULL;
struct nv04_fifo_chan *chan; struct nvkm_device *device = fifo->base.engine.subdev.device;
struct nvkm_instmem *imem = device->imem;
int ret; int ret;
nvif_ioctl(parent, "create channel dma size %d\n", size); nvif_ioctl(parent, "create channel dma size %d\n", size);
...@@ -198,29 +174,32 @@ nv04_fifo_chan_ctor(struct nvkm_object *parent, ...@@ -198,29 +174,32 @@ nv04_fifo_chan_ctor(struct nvkm_object *parent,
nvif_ioctl(parent, "create channel dma vers %d pushbuf %llx " nvif_ioctl(parent, "create channel dma vers %d pushbuf %llx "
"offset %08x\n", args->v0.version, "offset %08x\n", args->v0.version,
args->v0.pushbuf, args->v0.offset); args->v0.pushbuf, args->v0.offset);
if (!args->v0.pushbuf)
return -EINVAL;
} else } else
return ret; return ret;
ret = nvkm_fifo_channel_create(parent, engine, oclass, 0, 0x800000, if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
0x10000, args->v0.pushbuf, return -ENOMEM;
(1ULL << NVDEV_ENGINE_DMAOBJ) | *pobject = &chan->base.object;
(1ULL << NVDEV_ENGINE_SW) |
(1ULL << NVDEV_ENGINE_GR), &chan); ret = nvkm_fifo_chan_ctor(&nv04_fifo_dma_func, &fifo->base,
*pobject = nv_object(chan); 0x1000, 0x1000, false, 0, args->v0.pushbuf,
(1ULL << NVDEV_ENGINE_DMAOBJ) |
(1ULL << NVDEV_ENGINE_GR) |
(1ULL << NVDEV_ENGINE_SW),
0, 0x800000, 0x10000, oclass, &chan->base);
chan->fifo = fifo;
if (ret) if (ret)
return ret; return ret;
args->v0.chid = chan->base.chid; args->v0.chid = chan->base.chid;
nv_parent(chan)->object_attach = nv04_fifo_object_attach;
nv_parent(chan)->object_detach = nv04_fifo_object_detach;
nv_parent(chan)->context_attach = nv04_fifo_context_attach;
chan->ramfc = chan->base.chid * 32; chan->ramfc = chan->base.chid * 32;
nvkm_kmap(imem->ramfc); nvkm_kmap(imem->ramfc);
nvkm_wo32(imem->ramfc, chan->ramfc + 0x00, args->v0.offset); nvkm_wo32(imem->ramfc, chan->ramfc + 0x00, args->v0.offset);
nvkm_wo32(imem->ramfc, chan->ramfc + 0x04, args->v0.offset); nvkm_wo32(imem->ramfc, chan->ramfc + 0x04, args->v0.offset);
nvkm_wo32(imem->ramfc, chan->ramfc + 0x08, chan->base.pushgpu->addr >> 4); nvkm_wo32(imem->ramfc, chan->ramfc + 0x08, chan->base.push->addr >> 4);
nvkm_wo32(imem->ramfc, chan->ramfc + 0x10, nvkm_wo32(imem->ramfc, chan->ramfc + 0x10,
NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES | NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES | NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
...@@ -232,51 +211,10 @@ nv04_fifo_chan_ctor(struct nvkm_object *parent, ...@@ -232,51 +211,10 @@ nv04_fifo_chan_ctor(struct nvkm_object *parent,
return 0; return 0;
} }
static struct nvkm_ofuncs const struct nvkm_fifo_chan_oclass
nv04_fifo_ofuncs = { nv04_fifo_dma_oclass = {
.ctor = nv04_fifo_chan_ctor, .base.oclass = NV03_CHANNEL_DMA,
.dtor = nv04_fifo_chan_dtor, .base.minver = 0,
.init = nv04_fifo_chan_init, .base.maxver = 0,
.fini = nv04_fifo_chan_fini, .ctor = nv04_fifo_dma_new,
.map = _nvkm_fifo_channel_map,
.rd32 = _nvkm_fifo_channel_rd32,
.wr32 = _nvkm_fifo_channel_wr32,
.ntfy = _nvkm_fifo_channel_ntfy
};
struct nvkm_oclass
nv04_fifo_sclass[] = {
{ NV03_CHANNEL_DMA, &nv04_fifo_ofuncs },
{}
};
int
nv04_fifo_context_ctor(struct nvkm_object *parent,
struct nvkm_object *engine,
struct nvkm_oclass *oclass, void *data, u32 size,
struct nvkm_object **pobject)
{
struct nv04_fifo_base *base;
int ret;
ret = nvkm_fifo_context_create(parent, engine, oclass, NULL, 0x1000,
0x1000, NVOBJ_FLAG_HEAP, &base);
*pobject = nv_object(base);
if (ret)
return ret;
return 0;
}
struct nvkm_oclass
nv04_fifo_cclass = {
.handle = NV_ENGCTX(FIFO, 0x04),
.ofuncs = &(struct nvkm_ofuncs) {
.ctor = nv04_fifo_context_ctor,
.dtor = _nvkm_fifo_context_dtor,
.init = _nvkm_fifo_context_init,
.fini = _nvkm_fifo_context_fini,
.rd32 = _nvkm_fifo_context_rd32,
.wr32 = _nvkm_fifo_context_wr32,
},
}; };
...@@ -31,17 +31,17 @@ ...@@ -31,17 +31,17 @@
#include <nvif/unpack.h> #include <nvif/unpack.h>
static int static int
nv10_fifo_chan_ctor(struct nvkm_object *parent, nv10_fifo_dma_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
struct nvkm_object *engine, void *data, u32 size, struct nvkm_object **pobject)
struct nvkm_oclass *oclass, void *data, u32 size,
struct nvkm_object **pobject)
{ {
struct nvkm_object *parent = oclass->parent;
union { union {
struct nv03_channel_dma_v0 v0; struct nv03_channel_dma_v0 v0;
} *args = data; } *args = data;
struct nv04_fifo *fifo = (void *)engine; struct nv04_fifo *fifo = nv04_fifo(base);
struct nvkm_instmem *imem = fifo->base.engine.subdev.device->imem; struct nv04_fifo_chan *chan = NULL;
struct nv04_fifo_chan *chan; struct nvkm_device *device = fifo->base.engine.subdev.device;
struct nvkm_instmem *imem = device->imem;
int ret; int ret;
nvif_ioctl(parent, "create channel dma size %d\n", size); nvif_ioctl(parent, "create channel dma size %d\n", size);
...@@ -49,29 +49,32 @@ nv10_fifo_chan_ctor(struct nvkm_object *parent, ...@@ -49,29 +49,32 @@ nv10_fifo_chan_ctor(struct nvkm_object *parent,
nvif_ioctl(parent, "create channel dma vers %d pushbuf %llx " nvif_ioctl(parent, "create channel dma vers %d pushbuf %llx "
"offset %08x\n", args->v0.version, "offset %08x\n", args->v0.version,
args->v0.pushbuf, args->v0.offset); args->v0.pushbuf, args->v0.offset);
if (!args->v0.pushbuf)
return -EINVAL;
} else } else
return ret; return ret;
ret = nvkm_fifo_channel_create(parent, engine, oclass, 0, 0x800000, if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
0x10000, args->v0.pushbuf, return -ENOMEM;
(1ULL << NVDEV_ENGINE_DMAOBJ) | *pobject = &chan->base.object;
(1ULL << NVDEV_ENGINE_SW) |
(1ULL << NVDEV_ENGINE_GR), &chan); ret = nvkm_fifo_chan_ctor(&nv04_fifo_dma_func, &fifo->base,
*pobject = nv_object(chan); 0x1000, 0x1000, false, 0, args->v0.pushbuf,
(1ULL << NVDEV_ENGINE_DMAOBJ) |
(1ULL << NVDEV_ENGINE_GR) |
(1ULL << NVDEV_ENGINE_SW),
0, 0x800000, 0x10000, oclass, &chan->base);
chan->fifo = fifo;
if (ret) if (ret)
return ret; return ret;
args->v0.chid = chan->base.chid; args->v0.chid = chan->base.chid;
nv_parent(chan)->object_attach = nv04_fifo_object_attach;
nv_parent(chan)->object_detach = nv04_fifo_object_detach;
nv_parent(chan)->context_attach = nv04_fifo_context_attach;
chan->ramfc = chan->base.chid * 32; chan->ramfc = chan->base.chid * 32;
nvkm_kmap(imem->ramfc); nvkm_kmap(imem->ramfc);
nvkm_wo32(imem->ramfc, chan->ramfc + 0x00, args->v0.offset); nvkm_wo32(imem->ramfc, chan->ramfc + 0x00, args->v0.offset);
nvkm_wo32(imem->ramfc, chan->ramfc + 0x04, args->v0.offset); nvkm_wo32(imem->ramfc, chan->ramfc + 0x04, args->v0.offset);
nvkm_wo32(imem->ramfc, chan->ramfc + 0x0c, chan->base.pushgpu->addr >> 4); nvkm_wo32(imem->ramfc, chan->ramfc + 0x0c, chan->base.push->addr >> 4);
nvkm_wo32(imem->ramfc, chan->ramfc + 0x14, nvkm_wo32(imem->ramfc, chan->ramfc + 0x14,
NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES | NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES | NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
...@@ -83,20 +86,10 @@ nv10_fifo_chan_ctor(struct nvkm_object *parent, ...@@ -83,20 +86,10 @@ nv10_fifo_chan_ctor(struct nvkm_object *parent,
return 0; return 0;
} }
static struct nvkm_ofuncs const struct nvkm_fifo_chan_oclass
nv10_fifo_ofuncs = { nv10_fifo_dma_oclass = {
.ctor = nv10_fifo_chan_ctor, .base.oclass = NV10_CHANNEL_DMA,
.dtor = nv04_fifo_chan_dtor, .base.minver = 0,
.init = nv04_fifo_chan_init, .base.maxver = 0,
.fini = nv04_fifo_chan_fini, .ctor = nv10_fifo_dma_new,
.map = _nvkm_fifo_channel_map,
.rd32 = _nvkm_fifo_channel_rd32,
.wr32 = _nvkm_fifo_channel_wr32,
.ntfy = _nvkm_fifo_channel_ntfy
};
struct nvkm_oclass
nv10_fifo_sclass[] = {
{ NV10_CHANNEL_DMA, &nv10_fifo_ofuncs },
{}
}; };
...@@ -31,17 +31,17 @@ ...@@ -31,17 +31,17 @@
#include <nvif/unpack.h> #include <nvif/unpack.h>
static int static int
nv17_fifo_chan_ctor(struct nvkm_object *parent, nv17_fifo_dma_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
struct nvkm_object *engine, void *data, u32 size, struct nvkm_object **pobject)
struct nvkm_oclass *oclass, void *data, u32 size,
struct nvkm_object **pobject)
{ {
struct nvkm_object *parent = oclass->parent;
union { union {
struct nv03_channel_dma_v0 v0; struct nv03_channel_dma_v0 v0;
} *args = data; } *args = data;
struct nv04_fifo *fifo = (void *)engine; struct nv04_fifo *fifo = nv04_fifo(base);
struct nvkm_instmem *imem = fifo->base.engine.subdev.device->imem; struct nv04_fifo_chan *chan = NULL;
struct nv04_fifo_chan *chan; struct nvkm_device *device = fifo->base.engine.subdev.device;
struct nvkm_instmem *imem = device->imem;
int ret; int ret;
nvif_ioctl(parent, "create channel dma size %d\n", size); nvif_ioctl(parent, "create channel dma size %d\n", size);
...@@ -49,31 +49,33 @@ nv17_fifo_chan_ctor(struct nvkm_object *parent, ...@@ -49,31 +49,33 @@ nv17_fifo_chan_ctor(struct nvkm_object *parent,
nvif_ioctl(parent, "create channel dma vers %d pushbuf %llx " nvif_ioctl(parent, "create channel dma vers %d pushbuf %llx "
"offset %08x\n", args->v0.version, "offset %08x\n", args->v0.version,
args->v0.pushbuf, args->v0.offset); args->v0.pushbuf, args->v0.offset);
if (!args->v0.pushbuf)
return -EINVAL;
} else } else
return ret; return ret;
ret = nvkm_fifo_channel_create(parent, engine, oclass, 0, 0x800000, if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
0x10000, args->v0.pushbuf, return -ENOMEM;
(1ULL << NVDEV_ENGINE_DMAOBJ) | *pobject = &chan->base.object;
(1ULL << NVDEV_ENGINE_SW) |
(1ULL << NVDEV_ENGINE_GR) | ret = nvkm_fifo_chan_ctor(&nv04_fifo_dma_func, &fifo->base,
(1ULL << NVDEV_ENGINE_MPEG), /* NV31- */ 0x1000, 0x1000, false, 0, args->v0.pushbuf,
&chan); (1ULL << NVDEV_ENGINE_DMAOBJ) |
*pobject = nv_object(chan); (1ULL << NVDEV_ENGINE_GR) |
(1ULL << NVDEV_ENGINE_MPEG) | /* NV31- */
(1ULL << NVDEV_ENGINE_SW),
0, 0x800000, 0x10000, oclass, &chan->base);
chan->fifo = fifo;
if (ret) if (ret)
return ret; return ret;
args->v0.chid = chan->base.chid; args->v0.chid = chan->base.chid;
nv_parent(chan)->object_attach = nv04_fifo_object_attach;
nv_parent(chan)->object_detach = nv04_fifo_object_detach;
nv_parent(chan)->context_attach = nv04_fifo_context_attach;
chan->ramfc = chan->base.chid * 64; chan->ramfc = chan->base.chid * 64;
nvkm_kmap(imem->ramfc); nvkm_kmap(imem->ramfc);
nvkm_wo32(imem->ramfc, chan->ramfc + 0x00, args->v0.offset); nvkm_wo32(imem->ramfc, chan->ramfc + 0x00, args->v0.offset);
nvkm_wo32(imem->ramfc, chan->ramfc + 0x04, args->v0.offset); nvkm_wo32(imem->ramfc, chan->ramfc + 0x04, args->v0.offset);
nvkm_wo32(imem->ramfc, chan->ramfc + 0x0c, chan->base.pushgpu->addr >> 4); nvkm_wo32(imem->ramfc, chan->ramfc + 0x0c, chan->base.push->addr >> 4);
nvkm_wo32(imem->ramfc, chan->ramfc + 0x14, nvkm_wo32(imem->ramfc, chan->ramfc + 0x14,
NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES | NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES | NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
...@@ -85,20 +87,10 @@ nv17_fifo_chan_ctor(struct nvkm_object *parent, ...@@ -85,20 +87,10 @@ nv17_fifo_chan_ctor(struct nvkm_object *parent,
return 0; return 0;
} }
static struct nvkm_ofuncs const struct nvkm_fifo_chan_oclass
nv17_fifo_ofuncs = { nv17_fifo_dma_oclass = {
.ctor = nv17_fifo_chan_ctor, .base.oclass = NV17_CHANNEL_DMA,
.dtor = nv04_fifo_chan_dtor, .base.minver = 0,
.init = nv04_fifo_chan_init, .base.maxver = 0,
.fini = nv04_fifo_chan_fini, .ctor = nv17_fifo_dma_new,
.map = _nvkm_fifo_channel_map,
.rd32 = _nvkm_fifo_channel_rd32,
.wr32 = _nvkm_fifo_channel_wr32,
.ntfy = _nvkm_fifo_channel_ntfy
};
struct nvkm_oclass
nv17_fifo_sclass[] = {
{ NV17_CHANNEL_DMA, &nv17_fifo_ofuncs },
{}
}; };
...@@ -31,36 +31,47 @@ ...@@ -31,36 +31,47 @@
#include <nvif/class.h> #include <nvif/class.h>
#include <nvif/unpack.h> #include <nvif/unpack.h>
static bool
nv40_fifo_dma_engine(struct nvkm_engine *engine, u32 *reg, u32 *ctx)
{
switch (engine->subdev.index) {
case NVDEV_ENGINE_DMAOBJ:
case NVDEV_ENGINE_SW:
return false;
case NVDEV_ENGINE_GR:
*reg = 0x0032e0;
*ctx = 0x38;
return true;
case NVDEV_ENGINE_MPEG:
*reg = 0x00330c;
*ctx = 0x54;
return true;
default:
WARN_ON(1);
return false;
}
}
static int static int
nv40_fifo_context_detach(struct nvkm_object *parent, bool suspend, nv40_fifo_dma_engine_fini(struct nvkm_fifo_chan *base,
struct nvkm_object *engctx) struct nvkm_engine *engine, bool suspend)
{ {
struct nv04_fifo *fifo = (void *)parent->engine; struct nv04_fifo_chan *chan = nv04_fifo_chan(base);
struct nv04_fifo_chan *chan = (void *)parent; struct nv04_fifo *fifo = chan->fifo;
struct nvkm_device *device = fifo->base.engine.subdev.device; struct nvkm_device *device = fifo->base.engine.subdev.device;
struct nvkm_instmem *imem = device->imem; struct nvkm_instmem *imem = device->imem;
unsigned long flags; unsigned long flags;
u32 reg, ctx; u32 reg, ctx;
int chid;
switch (nv_engidx(engctx->engine)) { if (!nv40_fifo_dma_engine(engine, &reg, &ctx))
case NVDEV_ENGINE_SW:
return 0; return 0;
case NVDEV_ENGINE_GR:
reg = 0x32e0;
ctx = 0x38;
break;
case NVDEV_ENGINE_MPEG:
reg = 0x330c;
ctx = 0x54;
break;
default:
return -EINVAL;
}
spin_lock_irqsave(&fifo->base.lock, flags); spin_lock_irqsave(&fifo->base.lock, flags);
nvkm_mask(device, 0x002500, 0x00000001, 0x00000000); nvkm_mask(device, 0x002500, 0x00000001, 0x00000000);
if ((nvkm_rd32(device, 0x003204) & fifo->base.max) == chan->base.chid) chid = nvkm_rd32(device, 0x003204) & (fifo->base.nr - 1);
if (chid == chan->base.chid)
nvkm_wr32(device, reg, 0x00000000); nvkm_wr32(device, reg, 0x00000000);
nvkm_kmap(imem->ramfc); nvkm_kmap(imem->ramfc);
nvkm_wo32(imem->ramfc, chan->ramfc + ctx, 0x00000000); nvkm_wo32(imem->ramfc, chan->ramfc + ctx, 0x00000000);
...@@ -72,38 +83,29 @@ nv40_fifo_context_detach(struct nvkm_object *parent, bool suspend, ...@@ -72,38 +83,29 @@ nv40_fifo_context_detach(struct nvkm_object *parent, bool suspend,
} }
static int static int
nv40_fifo_context_attach(struct nvkm_object *parent, struct nvkm_object *engctx) nv40_fifo_dma_engine_init(struct nvkm_fifo_chan *base,
struct nvkm_engine *engine)
{ {
struct nv04_fifo *fifo = (void *)parent->engine; struct nv04_fifo_chan *chan = nv04_fifo_chan(base);
struct nv04_fifo_chan *chan = (void *)parent; struct nv04_fifo *fifo = chan->fifo;
struct nvkm_device *device = fifo->base.engine.subdev.device; struct nvkm_device *device = fifo->base.engine.subdev.device;
struct nvkm_instmem *imem = device->imem; struct nvkm_instmem *imem = device->imem;
unsigned long flags; unsigned long flags;
u32 reg, ctx; u32 inst, reg, ctx;
int chid;
switch (nv_engidx(engctx->engine)) { if (!nv40_fifo_dma_engine(engine, &reg, &ctx))
case NVDEV_ENGINE_SW:
return 0; return 0;
case NVDEV_ENGINE_GR: inst = chan->engn[engine->subdev.index]->addr >> 4;
reg = 0x32e0;
ctx = 0x38;
break;
case NVDEV_ENGINE_MPEG:
reg = 0x330c;
ctx = 0x54;
break;
default:
return -EINVAL;
}
spin_lock_irqsave(&fifo->base.lock, flags); spin_lock_irqsave(&fifo->base.lock, flags);
nv_engctx(engctx)->addr = nv_gpuobj(engctx)->addr >> 4;
nvkm_mask(device, 0x002500, 0x00000001, 0x00000000); nvkm_mask(device, 0x002500, 0x00000001, 0x00000000);
if ((nvkm_rd32(device, 0x003204) & fifo->base.max) == chan->base.chid) chid = nvkm_rd32(device, 0x003204) & (fifo->base.nr - 1);
nvkm_wr32(device, reg, nv_engctx(engctx)->addr); if (chid == chan->base.chid)
nvkm_wr32(device, reg, inst);
nvkm_kmap(imem->ramfc); nvkm_kmap(imem->ramfc);
nvkm_wo32(imem->ramfc, chan->ramfc + ctx, nv_engctx(engctx)->addr); nvkm_wo32(imem->ramfc, chan->ramfc + ctx, inst);
nvkm_done(imem->ramfc); nvkm_done(imem->ramfc);
nvkm_mask(device, 0x002500, 0x00000001, 0x00000001); nvkm_mask(device, 0x002500, 0x00000001, 0x00000001);
...@@ -111,57 +113,91 @@ nv40_fifo_context_attach(struct nvkm_object *parent, struct nvkm_object *engctx) ...@@ -111,57 +113,91 @@ nv40_fifo_context_attach(struct nvkm_object *parent, struct nvkm_object *engctx)
return 0; return 0;
} }
static void
nv40_fifo_dma_engine_dtor(struct nvkm_fifo_chan *base,
struct nvkm_engine *engine)
{
struct nv04_fifo_chan *chan = nv04_fifo_chan(base);
if (!chan->engn[engine->subdev.index] ||
chan->engn[engine->subdev.index]->object.oclass) {
chan->engn[engine->subdev.index] = NULL;
return;
}
nvkm_gpuobj_del(&chan->engn[engine->subdev.index]);
}
static int static int
nv40_fifo_object_attach(struct nvkm_object *parent, nv40_fifo_dma_engine_ctor(struct nvkm_fifo_chan *base,
struct nvkm_object *object, u32 handle) struct nvkm_engine *engine,
struct nvkm_object *object)
{ {
struct nv04_fifo *fifo = (void *)parent->engine; struct nv04_fifo_chan *chan = nv04_fifo_chan(base);
struct nv04_fifo_chan *chan = (void *)parent; const int engn = engine->subdev.index;
struct nvkm_instmem *imem = fifo->base.engine.subdev.device->imem; u32 reg, ctx;
u32 context, chid = chan->base.chid;
int ret;
if (nv_iclass(object, NV_GPUOBJ_CLASS)) if (!nv40_fifo_dma_engine(engine, &reg, &ctx))
context = nv_gpuobj(object)->addr >> 4; return 0;
else
context = 0x00000004; /* just non-zero */ if (nv_iclass(object, NV_GPUOBJ_CLASS)) {
chan->engn[engn] = nv_gpuobj(object);
if (object->engine) { return 0;
switch (nv_engidx(object->engine)) {
case NVDEV_ENGINE_DMAOBJ:
case NVDEV_ENGINE_SW:
context |= 0x00000000;
break;
case NVDEV_ENGINE_GR:
context |= 0x00100000;
break;
case NVDEV_ENGINE_MPEG:
context |= 0x00200000;
break;
default:
return -EINVAL;
}
} }
context |= chid << 23; return nvkm_object_bind(object, NULL, 0, &chan->engn[engn]);
}
mutex_lock(&nv_subdev(fifo)->mutex); static int
ret = nvkm_ramht_insert(imem->ramht, NULL, chid, 0, handle, context); nv40_fifo_dma_object_ctor(struct nvkm_fifo_chan *base,
mutex_unlock(&nv_subdev(fifo)->mutex); struct nvkm_object *object)
return ret; {
struct nv04_fifo_chan *chan = nv04_fifo_chan(base);
struct nvkm_instmem *imem = chan->fifo->base.engine.subdev.device->imem;
u32 context = chan->base.chid << 23;
u32 handle = object->handle;
int hash;
switch (object->engine->subdev.index) {
case NVDEV_ENGINE_DMAOBJ:
case NVDEV_ENGINE_SW : context |= 0x00000000; break;
case NVDEV_ENGINE_GR : context |= 0x00100000; break;
case NVDEV_ENGINE_MPEG : context |= 0x00200000; break;
default:
WARN_ON(1);
return -EINVAL;
}
mutex_lock(&chan->fifo->base.engine.subdev.mutex);
hash = nvkm_ramht_insert(imem->ramht, object, chan->base.chid, 4,
handle, context);
mutex_unlock(&chan->fifo->base.engine.subdev.mutex);
return hash;
} }
static const struct nvkm_fifo_chan_func
nv40_fifo_dma_func = {
.dtor = nv04_fifo_dma_dtor,
.init = nv04_fifo_dma_init,
.fini = nv04_fifo_dma_fini,
.engine_ctor = nv40_fifo_dma_engine_ctor,
.engine_dtor = nv40_fifo_dma_engine_dtor,
.engine_init = nv40_fifo_dma_engine_init,
.engine_fini = nv40_fifo_dma_engine_fini,
.object_ctor = nv40_fifo_dma_object_ctor,
.object_dtor = nv04_fifo_dma_object_dtor,
};
static int static int
nv40_fifo_chan_ctor(struct nvkm_object *parent, struct nvkm_object *engine, nv40_fifo_dma_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
struct nvkm_oclass *oclass, void *data, u32 size, void *data, u32 size, struct nvkm_object **pobject)
struct nvkm_object **pobject)
{ {
struct nvkm_object *parent = oclass->parent;
union { union {
struct nv03_channel_dma_v0 v0; struct nv03_channel_dma_v0 v0;
} *args = data; } *args = data;
struct nv04_fifo *fifo = (void *)engine; struct nv04_fifo *fifo = nv04_fifo(base);
struct nvkm_instmem *imem = fifo->base.engine.subdev.device->imem; struct nv04_fifo_chan *chan = NULL;
struct nv04_fifo_chan *chan; struct nvkm_device *device = fifo->base.engine.subdev.device;
struct nvkm_instmem *imem = device->imem;
int ret; int ret;
nvif_ioctl(parent, "create channel dma size %d\n", size); nvif_ioctl(parent, "create channel dma size %d\n", size);
...@@ -169,31 +205,33 @@ nv40_fifo_chan_ctor(struct nvkm_object *parent, struct nvkm_object *engine, ...@@ -169,31 +205,33 @@ nv40_fifo_chan_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
nvif_ioctl(parent, "create channel dma vers %d pushbuf %llx " nvif_ioctl(parent, "create channel dma vers %d pushbuf %llx "
"offset %08x\n", args->v0.version, "offset %08x\n", args->v0.version,
args->v0.pushbuf, args->v0.offset); args->v0.pushbuf, args->v0.offset);
if (!args->v0.pushbuf)
return -EINVAL;
} else } else
return ret; return ret;
ret = nvkm_fifo_channel_create(parent, engine, oclass, 0, 0xc00000, if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
0x1000, args->v0.pushbuf, return -ENOMEM;
(1ULL << NVDEV_ENGINE_DMAOBJ) | *pobject = &chan->base.object;
(1ULL << NVDEV_ENGINE_SW) |
(1ULL << NVDEV_ENGINE_GR) | ret = nvkm_fifo_chan_ctor(&nv40_fifo_dma_func, &fifo->base,
(1ULL << NVDEV_ENGINE_MPEG), &chan); 0x1000, 0x1000, false, 0, args->v0.pushbuf,
*pobject = nv_object(chan); (1ULL << NVDEV_ENGINE_DMAOBJ) |
(1ULL << NVDEV_ENGINE_GR) |
(1ULL << NVDEV_ENGINE_MPEG) |
(1ULL << NVDEV_ENGINE_SW),
0, 0xc00000, 0x1000, oclass, &chan->base);
chan->fifo = fifo;
if (ret) if (ret)
return ret; return ret;
args->v0.chid = chan->base.chid; args->v0.chid = chan->base.chid;
nv_parent(chan)->context_attach = nv40_fifo_context_attach;
nv_parent(chan)->context_detach = nv40_fifo_context_detach;
nv_parent(chan)->object_attach = nv40_fifo_object_attach;
nv_parent(chan)->object_detach = nv04_fifo_object_detach;
chan->ramfc = chan->base.chid * 128; chan->ramfc = chan->base.chid * 128;
nvkm_kmap(imem->ramfc); nvkm_kmap(imem->ramfc);
nvkm_wo32(imem->ramfc, chan->ramfc + 0x00, args->v0.offset); nvkm_wo32(imem->ramfc, chan->ramfc + 0x00, args->v0.offset);
nvkm_wo32(imem->ramfc, chan->ramfc + 0x04, args->v0.offset); nvkm_wo32(imem->ramfc, chan->ramfc + 0x04, args->v0.offset);
nvkm_wo32(imem->ramfc, chan->ramfc + 0x0c, chan->base.pushgpu->addr >> 4); nvkm_wo32(imem->ramfc, chan->ramfc + 0x0c, chan->base.push->addr >> 4);
nvkm_wo32(imem->ramfc, chan->ramfc + 0x18, 0x30000000 | nvkm_wo32(imem->ramfc, chan->ramfc + 0x18, 0x30000000 |
NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES | NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES | NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
...@@ -206,20 +244,10 @@ nv40_fifo_chan_ctor(struct nvkm_object *parent, struct nvkm_object *engine, ...@@ -206,20 +244,10 @@ nv40_fifo_chan_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
return 0; return 0;
} }
static struct nvkm_ofuncs const struct nvkm_fifo_chan_oclass
nv40_fifo_ofuncs = { nv40_fifo_dma_oclass = {
.ctor = nv40_fifo_chan_ctor, .base.oclass = NV40_CHANNEL_DMA,
.dtor = nv04_fifo_chan_dtor, .base.minver = 0,
.init = nv04_fifo_chan_init, .base.maxver = 0,
.fini = nv04_fifo_chan_fini, .ctor = nv40_fifo_dma_new,
.map = _nvkm_fifo_channel_map,
.rd32 = _nvkm_fifo_channel_rd32,
.wr32 = _nvkm_fifo_channel_wr32,
.ntfy = _nvkm_fifo_channel_ntfy
};
struct nvkm_oclass
nv40_fifo_sclass[] = {
{ NV40_CHANNEL_DMA, &nv40_fifo_ofuncs },
{}
}; };
...@@ -30,15 +30,14 @@ ...@@ -30,15 +30,14 @@
#include <nvif/unpack.h> #include <nvif/unpack.h>
static int static int
nv50_fifo_chan_ctor_dma(struct nvkm_object *parent, struct nvkm_object *engine, nv50_fifo_dma_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
struct nvkm_oclass *oclass, void *data, u32 size, void *data, u32 size, struct nvkm_object **pobject)
struct nvkm_object **pobject)
{ {
struct nvkm_object *parent = oclass->parent;
union { union {
struct nv50_channel_dma_v0 v0; struct nv50_channel_dma_v0 v0;
} *args = data; } *args = data;
struct nvkm_device *device = parent->engine->subdev.device; struct nv50_fifo *fifo = nv50_fifo(base);
struct nv50_fifo_base *base = (void *)parent;
struct nv50_fifo_chan *chan; struct nv50_fifo_chan *chan;
int ret; int ret;
...@@ -48,68 +47,45 @@ nv50_fifo_chan_ctor_dma(struct nvkm_object *parent, struct nvkm_object *engine, ...@@ -48,68 +47,45 @@ nv50_fifo_chan_ctor_dma(struct nvkm_object *parent, struct nvkm_object *engine,
"pushbuf %llx offset %016llx\n", "pushbuf %llx offset %016llx\n",
args->v0.version, args->v0.vm, args->v0.pushbuf, args->v0.version, args->v0.vm, args->v0.pushbuf,
args->v0.offset); args->v0.offset);
if (args->v0.vm) if (!args->v0.pushbuf)
return -ENOENT; return -EINVAL;
} else } else
return ret; return ret;
ret = nvkm_fifo_channel_create(parent, engine, oclass, 0, 0xc00000, if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
0x2000, args->v0.pushbuf, return -ENOMEM;
(1ULL << NVDEV_ENGINE_DMAOBJ) | *pobject = &chan->base.object;
(1ULL << NVDEV_ENGINE_SW) |
(1ULL << NVDEV_ENGINE_GR) | ret = nv50_fifo_chan_ctor(fifo, args->v0.vm, args->v0.pushbuf,
(1ULL << NVDEV_ENGINE_MPEG), &chan); oclass, chan);
*pobject = nv_object(chan);
if (ret) if (ret)
return ret; return ret;
chan->base.inst = base->base.gpuobj.addr;
args->v0.chid = chan->base.chid; args->v0.chid = chan->base.chid;
nv_parent(chan)->context_attach = nv50_fifo_context_attach; nvkm_kmap(chan->ramfc);
nv_parent(chan)->context_detach = nv50_fifo_context_detach; nvkm_wo32(chan->ramfc, 0x08, lower_32_bits(args->v0.offset));
nv_parent(chan)->object_attach = nv50_fifo_object_attach; nvkm_wo32(chan->ramfc, 0x0c, upper_32_bits(args->v0.offset));
nv_parent(chan)->object_detach = nv50_fifo_object_detach; nvkm_wo32(chan->ramfc, 0x10, lower_32_bits(args->v0.offset));
nvkm_wo32(chan->ramfc, 0x14, upper_32_bits(args->v0.offset));
ret = nvkm_ramht_new(device, 0x8000, 16, &base->base.gpuobj, nvkm_wo32(chan->ramfc, 0x3c, 0x003f6078);
&chan->ramht); nvkm_wo32(chan->ramfc, 0x44, 0x01003fff);
if (ret) nvkm_wo32(chan->ramfc, 0x48, chan->base.push->node->offset >> 4);
return ret; nvkm_wo32(chan->ramfc, 0x4c, 0xffffffff);
nvkm_wo32(chan->ramfc, 0x60, 0x7fffffff);
nvkm_kmap(base->ramfc); nvkm_wo32(chan->ramfc, 0x78, 0x00000000);
nvkm_wo32(base->ramfc, 0x08, lower_32_bits(args->v0.offset)); nvkm_wo32(chan->ramfc, 0x7c, 0x30000001);
nvkm_wo32(base->ramfc, 0x0c, upper_32_bits(args->v0.offset)); nvkm_wo32(chan->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
nvkm_wo32(base->ramfc, 0x10, lower_32_bits(args->v0.offset));
nvkm_wo32(base->ramfc, 0x14, upper_32_bits(args->v0.offset));
nvkm_wo32(base->ramfc, 0x3c, 0x003f6078);
nvkm_wo32(base->ramfc, 0x44, 0x01003fff);
nvkm_wo32(base->ramfc, 0x48, chan->base.pushgpu->node->offset >> 4);
nvkm_wo32(base->ramfc, 0x4c, 0xffffffff);
nvkm_wo32(base->ramfc, 0x60, 0x7fffffff);
nvkm_wo32(base->ramfc, 0x78, 0x00000000);
nvkm_wo32(base->ramfc, 0x7c, 0x30000001);
nvkm_wo32(base->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
(4 << 24) /* SEARCH_FULL */ | (4 << 24) /* SEARCH_FULL */ |
(chan->ramht->gpuobj->node->offset >> 4)); (chan->ramht->gpuobj->node->offset >> 4));
nvkm_done(base->ramfc); nvkm_done(chan->ramfc);
return 0; return 0;
} }
static struct nvkm_ofuncs const struct nvkm_fifo_chan_oclass
nv50_fifo_ofuncs_dma = { nv50_fifo_dma_oclass = {
.ctor = nv50_fifo_chan_ctor_dma, .base.oclass = NV50_CHANNEL_DMA,
.dtor = nv50_fifo_chan_dtor, .base.minver = 0,
.init = nv50_fifo_chan_init, .base.maxver = 0,
.fini = nv50_fifo_chan_fini, .ctor = nv50_fifo_dma_new,
.map = _nvkm_fifo_channel_map,
.rd32 = _nvkm_fifo_channel_rd32,
.wr32 = _nvkm_fifo_channel_wr32,
.ntfy = _nvkm_fifo_channel_ntfy
};
struct nvkm_oclass
nv50_fifo_sclass[] = {
{ NV50_CHANNEL_DMA, &nv50_fifo_ofuncs_dma },
{ NV50_CHANNEL_GPFIFO, &nv50_fifo_ofuncs_ind },
{}
}; };
...@@ -47,6 +47,15 @@ g84_fifo_uevent_func = { ...@@ -47,6 +47,15 @@ g84_fifo_uevent_func = {
.fini = g84_fifo_uevent_fini, .fini = g84_fifo_uevent_fini,
}; };
static const struct nvkm_fifo_func
g84_fifo_func = {
.chan = {
&g84_fifo_dma_oclass,
&g84_fifo_gpfifo_oclass,
NULL
},
};
static int static int
g84_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine, g84_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
struct nvkm_oclass *oclass, void *data, u32 size, struct nvkm_oclass *oclass, void *data, u32 size,
...@@ -61,6 +70,8 @@ g84_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine, ...@@ -61,6 +70,8 @@ g84_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
if (ret) if (ret)
return ret; return ret;
fifo->base.func = &g84_fifo_func;
ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 128 * 4, 0x1000, ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 128 * 4, 0x1000,
false, &fifo->runlist[0]); false, &fifo->runlist[0]);
if (ret) if (ret)
...@@ -77,8 +88,6 @@ g84_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine, ...@@ -77,8 +88,6 @@ g84_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
nv_subdev(fifo)->unit = 0x00000100; nv_subdev(fifo)->unit = 0x00000100;
nv_subdev(fifo)->intr = nv04_fifo_intr; nv_subdev(fifo)->intr = nv04_fifo_intr;
nv_engine(fifo)->cclass = &g84_fifo_cclass;
nv_engine(fifo)->sclass = g84_fifo_sclass;
fifo->base.pause = nv04_fifo_pause; fifo->base.pause = nv04_fifo_pause;
fifo->base.start = nv04_fifo_start; fifo->base.start = nv04_fifo_start;
return 0; return 0;
......
...@@ -58,28 +58,26 @@ gf100_fifo_uevent_func = { ...@@ -58,28 +58,26 @@ gf100_fifo_uevent_func = {
void void
gf100_fifo_runlist_update(struct gf100_fifo *fifo) gf100_fifo_runlist_update(struct gf100_fifo *fifo)
{ {
struct gf100_fifo_chan *chan;
struct nvkm_subdev *subdev = &fifo->base.engine.subdev; struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
struct nvkm_device *device = subdev->device; struct nvkm_device *device = subdev->device;
struct nvkm_memory *cur; struct nvkm_memory *cur;
int i, p; int nr = 0;
mutex_lock(&nv_subdev(fifo)->mutex); mutex_lock(&nv_subdev(fifo)->mutex);
cur = fifo->runlist.mem[fifo->runlist.active]; cur = fifo->runlist.mem[fifo->runlist.active];
fifo->runlist.active = !fifo->runlist.active; fifo->runlist.active = !fifo->runlist.active;
nvkm_kmap(cur); nvkm_kmap(cur);
for (i = 0, p = 0; i < 128; i++) { list_for_each_entry(chan, &fifo->chan, head) {
struct gf100_fifo_chan *chan = (void *)fifo->base.channel[i]; nvkm_wo32(cur, (nr * 8) + 0, chan->base.chid);
if (chan && chan->state == RUNNING) { nvkm_wo32(cur, (nr * 8) + 4, 0x00000004);
nvkm_wo32(cur, p + 0, i); nr++;
nvkm_wo32(cur, p + 4, 0x00000004);
p += 8;
}
} }
nvkm_done(cur); nvkm_done(cur);
nvkm_wr32(device, 0x002270, nvkm_memory_addr(cur) >> 12); nvkm_wr32(device, 0x002270, nvkm_memory_addr(cur) >> 12);
nvkm_wr32(device, 0x002274, 0x01f00000 | (p >> 3)); nvkm_wr32(device, 0x002274, 0x01f00000 | nr);
if (wait_event_timeout(fifo->runlist.wait, if (wait_event_timeout(fifo->runlist.wait,
!(nvkm_rd32(device, 0x00227c) & 0x00100000), !(nvkm_rd32(device, 0x00227c) & 0x00100000),
...@@ -166,7 +164,8 @@ gf100_fifo_recover(struct gf100_fifo *fifo, struct nvkm_engine *engine, ...@@ -166,7 +164,8 @@ gf100_fifo_recover(struct gf100_fifo *fifo, struct nvkm_engine *engine,
assert_spin_locked(&fifo->base.lock); assert_spin_locked(&fifo->base.lock);
nvkm_mask(device, 0x003004 + (chid * 0x08), 0x00000001, 0x00000000); nvkm_mask(device, 0x003004 + (chid * 0x08), 0x00000001, 0x00000000);
chan->state = KILLED; list_del_init(&chan->head);
chan->killed = true;
fifo->mask |= 1ULL << nv_engidx(engine); fifo->mask |= 1ULL << nv_engidx(engine);
schedule_work(&fifo->fault); schedule_work(&fifo->fault);
...@@ -198,11 +197,15 @@ gf100_fifo_intr_sched_ctxsw(struct gf100_fifo *fifo) ...@@ -198,11 +197,15 @@ gf100_fifo_intr_sched_ctxsw(struct gf100_fifo *fifo)
(void)save; (void)save;
if (busy && unk0 && unk1) { if (busy && unk0 && unk1) {
if (!(chan = (void *)fifo->base.channel[chid])) list_for_each_entry(chan, &fifo->chan, head) {
continue; if (chan->base.chid == chid) {
if (!(engine = gf100_fifo_engine(fifo, engn))) engine = gf100_fifo_engine(fifo, engn);
continue; if (!engine)
gf100_fifo_recover(fifo, engine, chan); break;
gf100_fifo_recover(fifo, engine, chan);
break;
}
}
} }
} }
spin_unlock_irqrestore(&fifo->base.lock, flags); spin_unlock_irqrestore(&fifo->base.lock, flags);
...@@ -343,7 +346,8 @@ gf100_fifo_intr_fault(struct gf100_fifo *fifo, int unit) ...@@ -343,7 +346,8 @@ gf100_fifo_intr_fault(struct gf100_fifo *fifo, int unit)
write ? "write" : "read", (u64)vahi << 32 | valo, write ? "write" : "read", (u64)vahi << 32 | valo,
unit, eu ? eu->name : "", client, gpcid, ec ? ec->name : "", unit, eu ? eu->name : "", client, gpcid, ec ? ec->name : "",
reason, er ? er->name : "", chan ? chan->chid : -1, reason, er ? er->name : "", chan ? chan->chid : -1,
(u64)inst << 12, nvkm_client_name(chan)); (u64)inst << 12,
chan ? chan->object.client->name : "unknown");
if (engine && chan) if (engine && chan)
gf100_fifo_recover(fifo, engine, (void *)chan); gf100_fifo_recover(fifo, engine, (void *)chan);
...@@ -369,6 +373,8 @@ gf100_fifo_intr_pbdma(struct gf100_fifo *fifo, int unit) ...@@ -369,6 +373,8 @@ gf100_fifo_intr_pbdma(struct gf100_fifo *fifo, int unit)
u32 chid = nvkm_rd32(device, 0x040120 + (unit * 0x2000)) & 0x7f; u32 chid = nvkm_rd32(device, 0x040120 + (unit * 0x2000)) & 0x7f;
u32 subc = (addr & 0x00070000) >> 16; u32 subc = (addr & 0x00070000) >> 16;
u32 mthd = (addr & 0x00003ffc); u32 mthd = (addr & 0x00003ffc);
struct nvkm_fifo_chan *chan;
unsigned long flags;
u32 show= stat; u32 show= stat;
char msg[128]; char msg[128];
...@@ -381,11 +387,13 @@ gf100_fifo_intr_pbdma(struct gf100_fifo *fifo, int unit) ...@@ -381,11 +387,13 @@ gf100_fifo_intr_pbdma(struct gf100_fifo *fifo, int unit)
if (show) { if (show) {
nvkm_snprintbf(msg, sizeof(msg), gf100_fifo_pbdma_intr, show); nvkm_snprintbf(msg, sizeof(msg), gf100_fifo_pbdma_intr, show);
nvkm_error(subdev, "PBDMA%d: %08x [%s] ch %d [%s] subc %d " chan = nvkm_fifo_chan_chid(&fifo->base, chid, &flags);
"mthd %04x data %08x\n", nvkm_error(subdev, "PBDMA%d: %08x [%s] ch %d [%010llx %s] "
unit, show, msg, chid, "subc %d mthd %04x data %08x\n",
nvkm_client_name_for_fifo_chid(&fifo->base, chid), unit, show, msg, chid, chan ? chan->inst->addr : 0,
chan ? chan->object.client->name : "unknown",
subc, mthd, data); subc, mthd, data);
nvkm_fifo_chan_put(&fifo->base, flags, &chan);
} }
nvkm_wr32(device, 0x0400c0 + (unit * 0x2000), 0x80600008); nvkm_wr32(device, 0x0400c0 + (unit * 0x2000), 0x80600008);
...@@ -579,6 +587,14 @@ gf100_fifo_dtor(struct nvkm_object *object) ...@@ -579,6 +587,14 @@ gf100_fifo_dtor(struct nvkm_object *object)
nvkm_fifo_destroy(&fifo->base); nvkm_fifo_destroy(&fifo->base);
} }
static const struct nvkm_fifo_func
gf100_fifo_func = {
.chan = {
&gf100_fifo_gpfifo_oclass,
NULL
},
};
static int static int
gf100_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine, gf100_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
struct nvkm_oclass *oclass, void *data, u32 size, struct nvkm_oclass *oclass, void *data, u32 size,
...@@ -594,6 +610,9 @@ gf100_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine, ...@@ -594,6 +610,9 @@ gf100_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
if (ret) if (ret)
return ret; return ret;
fifo->base.func = &gf100_fifo_func;
INIT_LIST_HEAD(&fifo->chan);
INIT_WORK(&fifo->fault, gf100_fifo_recover_work); INIT_WORK(&fifo->fault, gf100_fifo_recover_work);
ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 0x1000, ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 0x1000,
...@@ -625,8 +644,6 @@ gf100_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine, ...@@ -625,8 +644,6 @@ gf100_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
nv_subdev(fifo)->unit = 0x00000100; nv_subdev(fifo)->unit = 0x00000100;
nv_subdev(fifo)->intr = gf100_fifo_intr; nv_subdev(fifo)->intr = gf100_fifo_intr;
nv_engine(fifo)->cclass = &gf100_fifo_cclass;
nv_engine(fifo)->sclass = gf100_fifo_sclass;
return 0; return 0;
} }
......
#ifndef __GF100_FIFO_H__ #ifndef __GF100_FIFO_H__
#define __GF100_FIFO_H__ #define __GF100_FIFO_H__
#define gf100_fifo(p) container_of((p), struct gf100_fifo, base)
#include "priv.h" #include "priv.h"
#include <subdev/mmu.h>
struct gf100_fifo { struct gf100_fifo {
struct nvkm_fifo base; struct nvkm_fifo base;
struct list_head chan;
struct work_struct fault; struct work_struct fault;
u64 mask; u64 mask;
......
...@@ -32,23 +32,6 @@ ...@@ -32,23 +32,6 @@
#include <nvif/class.h> #include <nvif/class.h>
#define _(a,b) { (a), ((1ULL << (a)) | (b)) }
static const struct {
u64 subdev;
u64 mask;
} fifo_engine[] = {
_(NVDEV_ENGINE_GR , (1ULL << NVDEV_ENGINE_SW) |
(1ULL << NVDEV_ENGINE_CE2)),
_(NVDEV_ENGINE_MSPDEC , 0),
_(NVDEV_ENGINE_MSPPP , 0),
_(NVDEV_ENGINE_MSVLD , 0),
_(NVDEV_ENGINE_CE0 , 0),
_(NVDEV_ENGINE_CE1 , 0),
_(NVDEV_ENGINE_MSENC , 0),
};
#undef _
#define FIFO_ENGINE_NR ARRAY_SIZE(fifo_engine)
static void static void
gk104_fifo_uevent_fini(struct nvkm_event *event, int type, int index) gk104_fifo_uevent_fini(struct nvkm_event *event, int type, int index)
{ {
...@@ -76,28 +59,26 @@ void ...@@ -76,28 +59,26 @@ void
gk104_fifo_runlist_update(struct gk104_fifo *fifo, u32 engine) gk104_fifo_runlist_update(struct gk104_fifo *fifo, u32 engine)
{ {
struct gk104_fifo_engn *engn = &fifo->engine[engine]; struct gk104_fifo_engn *engn = &fifo->engine[engine];
struct gk104_fifo_chan *chan;
struct nvkm_subdev *subdev = &fifo->base.engine.subdev; struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
struct nvkm_device *device = subdev->device; struct nvkm_device *device = subdev->device;
struct nvkm_memory *cur; struct nvkm_memory *cur;
int i, p; int nr = 0;
mutex_lock(&nv_subdev(fifo)->mutex); mutex_lock(&nv_subdev(fifo)->mutex);
cur = engn->runlist[engn->cur_runlist]; cur = engn->runlist[engn->cur_runlist];
engn->cur_runlist = !engn->cur_runlist; engn->cur_runlist = !engn->cur_runlist;
nvkm_kmap(cur); nvkm_kmap(cur);
for (i = 0, p = 0; i < fifo->base.max; i++) { list_for_each_entry(chan, &engn->chan, head) {
struct gk104_fifo_chan *chan = (void *)fifo->base.channel[i]; nvkm_wo32(cur, (nr * 8) + 0, chan->base.chid);
if (chan && chan->state == RUNNING && chan->engine == engine) { nvkm_wo32(cur, (nr * 8) + 4, 0x00000000);
nvkm_wo32(cur, p + 0, i); nr++;
nvkm_wo32(cur, p + 4, 0x00000000);
p += 8;
}
} }
nvkm_done(cur); nvkm_done(cur);
nvkm_wr32(device, 0x002270, nvkm_memory_addr(cur) >> 12); nvkm_wr32(device, 0x002270, nvkm_memory_addr(cur) >> 12);
nvkm_wr32(device, 0x002274, (engine << 20) | (p >> 3)); nvkm_wr32(device, 0x002274, (engine << 20) | nr);
if (wait_event_timeout(engn->wait, !(nvkm_rd32(device, 0x002284 + if (wait_event_timeout(engn->wait, !(nvkm_rd32(device, 0x002284 +
(engine * 0x08)) & 0x00100000), (engine * 0x08)) & 0x00100000),
...@@ -106,31 +87,13 @@ gk104_fifo_runlist_update(struct gk104_fifo *fifo, u32 engine) ...@@ -106,31 +87,13 @@ gk104_fifo_runlist_update(struct gk104_fifo *fifo, u32 engine)
mutex_unlock(&nv_subdev(fifo)->mutex); mutex_unlock(&nv_subdev(fifo)->mutex);
} }
static inline int
gk104_fifo_engidx(struct gk104_fifo *fifo, u32 engn)
{
switch (engn) {
case NVDEV_ENGINE_GR :
case NVDEV_ENGINE_CE2 : engn = 0; break;
case NVDEV_ENGINE_MSVLD : engn = 1; break;
case NVDEV_ENGINE_MSPPP : engn = 2; break;
case NVDEV_ENGINE_MSPDEC: engn = 3; break;
case NVDEV_ENGINE_CE0 : engn = 4; break;
case NVDEV_ENGINE_CE1 : engn = 5; break;
case NVDEV_ENGINE_MSENC : engn = 6; break;
default:
return -1;
}
return engn;
}
static inline struct nvkm_engine * static inline struct nvkm_engine *
gk104_fifo_engine(struct gk104_fifo *fifo, u32 engn) gk104_fifo_engine(struct gk104_fifo *fifo, u32 engn)
{ {
if (engn >= ARRAY_SIZE(fifo_engine)) u64 subdevs = gk104_fifo_engine_subdev(engn);
return NULL; if (subdevs)
return nvkm_engine(fifo, fifo_engine[engn].subdev); return nvkm_engine(fifo, __ffs(subdevs));
return NULL;
} }
static void static void
...@@ -149,7 +112,7 @@ gk104_fifo_recover_work(struct work_struct *work) ...@@ -149,7 +112,7 @@ gk104_fifo_recover_work(struct work_struct *work)
spin_unlock_irqrestore(&fifo->base.lock, flags); spin_unlock_irqrestore(&fifo->base.lock, flags);
for (todo = mask; engn = __ffs64(todo), todo; todo &= ~(1 << engn)) for (todo = mask; engn = __ffs64(todo), todo; todo &= ~(1 << engn))
engm |= 1 << gk104_fifo_engidx(fifo, engn); engm |= 1 << gk104_fifo_subdev_engine(engn);
nvkm_mask(device, 0x002630, engm, engm); nvkm_mask(device, 0x002630, engm, engm);
for (todo = mask; engn = __ffs64(todo), todo; todo &= ~(1 << engn)) { for (todo = mask; engn = __ffs64(todo), todo; todo &= ~(1 << engn)) {
...@@ -157,7 +120,7 @@ gk104_fifo_recover_work(struct work_struct *work) ...@@ -157,7 +120,7 @@ gk104_fifo_recover_work(struct work_struct *work)
nvkm_subdev_fini(&engine->subdev, false); nvkm_subdev_fini(&engine->subdev, false);
WARN_ON(nvkm_subdev_init(&engine->subdev)); WARN_ON(nvkm_subdev_init(&engine->subdev));
} }
gk104_fifo_runlist_update(fifo, gk104_fifo_engidx(fifo, engn)); gk104_fifo_runlist_update(fifo, gk104_fifo_subdev_engine(engn));
} }
nvkm_wr32(device, 0x00262c, engm); nvkm_wr32(device, 0x00262c, engm);
...@@ -177,7 +140,8 @@ gk104_fifo_recover(struct gk104_fifo *fifo, struct nvkm_engine *engine, ...@@ -177,7 +140,8 @@ gk104_fifo_recover(struct gk104_fifo *fifo, struct nvkm_engine *engine,
assert_spin_locked(&fifo->base.lock); assert_spin_locked(&fifo->base.lock);
nvkm_mask(device, 0x800004 + (chid * 0x08), 0x00000800, 0x00000800); nvkm_mask(device, 0x800004 + (chid * 0x08), 0x00000800, 0x00000800);
chan->state = KILLED; list_del_init(&chan->head);
chan->killed = true;
fifo->mask |= 1ULL << nv_engidx(engine); fifo->mask |= 1ULL << nv_engidx(engine);
schedule_work(&fifo->fault); schedule_work(&fifo->fault);
...@@ -223,7 +187,7 @@ gk104_fifo_intr_sched_ctxsw(struct gk104_fifo *fifo) ...@@ -223,7 +187,7 @@ gk104_fifo_intr_sched_ctxsw(struct gk104_fifo *fifo)
u32 engn; u32 engn;
spin_lock_irqsave(&fifo->base.lock, flags); spin_lock_irqsave(&fifo->base.lock, flags);
for (engn = 0; engn < ARRAY_SIZE(fifo_engine); engn++) { for (engn = 0; engn < ARRAY_SIZE(fifo->engine); engn++) {
u32 stat = nvkm_rd32(device, 0x002640 + (engn * 0x04)); u32 stat = nvkm_rd32(device, 0x002640 + (engn * 0x04));
u32 busy = (stat & 0x80000000); u32 busy = (stat & 0x80000000);
u32 next = (stat & 0x07ff0000) >> 16; u32 next = (stat & 0x07ff0000) >> 16;
...@@ -235,11 +199,15 @@ gk104_fifo_intr_sched_ctxsw(struct gk104_fifo *fifo) ...@@ -235,11 +199,15 @@ gk104_fifo_intr_sched_ctxsw(struct gk104_fifo *fifo)
(void)save; (void)save;
if (busy && chsw) { if (busy && chsw) {
if (!(chan = (void *)fifo->base.channel[chid])) list_for_each_entry(chan, &fifo->engine[engn].chan, head) {
continue; if (chan->base.chid == chid) {
if (!(engine = gk104_fifo_engine(fifo, engn))) engine = gk104_fifo_engine(fifo, engn);
continue; if (!engine)
gk104_fifo_recover(fifo, engine, chan); break;
gk104_fifo_recover(fifo, engine, chan);
break;
}
}
} }
} }
spin_unlock_irqrestore(&fifo->base.lock, flags); spin_unlock_irqrestore(&fifo->base.lock, flags);
...@@ -444,7 +412,8 @@ gk104_fifo_intr_fault(struct gk104_fifo *fifo, int unit) ...@@ -444,7 +412,8 @@ gk104_fifo_intr_fault(struct gk104_fifo *fifo, int unit)
write ? "write" : "read", (u64)vahi << 32 | valo, write ? "write" : "read", (u64)vahi << 32 | valo,
unit, eu ? eu->name : "", client, gpcid, ec ? ec->name : "", unit, eu ? eu->name : "", client, gpcid, ec ? ec->name : "",
reason, er ? er->name : "", chan ? chan->chid : -1, reason, er ? er->name : "", chan ? chan->chid : -1,
(u64)inst << 12, nvkm_client_name(chan)); (u64)inst << 12,
chan ? chan->object.client->name : "unknown");
if (engine && chan) if (engine && chan)
gk104_fifo_recover(fifo, engine, (void *)chan); gk104_fifo_recover(fifo, engine, (void *)chan);
...@@ -498,6 +467,8 @@ gk104_fifo_intr_pbdma_0(struct gk104_fifo *fifo, int unit) ...@@ -498,6 +467,8 @@ gk104_fifo_intr_pbdma_0(struct gk104_fifo *fifo, int unit)
u32 subc = (addr & 0x00070000) >> 16; u32 subc = (addr & 0x00070000) >> 16;
u32 mthd = (addr & 0x00003ffc); u32 mthd = (addr & 0x00003ffc);
u32 show = stat; u32 show = stat;
struct nvkm_fifo_chan *chan;
unsigned long flags;
char msg[128]; char msg[128];
if (stat & 0x00800000) { if (stat & 0x00800000) {
...@@ -510,11 +481,13 @@ gk104_fifo_intr_pbdma_0(struct gk104_fifo *fifo, int unit) ...@@ -510,11 +481,13 @@ gk104_fifo_intr_pbdma_0(struct gk104_fifo *fifo, int unit)
if (show) { if (show) {
nvkm_snprintbf(msg, sizeof(msg), gk104_fifo_pbdma_intr_0, show); nvkm_snprintbf(msg, sizeof(msg), gk104_fifo_pbdma_intr_0, show);
nvkm_error(subdev, "PBDMA%d: %08x [%s] ch %d [%s] subc %d " chan = nvkm_fifo_chan_chid(&fifo->base, chid, &flags);
"mthd %04x data %08x\n", nvkm_error(subdev, "PBDMA%d: %08x [%s] ch %d [%010llx %s] "
unit, show, msg, chid, "subc %d mthd %04x data %08x\n",
nvkm_client_name_for_fifo_chid(&fifo->base, chid), unit, show, msg, chid, chan ? chan->inst->addr : 0,
chan ? chan->object.client->name : "unknown",
subc, mthd, data); subc, mthd, data);
nvkm_fifo_chan_put(&fifo->base, flags, &chan);
} }
nvkm_wr32(device, 0x040108 + (unit * 0x2000), stat); nvkm_wr32(device, 0x040108 + (unit * 0x2000), stat);
...@@ -722,7 +695,7 @@ gk104_fifo_dtor(struct nvkm_object *object) ...@@ -722,7 +695,7 @@ gk104_fifo_dtor(struct nvkm_object *object)
nvkm_vm_put(&fifo->user.bar); nvkm_vm_put(&fifo->user.bar);
nvkm_memory_del(&fifo->user.mem); nvkm_memory_del(&fifo->user.mem);
for (i = 0; i < FIFO_ENGINE_NR; i++) { for (i = 0; i < ARRAY_SIZE(fifo->engine); i++) {
nvkm_memory_del(&fifo->engine[i].runlist[1]); nvkm_memory_del(&fifo->engine[i].runlist[1]);
nvkm_memory_del(&fifo->engine[i].runlist[0]); nvkm_memory_del(&fifo->engine[i].runlist[0]);
} }
...@@ -730,6 +703,14 @@ gk104_fifo_dtor(struct nvkm_object *object) ...@@ -730,6 +703,14 @@ gk104_fifo_dtor(struct nvkm_object *object)
nvkm_fifo_destroy(&fifo->base); nvkm_fifo_destroy(&fifo->base);
} }
static const struct nvkm_fifo_func
gk104_fifo_func = {
.chan = {
&gk104_fifo_gpfifo_oclass,
NULL
},
};
int int
gk104_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine, gk104_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
struct nvkm_oclass *oclass, void *data, u32 size, struct nvkm_oclass *oclass, void *data, u32 size,
...@@ -747,9 +728,11 @@ gk104_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine, ...@@ -747,9 +728,11 @@ gk104_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
if (ret) if (ret)
return ret; return ret;
fifo->base.func = &gk104_fifo_func;
INIT_WORK(&fifo->fault, gk104_fifo_recover_work); INIT_WORK(&fifo->fault, gk104_fifo_recover_work);
for (i = 0; i < FIFO_ENGINE_NR; i++) { for (i = 0; i < ARRAY_SIZE(fifo->engine); i++) {
ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
0x8000, 0x1000, false, 0x8000, 0x1000, false,
&fifo->engine[i].runlist[0]); &fifo->engine[i].runlist[0]);
...@@ -763,6 +746,7 @@ gk104_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine, ...@@ -763,6 +746,7 @@ gk104_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
return ret; return ret;
init_waitqueue_head(&fifo->engine[i].wait); init_waitqueue_head(&fifo->engine[i].wait);
INIT_LIST_HEAD(&fifo->engine[i].chan);
} }
ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
...@@ -783,8 +767,6 @@ gk104_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine, ...@@ -783,8 +767,6 @@ gk104_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
nv_subdev(fifo)->unit = 0x00000100; nv_subdev(fifo)->unit = 0x00000100;
nv_subdev(fifo)->intr = gk104_fifo_intr; nv_subdev(fifo)->intr = gk104_fifo_intr;
nv_engine(fifo)->cclass = &gk104_fifo_cclass;
nv_engine(fifo)->sclass = gk104_fifo_sclass;
return 0; return 0;
} }
......
#ifndef __GK104_FIFO_H__ #ifndef __GK104_FIFO_H__
#define __GK104_FIFO_H__ #define __GK104_FIFO_H__
#define gk104_fifo(p) container_of((p), struct gk104_fifo, base)
#include "priv.h" #include "priv.h"
#include <subdev/mmu.h>
struct gk104_fifo_engn { struct gk104_fifo_engn {
struct nvkm_memory *runlist[2]; struct nvkm_memory *runlist[2];
int cur_runlist; int cur_runlist;
wait_queue_head_t wait; wait_queue_head_t wait;
struct list_head chan;
}; };
struct gk104_fifo { struct gk104_fifo {
...@@ -38,4 +42,42 @@ void gk104_fifo_runlist_update(struct gk104_fifo *, u32 engine); ...@@ -38,4 +42,42 @@ void gk104_fifo_runlist_update(struct gk104_fifo *, u32 engine);
int gm204_fifo_ctor(struct nvkm_object *, struct nvkm_object *, int gm204_fifo_ctor(struct nvkm_object *, struct nvkm_object *,
struct nvkm_oclass *, void *, u32, struct nvkm_oclass *, void *, u32,
struct nvkm_object **); struct nvkm_object **);
static inline u64
gk104_fifo_engine_subdev(int engine)
{
switch (engine) {
case 0: return (1ULL << NVDEV_ENGINE_GR) |
(1ULL << NVDEV_ENGINE_SW) |
(1ULL << NVDEV_ENGINE_CE2);
case 1: return (1ULL << NVDEV_ENGINE_MSPDEC);
case 2: return (1ULL << NVDEV_ENGINE_MSPPP);
case 3: return (1ULL << NVDEV_ENGINE_MSVLD);
case 4: return (1ULL << NVDEV_ENGINE_CE0);
case 5: return (1ULL << NVDEV_ENGINE_CE1);
case 6: return (1ULL << NVDEV_ENGINE_MSENC);
default:
WARN_ON(1);
return 0;
}
}
static inline int
gk104_fifo_subdev_engine(int subdev)
{
switch (subdev) {
case NVDEV_ENGINE_GR:
case NVDEV_ENGINE_SW:
case NVDEV_ENGINE_CE2 : return 0;
case NVDEV_ENGINE_MSPDEC: return 1;
case NVDEV_ENGINE_MSPPP : return 2;
case NVDEV_ENGINE_MSVLD : return 3;
case NVDEV_ENGINE_CE0 : return 4;
case NVDEV_ENGINE_CE1 : return 5;
case NVDEV_ENGINE_MSENC : return 6;
default:
WARN_ON(1);
return 0;
}
}
#endif #endif
...@@ -24,6 +24,14 @@ ...@@ -24,6 +24,14 @@
#include "gk104.h" #include "gk104.h"
#include "changk104.h" #include "changk104.h"
static const struct nvkm_fifo_func
gm204_fifo_func = {
.chan = {
&gm204_fifo_gpfifo_oclass,
NULL
},
};
int int
gm204_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine, gm204_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
struct nvkm_oclass *oclass, void *data, u32 size, struct nvkm_oclass *oclass, void *data, u32 size,
...@@ -32,7 +40,7 @@ gm204_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine, ...@@ -32,7 +40,7 @@ gm204_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
int ret = gk104_fifo_ctor(parent, engine, oclass, data, size, pobject); int ret = gk104_fifo_ctor(parent, engine, oclass, data, size, pobject);
if (ret == 0) { if (ret == 0) {
struct gk104_fifo *fifo = (void *)*pobject; struct gk104_fifo *fifo = (void *)*pobject;
nv_engine(fifo)->sclass = gm204_fifo_sclass; fifo->base.func = &gm204_fifo_func;
} }
return ret; return ret;
} }
......
...@@ -30,15 +30,14 @@ ...@@ -30,15 +30,14 @@
#include <nvif/unpack.h> #include <nvif/unpack.h>
static int static int
g84_fifo_chan_ctor_ind(struct nvkm_object *parent, struct nvkm_object *engine, g84_fifo_gpfifo_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
struct nvkm_oclass *oclass, void *data, u32 size, void *data, u32 size, struct nvkm_object **pobject)
struct nvkm_object **pobject)
{ {
struct nvkm_object *parent = oclass->parent;
union { union {
struct nv50_channel_gpfifo_v0 v0; struct nv50_channel_gpfifo_v0 v0;
} *args = data; } *args = data;
struct nvkm_device *device = parent->engine->subdev.device; struct nv50_fifo *fifo = nv50_fifo(base);
struct nv50_fifo_base *base = (void *)parent;
struct nv50_fifo_chan *chan; struct nv50_fifo_chan *chan;
u64 ioffset, ilength; u64 ioffset, ilength;
int ret; int ret;
...@@ -50,73 +49,46 @@ g84_fifo_chan_ctor_ind(struct nvkm_object *parent, struct nvkm_object *engine, ...@@ -50,73 +49,46 @@ g84_fifo_chan_ctor_ind(struct nvkm_object *parent, struct nvkm_object *engine,
"ilength %08x\n", "ilength %08x\n",
args->v0.version, args->v0.vm, args->v0.pushbuf, args->v0.version, args->v0.vm, args->v0.pushbuf,
args->v0.ioffset, args->v0.ilength); args->v0.ioffset, args->v0.ilength);
if (args->v0.vm) if (!args->v0.pushbuf)
return -ENOENT; return -EINVAL;
} else } else
return ret; return ret;
ret = nvkm_fifo_channel_create(parent, engine, oclass, 0, 0xc00000, if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
0x2000, args->v0.pushbuf, return -ENOMEM;
(1ULL << NVDEV_ENGINE_DMAOBJ) | *pobject = &chan->base.object;
(1ULL << NVDEV_ENGINE_SW) |
(1ULL << NVDEV_ENGINE_GR) |
(1ULL << NVDEV_ENGINE_MPEG) |
(1ULL << NVDEV_ENGINE_ME) |
(1ULL << NVDEV_ENGINE_VP) |
(1ULL << NVDEV_ENGINE_CIPHER) |
(1ULL << NVDEV_ENGINE_SEC) |
(1ULL << NVDEV_ENGINE_BSP) |
(1ULL << NVDEV_ENGINE_MSVLD) |
(1ULL << NVDEV_ENGINE_MSPDEC) |
(1ULL << NVDEV_ENGINE_MSPPP) |
(1ULL << NVDEV_ENGINE_CE0) |
(1ULL << NVDEV_ENGINE_VIC), &chan);
*pobject = nv_object(chan);
if (ret)
return ret;
chan->base.inst = base->base.gpuobj.addr;
args->v0.chid = chan->base.chid;
ret = nvkm_ramht_new(device, 0x8000, 16, &base->base.gpuobj, ret = g84_fifo_chan_ctor(fifo, args->v0.vm, args->v0.pushbuf,
&chan->ramht); oclass, chan);
if (ret) if (ret)
return ret; return ret;
nv_parent(chan)->context_attach = g84_fifo_context_attach; args->v0.chid = chan->base.chid;
nv_parent(chan)->context_detach = g84_fifo_context_detach;
nv_parent(chan)->object_attach = g84_fifo_object_attach;
nv_parent(chan)->object_detach = nv50_fifo_object_detach;
ioffset = args->v0.ioffset; ioffset = args->v0.ioffset;
ilength = order_base_2(args->v0.ilength / 8); ilength = order_base_2(args->v0.ilength / 8);
nvkm_kmap(base->ramfc); nvkm_kmap(chan->ramfc);
nvkm_wo32(base->ramfc, 0x3c, 0x403f6078); nvkm_wo32(chan->ramfc, 0x3c, 0x403f6078);
nvkm_wo32(base->ramfc, 0x44, 0x01003fff); nvkm_wo32(chan->ramfc, 0x44, 0x01003fff);
nvkm_wo32(base->ramfc, 0x48, chan->base.pushgpu->node->offset >> 4); nvkm_wo32(chan->ramfc, 0x48, chan->base.push->node->offset >> 4);
nvkm_wo32(base->ramfc, 0x50, lower_32_bits(ioffset)); nvkm_wo32(chan->ramfc, 0x50, lower_32_bits(ioffset));
nvkm_wo32(base->ramfc, 0x54, upper_32_bits(ioffset) | (ilength << 16)); nvkm_wo32(chan->ramfc, 0x54, upper_32_bits(ioffset) | (ilength << 16));
nvkm_wo32(base->ramfc, 0x60, 0x7fffffff); nvkm_wo32(chan->ramfc, 0x60, 0x7fffffff);
nvkm_wo32(base->ramfc, 0x78, 0x00000000); nvkm_wo32(chan->ramfc, 0x78, 0x00000000);
nvkm_wo32(base->ramfc, 0x7c, 0x30000001); nvkm_wo32(chan->ramfc, 0x7c, 0x30000001);
nvkm_wo32(base->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) | nvkm_wo32(chan->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
(4 << 24) /* SEARCH_FULL */ | (4 << 24) /* SEARCH_FULL */ |
(chan->ramht->gpuobj->node->offset >> 4)); (chan->ramht->gpuobj->node->offset >> 4));
nvkm_wo32(base->ramfc, 0x88, base->cache->addr >> 10); nvkm_wo32(chan->ramfc, 0x88, chan->cache->addr >> 10);
nvkm_wo32(base->ramfc, 0x98, nv_gpuobj(base)->addr >> 12); nvkm_wo32(chan->ramfc, 0x98, chan->base.inst->addr >> 12);
nvkm_done(base->ramfc); nvkm_done(chan->ramfc);
return 0; return 0;
} }
struct nvkm_ofuncs const struct nvkm_fifo_chan_oclass
g84_fifo_ofuncs_ind = { g84_fifo_gpfifo_oclass = {
.ctor = g84_fifo_chan_ctor_ind, .base.oclass = G82_CHANNEL_GPFIFO,
.dtor = nv50_fifo_chan_dtor, .base.minver = 0,
.init = g84_fifo_chan_init, .base.maxver = 0,
.fini = nv50_fifo_chan_fini, .ctor = g84_fifo_gpfifo_new,
.map = _nvkm_fifo_channel_map,
.rd32 = _nvkm_fifo_channel_rd32,
.wr32 = _nvkm_fifo_channel_wr32,
.ntfy = _nvkm_fifo_channel_ntfy
}; };
...@@ -25,8 +25,10 @@ ...@@ -25,8 +25,10 @@
#include <nvif/class.h> #include <nvif/class.h>
struct nvkm_oclass const struct nvkm_fifo_chan_oclass
gm204_fifo_sclass[] = { gm204_fifo_gpfifo_oclass = {
{ MAXWELL_CHANNEL_GPFIFO_A, &gk104_fifo_chan_ofuncs }, .base.oclass = MAXWELL_CHANNEL_GPFIFO_A,
{} .base.minver = 0,
.base.maxver = 0,
.ctor = gk104_fifo_gpfifo_new,
}; };
...@@ -30,15 +30,14 @@ ...@@ -30,15 +30,14 @@
#include <nvif/unpack.h> #include <nvif/unpack.h>
static int static int
nv50_fifo_chan_ctor_ind(struct nvkm_object *parent, struct nvkm_object *engine, nv50_fifo_gpfifo_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
struct nvkm_oclass *oclass, void *data, u32 size, void *data, u32 size, struct nvkm_object **pobject)
struct nvkm_object **pobject)
{ {
struct nvkm_object *parent = oclass->parent;
union { union {
struct nv50_channel_gpfifo_v0 v0; struct nv50_channel_gpfifo_v0 v0;
} *args = data; } *args = data;
struct nvkm_device *device = parent->engine->subdev.device; struct nv50_fifo *fifo = nv50_fifo(base);
struct nv50_fifo_base *base = (void *)parent;
struct nv50_fifo_chan *chan; struct nv50_fifo_chan *chan;
u64 ioffset, ilength; u64 ioffset, ilength;
int ret; int ret;
...@@ -50,61 +49,44 @@ nv50_fifo_chan_ctor_ind(struct nvkm_object *parent, struct nvkm_object *engine, ...@@ -50,61 +49,44 @@ nv50_fifo_chan_ctor_ind(struct nvkm_object *parent, struct nvkm_object *engine,
"ilength %08x\n", "ilength %08x\n",
args->v0.version, args->v0.vm, args->v0.pushbuf, args->v0.version, args->v0.vm, args->v0.pushbuf,
args->v0.ioffset, args->v0.ilength); args->v0.ioffset, args->v0.ilength);
if (args->v0.vm) if (!args->v0.pushbuf)
return -ENOENT; return -EINVAL;
} else } else
return ret; return ret;
ret = nvkm_fifo_channel_create(parent, engine, oclass, 0, 0xc00000, if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
0x2000, args->v0.pushbuf, return -ENOMEM;
(1ULL << NVDEV_ENGINE_DMAOBJ) | *pobject = &chan->base.object;
(1ULL << NVDEV_ENGINE_SW) |
(1ULL << NVDEV_ENGINE_GR) |
(1ULL << NVDEV_ENGINE_MPEG), &chan);
*pobject = nv_object(chan);
if (ret)
return ret;
chan->base.inst = base->base.gpuobj.addr;
args->v0.chid = chan->base.chid;
nv_parent(chan)->context_attach = nv50_fifo_context_attach;
nv_parent(chan)->context_detach = nv50_fifo_context_detach;
nv_parent(chan)->object_attach = nv50_fifo_object_attach;
nv_parent(chan)->object_detach = nv50_fifo_object_detach;
ret = nvkm_ramht_new(device, 0x8000, 16, &base->base.gpuobj, ret = nv50_fifo_chan_ctor(fifo, args->v0.vm, args->v0.pushbuf,
&chan->ramht); oclass, chan);
if (ret) if (ret)
return ret; return ret;
args->v0.chid = chan->base.chid;
ioffset = args->v0.ioffset; ioffset = args->v0.ioffset;
ilength = order_base_2(args->v0.ilength / 8); ilength = order_base_2(args->v0.ilength / 8);
nvkm_kmap(base->ramfc); nvkm_kmap(chan->ramfc);
nvkm_wo32(base->ramfc, 0x3c, 0x403f6078); nvkm_wo32(chan->ramfc, 0x3c, 0x403f6078);
nvkm_wo32(base->ramfc, 0x44, 0x01003fff); nvkm_wo32(chan->ramfc, 0x44, 0x01003fff);
nvkm_wo32(base->ramfc, 0x48, chan->base.pushgpu->node->offset >> 4); nvkm_wo32(chan->ramfc, 0x48, chan->base.push->node->offset >> 4);
nvkm_wo32(base->ramfc, 0x50, lower_32_bits(ioffset)); nvkm_wo32(chan->ramfc, 0x50, lower_32_bits(ioffset));
nvkm_wo32(base->ramfc, 0x54, upper_32_bits(ioffset) | (ilength << 16)); nvkm_wo32(chan->ramfc, 0x54, upper_32_bits(ioffset) | (ilength << 16));
nvkm_wo32(base->ramfc, 0x60, 0x7fffffff); nvkm_wo32(chan->ramfc, 0x60, 0x7fffffff);
nvkm_wo32(base->ramfc, 0x78, 0x00000000); nvkm_wo32(chan->ramfc, 0x78, 0x00000000);
nvkm_wo32(base->ramfc, 0x7c, 0x30000001); nvkm_wo32(chan->ramfc, 0x7c, 0x30000001);
nvkm_wo32(base->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) | nvkm_wo32(chan->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
(4 << 24) /* SEARCH_FULL */ | (4 << 24) /* SEARCH_FULL */ |
(chan->ramht->gpuobj->node->offset >> 4)); (chan->ramht->gpuobj->node->offset >> 4));
nvkm_done(base->ramfc); nvkm_done(chan->ramfc);
return 0; return 0;
} }
struct nvkm_ofuncs const struct nvkm_fifo_chan_oclass
nv50_fifo_ofuncs_ind = { nv50_fifo_gpfifo_oclass = {
.ctor = nv50_fifo_chan_ctor_ind, .base.oclass = NV50_CHANNEL_GPFIFO,
.dtor = nv50_fifo_chan_dtor, .base.minver = 0,
.init = nv50_fifo_chan_init, .base.maxver = 0,
.fini = nv50_fifo_chan_fini, .ctor = nv50_fifo_gpfifo_new,
.map = _nvkm_fifo_channel_map,
.rd32 = _nvkm_fifo_channel_rd32,
.wr32 = _nvkm_fifo_channel_wr32,
.ntfy = _nvkm_fifo_channel_ntfy
}; };
...@@ -25,6 +25,7 @@ ...@@ -25,6 +25,7 @@
#include "channv04.h" #include "channv04.h"
#include "regsnv04.h" #include "regsnv04.h"
#include <core/client.h>
#include <core/handle.h> #include <core/handle.h>
#include <core/ramht.h> #include <core/ramht.h>
#include <subdev/instmem.h> #include <subdev/instmem.h>
...@@ -136,6 +137,8 @@ nv04_fifo_cache_error(struct nv04_fifo *fifo, u32 chid, u32 get) ...@@ -136,6 +137,8 @@ nv04_fifo_cache_error(struct nv04_fifo *fifo, u32 chid, u32 get)
{ {
struct nvkm_subdev *subdev = &fifo->base.engine.subdev; struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
struct nvkm_device *device = subdev->device; struct nvkm_device *device = subdev->device;
struct nvkm_fifo_chan *chan;
unsigned long flags;
u32 pull0 = nvkm_rd32(device, 0x003250); u32 pull0 = nvkm_rd32(device, 0x003250);
u32 mthd, data; u32 mthd, data;
int ptr; int ptr;
...@@ -157,12 +160,12 @@ nv04_fifo_cache_error(struct nv04_fifo *fifo, u32 chid, u32 get) ...@@ -157,12 +160,12 @@ nv04_fifo_cache_error(struct nv04_fifo *fifo, u32 chid, u32 get)
if (!(pull0 & 0x00000100) || if (!(pull0 & 0x00000100) ||
!nv04_fifo_swmthd(device, chid, mthd, data)) { !nv04_fifo_swmthd(device, chid, mthd, data)) {
const char *client_name = chan = nvkm_fifo_chan_chid(&fifo->base, chid, &flags);
nvkm_client_name_for_fifo_chid(&fifo->base, chid);
nvkm_error(subdev, "CACHE_ERROR - " nvkm_error(subdev, "CACHE_ERROR - "
"ch %d [%s] subc %d mthd %04x data %08x\n", "ch %d [%s] subc %d mthd %04x data %08x\n",
chid, client_name, (mthd >> 13) & 7, mthd & 0x1ffc, chid, chan ? chan->object.client->name : "unknown",
data); (mthd >> 13) & 7, mthd & 0x1ffc, data);
nvkm_fifo_chan_put(&fifo->base, flags, &chan);
} }
nvkm_wr32(device, NV04_PFIFO_CACHE1_DMA_PUSH, 0); nvkm_wr32(device, NV04_PFIFO_CACHE1_DMA_PUSH, 0);
...@@ -189,10 +192,12 @@ nv04_fifo_dma_pusher(struct nv04_fifo *fifo, u32 chid) ...@@ -189,10 +192,12 @@ nv04_fifo_dma_pusher(struct nv04_fifo *fifo, u32 chid)
u32 dma_put = nvkm_rd32(device, 0x003240); u32 dma_put = nvkm_rd32(device, 0x003240);
u32 push = nvkm_rd32(device, 0x003220); u32 push = nvkm_rd32(device, 0x003220);
u32 state = nvkm_rd32(device, 0x003228); u32 state = nvkm_rd32(device, 0x003228);
const char *client_name; struct nvkm_fifo_chan *chan;
unsigned long flags;
client_name = nvkm_client_name_for_fifo_chid(&fifo->base, chid); const char *name;
chan = nvkm_fifo_chan_chid(&fifo->base, chid, &flags);
name = chan ? chan->object.client->name : "unknown";
if (device->card_type == NV_50) { if (device->card_type == NV_50) {
u32 ho_get = nvkm_rd32(device, 0x003328); u32 ho_get = nvkm_rd32(device, 0x003328);
u32 ho_put = nvkm_rd32(device, 0x003320); u32 ho_put = nvkm_rd32(device, 0x003320);
...@@ -202,7 +207,7 @@ nv04_fifo_dma_pusher(struct nv04_fifo *fifo, u32 chid) ...@@ -202,7 +207,7 @@ nv04_fifo_dma_pusher(struct nv04_fifo *fifo, u32 chid)
nvkm_error(subdev, "DMA_PUSHER - " nvkm_error(subdev, "DMA_PUSHER - "
"ch %d [%s] get %02x%08x put %02x%08x ib_get %08x " "ch %d [%s] get %02x%08x put %02x%08x ib_get %08x "
"ib_put %08x state %08x (err: %s) push %08x\n", "ib_put %08x state %08x (err: %s) push %08x\n",
chid, client_name, ho_get, dma_get, ho_put, dma_put, chid, name, ho_get, dma_get, ho_put, dma_put,
ib_get, ib_put, state, nv_dma_state_err(state), ib_get, ib_put, state, nv_dma_state_err(state),
push); push);
...@@ -217,12 +222,13 @@ nv04_fifo_dma_pusher(struct nv04_fifo *fifo, u32 chid) ...@@ -217,12 +222,13 @@ nv04_fifo_dma_pusher(struct nv04_fifo *fifo, u32 chid)
} else { } else {
nvkm_error(subdev, "DMA_PUSHER - ch %d [%s] get %08x put %08x " nvkm_error(subdev, "DMA_PUSHER - ch %d [%s] get %08x put %08x "
"state %08x (err: %s) push %08x\n", "state %08x (err: %s) push %08x\n",
chid, client_name, dma_get, dma_put, state, chid, name, dma_get, dma_put, state,
nv_dma_state_err(state), push); nv_dma_state_err(state), push);
if (dma_get != dma_put) if (dma_get != dma_put)
nvkm_wr32(device, 0x003244, dma_put); nvkm_wr32(device, 0x003244, dma_put);
} }
nvkm_fifo_chan_put(&fifo->base, flags, &chan);
nvkm_wr32(device, 0x003228, 0x00000000); nvkm_wr32(device, 0x003228, 0x00000000);
nvkm_wr32(device, 0x003220, 0x00000001); nvkm_wr32(device, 0x003220, 0x00000001);
...@@ -241,7 +247,7 @@ nv04_fifo_intr(struct nvkm_subdev *subdev) ...@@ -241,7 +247,7 @@ nv04_fifo_intr(struct nvkm_subdev *subdev)
reassign = nvkm_rd32(device, NV03_PFIFO_CACHES) & 1; reassign = nvkm_rd32(device, NV03_PFIFO_CACHES) & 1;
nvkm_wr32(device, NV03_PFIFO_CACHES, 0); nvkm_wr32(device, NV03_PFIFO_CACHES, 0);
chid = nvkm_rd32(device, NV03_PFIFO_CACHE1_PUSH1) & fifo->base.max; chid = nvkm_rd32(device, NV03_PFIFO_CACHE1_PUSH1) & (fifo->base.nr - 1);
get = nvkm_rd32(device, NV03_PFIFO_CACHE1_GET); get = nvkm_rd32(device, NV03_PFIFO_CACHE1_GET);
if (stat & NV_PFIFO_INTR_CACHE_ERROR) { if (stat & NV_PFIFO_INTR_CACHE_ERROR) {
...@@ -311,7 +317,7 @@ nv04_fifo_init(struct nvkm_object *object) ...@@ -311,7 +317,7 @@ nv04_fifo_init(struct nvkm_object *object)
nvkm_wr32(device, NV03_PFIFO_RAMRO, nvkm_memory_addr(ramro) >> 8); nvkm_wr32(device, NV03_PFIFO_RAMRO, nvkm_memory_addr(ramro) >> 8);
nvkm_wr32(device, NV03_PFIFO_RAMFC, nvkm_memory_addr(ramfc) >> 8); nvkm_wr32(device, NV03_PFIFO_RAMFC, nvkm_memory_addr(ramfc) >> 8);
nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH1, fifo->base.max); nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH1, fifo->base.nr - 1);
nvkm_wr32(device, NV03_PFIFO_INTR_0, 0xffffffff); nvkm_wr32(device, NV03_PFIFO_INTR_0, 0xffffffff);
nvkm_wr32(device, NV03_PFIFO_INTR_EN_0, 0xffffffff); nvkm_wr32(device, NV03_PFIFO_INTR_EN_0, 0xffffffff);
...@@ -329,6 +335,14 @@ nv04_fifo_dtor(struct nvkm_object *object) ...@@ -329,6 +335,14 @@ nv04_fifo_dtor(struct nvkm_object *object)
nvkm_fifo_destroy(&fifo->base); nvkm_fifo_destroy(&fifo->base);
} }
static const struct nvkm_fifo_func
nv04_fifo_func = {
.chan = {
&nv04_fifo_dma_oclass,
NULL
},
};
static int static int
nv04_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine, nv04_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
struct nvkm_oclass *oclass, void *data, u32 size, struct nvkm_oclass *oclass, void *data, u32 size,
...@@ -342,10 +356,10 @@ nv04_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine, ...@@ -342,10 +356,10 @@ nv04_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
if (ret) if (ret)
return ret; return ret;
fifo->base.func = &nv04_fifo_func;
nv_subdev(fifo)->unit = 0x00000100; nv_subdev(fifo)->unit = 0x00000100;
nv_subdev(fifo)->intr = nv04_fifo_intr; nv_subdev(fifo)->intr = nv04_fifo_intr;
nv_engine(fifo)->cclass = &nv04_fifo_cclass;
nv_engine(fifo)->sclass = nv04_fifo_sclass;
fifo->base.pause = nv04_fifo_pause; fifo->base.pause = nv04_fifo_pause;
fifo->base.start = nv04_fifo_start; fifo->base.start = nv04_fifo_start;
fifo->ramfc_desc = nv04_ramfc; fifo->ramfc_desc = nv04_ramfc;
......
#ifndef __NV04_FIFO_H__ #ifndef __NV04_FIFO_H__
#define __NV04_FIFO_H__ #define __NV04_FIFO_H__
#define nv04_fifo(p) container_of((p), struct nv04_fifo, base)
#include "priv.h" #include "priv.h"
struct ramfc_desc { struct ramfc_desc {
...@@ -15,14 +16,6 @@ struct nv04_fifo { ...@@ -15,14 +16,6 @@ struct nv04_fifo {
struct ramfc_desc *ramfc_desc; struct ramfc_desc *ramfc_desc;
}; };
struct nv04_fifo_base {
struct nvkm_fifo_base base;
};
int nv04_fifo_context_ctor(struct nvkm_object *, struct nvkm_object *,
struct nvkm_oclass *, void *, u32,
struct nvkm_object **);
void nv04_fifo_dtor(struct nvkm_object *); void nv04_fifo_dtor(struct nvkm_object *);
int nv04_fifo_init(struct nvkm_object *); int nv04_fifo_init(struct nvkm_object *);
#endif #endif
...@@ -39,16 +39,11 @@ nv10_ramfc[] = { ...@@ -39,16 +39,11 @@ nv10_ramfc[] = {
{} {}
}; };
static struct nvkm_oclass static const struct nvkm_fifo_func
nv10_fifo_cclass = { nv10_fifo_func = {
.handle = NV_ENGCTX(FIFO, 0x10), .chan = {
.ofuncs = &(struct nvkm_ofuncs) { &nv10_fifo_dma_oclass,
.ctor = nv04_fifo_context_ctor, NULL
.dtor = _nvkm_fifo_context_dtor,
.init = _nvkm_fifo_context_init,
.fini = _nvkm_fifo_context_fini,
.rd32 = _nvkm_fifo_context_rd32,
.wr32 = _nvkm_fifo_context_wr32,
}, },
}; };
...@@ -65,10 +60,10 @@ nv10_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine, ...@@ -65,10 +60,10 @@ nv10_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
if (ret) if (ret)
return ret; return ret;
fifo->base.func = &nv10_fifo_func;
nv_subdev(fifo)->unit = 0x00000100; nv_subdev(fifo)->unit = 0x00000100;
nv_subdev(fifo)->intr = nv04_fifo_intr; nv_subdev(fifo)->intr = nv04_fifo_intr;
nv_engine(fifo)->cclass = &nv10_fifo_cclass;
nv_engine(fifo)->sclass = nv10_fifo_sclass;
fifo->base.pause = nv04_fifo_pause; fifo->base.pause = nv04_fifo_pause;
fifo->base.start = nv04_fifo_start; fifo->base.start = nv04_fifo_start;
fifo->ramfc_desc = nv10_ramfc; fifo->ramfc_desc = nv10_ramfc;
......
...@@ -47,19 +47,6 @@ nv17_ramfc[] = { ...@@ -47,19 +47,6 @@ nv17_ramfc[] = {
{} {}
}; };
static struct nvkm_oclass
nv17_fifo_cclass = {
.handle = NV_ENGCTX(FIFO, 0x17),
.ofuncs = &(struct nvkm_ofuncs) {
.ctor = nv04_fifo_context_ctor,
.dtor = _nvkm_fifo_context_dtor,
.init = _nvkm_fifo_context_init,
.fini = _nvkm_fifo_context_fini,
.rd32 = _nvkm_fifo_context_rd32,
.wr32 = _nvkm_fifo_context_wr32,
},
};
static int static int
nv17_fifo_init(struct nvkm_object *object) nv17_fifo_init(struct nvkm_object *object)
{ {
...@@ -85,7 +72,7 @@ nv17_fifo_init(struct nvkm_object *object) ...@@ -85,7 +72,7 @@ nv17_fifo_init(struct nvkm_object *object)
nvkm_wr32(device, NV03_PFIFO_RAMFC, nvkm_memory_addr(ramfc) >> 8 | nvkm_wr32(device, NV03_PFIFO_RAMFC, nvkm_memory_addr(ramfc) >> 8 |
0x00010000); 0x00010000);
nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH1, fifo->base.max); nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH1, fifo->base.nr - 1);
nvkm_wr32(device, NV03_PFIFO_INTR_0, 0xffffffff); nvkm_wr32(device, NV03_PFIFO_INTR_0, 0xffffffff);
nvkm_wr32(device, NV03_PFIFO_INTR_EN_0, 0xffffffff); nvkm_wr32(device, NV03_PFIFO_INTR_EN_0, 0xffffffff);
...@@ -96,6 +83,14 @@ nv17_fifo_init(struct nvkm_object *object) ...@@ -96,6 +83,14 @@ nv17_fifo_init(struct nvkm_object *object)
return 0; return 0;
} }
static const struct nvkm_fifo_func
nv17_fifo_func = {
.chan = {
&nv17_fifo_dma_oclass,
NULL
},
};
static int static int
nv17_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine, nv17_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
struct nvkm_oclass *oclass, void *data, u32 size, struct nvkm_oclass *oclass, void *data, u32 size,
...@@ -109,10 +104,10 @@ nv17_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine, ...@@ -109,10 +104,10 @@ nv17_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
if (ret) if (ret)
return ret; return ret;
fifo->base.func = &nv17_fifo_func;
nv_subdev(fifo)->unit = 0x00000100; nv_subdev(fifo)->unit = 0x00000100;
nv_subdev(fifo)->intr = nv04_fifo_intr; nv_subdev(fifo)->intr = nv04_fifo_intr;
nv_engine(fifo)->cclass = &nv17_fifo_cclass;
nv_engine(fifo)->sclass = nv17_fifo_sclass;
fifo->base.pause = nv04_fifo_pause; fifo->base.pause = nv04_fifo_pause;
fifo->base.start = nv04_fifo_start; fifo->base.start = nv04_fifo_start;
fifo->ramfc_desc = nv17_ramfc; fifo->ramfc_desc = nv17_ramfc;
......
...@@ -56,19 +56,6 @@ nv40_ramfc[] = { ...@@ -56,19 +56,6 @@ nv40_ramfc[] = {
{} {}
}; };
static struct nvkm_oclass
nv40_fifo_cclass = {
.handle = NV_ENGCTX(FIFO, 0x40),
.ofuncs = &(struct nvkm_ofuncs) {
.ctor = nv04_fifo_context_ctor,
.dtor = _nvkm_fifo_context_dtor,
.init = _nvkm_fifo_context_init,
.fini = _nvkm_fifo_context_fini,
.rd32 = _nvkm_fifo_context_rd32,
.wr32 = _nvkm_fifo_context_wr32,
},
};
static int static int
nv40_fifo_init(struct nvkm_object *object) nv40_fifo_init(struct nvkm_object *object)
{ {
...@@ -115,7 +102,7 @@ nv40_fifo_init(struct nvkm_object *object) ...@@ -115,7 +102,7 @@ nv40_fifo_init(struct nvkm_object *object)
break; break;
} }
nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH1, fifo->base.max); nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH1, fifo->base.nr - 1);
nvkm_wr32(device, NV03_PFIFO_INTR_0, 0xffffffff); nvkm_wr32(device, NV03_PFIFO_INTR_0, 0xffffffff);
nvkm_wr32(device, NV03_PFIFO_INTR_EN_0, 0xffffffff); nvkm_wr32(device, NV03_PFIFO_INTR_EN_0, 0xffffffff);
...@@ -126,6 +113,14 @@ nv40_fifo_init(struct nvkm_object *object) ...@@ -126,6 +113,14 @@ nv40_fifo_init(struct nvkm_object *object)
return 0; return 0;
} }
static const struct nvkm_fifo_func
nv40_fifo_func = {
.chan = {
&nv40_fifo_dma_oclass,
NULL
},
};
static int static int
nv40_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine, nv40_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
struct nvkm_oclass *oclass, void *data, u32 size, struct nvkm_oclass *oclass, void *data, u32 size,
...@@ -139,10 +134,10 @@ nv40_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine, ...@@ -139,10 +134,10 @@ nv40_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
if (ret) if (ret)
return ret; return ret;
fifo->base.func = &nv40_fifo_func;
nv_subdev(fifo)->unit = 0x00000100; nv_subdev(fifo)->unit = 0x00000100;
nv_subdev(fifo)->intr = nv04_fifo_intr; nv_subdev(fifo)->intr = nv04_fifo_intr;
nv_engine(fifo)->cclass = &nv40_fifo_cclass;
nv_engine(fifo)->sclass = nv40_fifo_sclass;
fifo->base.pause = nv04_fifo_pause; fifo->base.pause = nv04_fifo_pause;
fifo->base.start = nv04_fifo_start; fifo->base.start = nv04_fifo_start;
fifo->ramfc_desc = nv40_ramfc; fifo->ramfc_desc = nv40_ramfc;
......
#ifndef __NV50_FIFO_H__ #ifndef __NV50_FIFO_H__
#define __NV50_FIFO_H__ #define __NV50_FIFO_H__
#define nv50_fifo(p) container_of((p), struct nv50_fifo, base)
#include "priv.h" #include "priv.h"
struct nv50_fifo { struct nv50_fifo {
......
#ifndef __NVKM_FIFO_PRIV_H__ #ifndef __NVKM_FIFO_PRIV_H__
#define __NVKM_FIFO_PRIV_H__ #define __NVKM_FIFO_PRIV_H__
#include <engine/fifo.h> #include <engine/fifo.h>
#include <core/engctx.h>
void nv04_fifo_pause(struct nvkm_fifo *, unsigned long *); void nv04_fifo_pause(struct nvkm_fifo *, unsigned long *);
void nv04_fifo_start(struct nvkm_fifo *, unsigned long *); void nv04_fifo_start(struct nvkm_fifo *, unsigned long *);
......
...@@ -230,7 +230,8 @@ nv20_gr_intr(struct nvkm_subdev *subdev) ...@@ -230,7 +230,8 @@ nv20_gr_intr(struct nvkm_subdev *subdev)
"nstatus %08x [%s] ch %d [%s] subc %d " "nstatus %08x [%s] ch %d [%s] subc %d "
"class %04x mthd %04x data %08x\n", "class %04x mthd %04x data %08x\n",
show, msg, nsource, src, nstatus, sta, chid, show, msg, nsource, src, nstatus, sta, chid,
nvkm_client_name(chan), subc, class, mthd, data); chan ? chan->object.client->name : "unknown",
subc, class, mthd, data);
} }
nvkm_fifo_chan_put(device->fifo, flags, &chan); nvkm_fifo_chan_put(device->fifo, flags, &chan);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment