Commit 8ab849d6 authored by Ben Skeggs

drm/nouveau/fifo: add new engine context handling

Builds on the context tracking that was added earlier.

- marks engine context PTEs as 'priv' where possible
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
Reviewed-by: Lyude Paul <lyude@redhat.com>
parent 3647c53b
......@@ -8,11 +8,6 @@ struct nvkm_fault_data;
#define NVKM_FIFO_ENGN_NR 16
struct nvkm_fifo_engn {
struct nvkm_object *object;
int refcount;
};
struct nvkm_chan {
const struct nvkm_chan_func *func;
char name[64];
......@@ -41,13 +36,10 @@ struct nvkm_chan {
struct list_head cctxs;
struct nvkm_fifo *fifo;
struct nvkm_object object;
struct list_head head;
struct nvkm_gpuobj *push;
struct nvkm_fifo_engn engn[NVKM_FIFO_ENGN_NR];
};
struct nvkm_chan *nvkm_chan_get_chid(struct nvkm_engine *, int id, unsigned long *irqflags);
......@@ -78,7 +70,6 @@ struct nvkm_fifo {
struct nvkm_vma *bar1;
} userd;
int nr;
spinlock_t lock;
struct mutex mutex;
};
......
......@@ -318,7 +318,7 @@ int
nvkm_fifo_ctor(const struct nvkm_fifo_func *func, struct nvkm_device *device,
enum nvkm_subdev_type type, int inst, struct nvkm_fifo *fifo)
{
int ret, nr;
int ret;
fifo->func = func;
INIT_LIST_HEAD(&fifo->runqs);
......@@ -335,9 +335,6 @@ nvkm_fifo_ctor(const struct nvkm_fifo_func *func, struct nvkm_device *device,
if (ret)
return ret;
nr = func->chid_nr(fifo);
fifo->nr = nr;
if (func->nonstall) {
ret = nvkm_event_init(func->nonstall, &fifo->engine.subdev, 1, 1,
&fifo->nonstall.event);
......
......@@ -25,6 +25,7 @@
#include "runl.h"
#include "priv.h"
#include <core/gpuobj.h>
#include <subdev/mmu.h>
static void
......@@ -37,6 +38,7 @@ nvkm_cgrp_ectx_put(struct nvkm_cgrp *cgrp, struct nvkm_ectx **pectx)
if (refcount_dec_and_test(&ectx->refs)) {
CGRP_TRACE(cgrp, "dtor ectx %d[%s]", engn->id, engn->engine->subdev.name);
nvkm_object_del(&ectx->object);
list_del(&ectx->head);
kfree(ectx);
}
......@@ -49,6 +51,11 @@ static int
nvkm_cgrp_ectx_get(struct nvkm_cgrp *cgrp, struct nvkm_engn *engn, struct nvkm_ectx **pectx,
struct nvkm_chan *chan, struct nvkm_client *client)
{
struct nvkm_engine *engine = engn->engine;
struct nvkm_oclass cclass = {
.client = client,
.engine = engine,
};
struct nvkm_ectx *ectx;
int ret = 0;
......@@ -67,7 +74,18 @@ nvkm_cgrp_ectx_get(struct nvkm_cgrp *cgrp, struct nvkm_engn *engn, struct nvkm_e
ectx->engn = engn;
refcount_set(&ectx->refs, 1);
refcount_set(&ectx->uses, 0);
list_add_tail(&ectx->head, &cgrp->ectxs);
/* Allocate the HW structures. */
if (engine->func->fifo.cclass)
ret = engine->func->fifo.cclass(chan, &cclass, &ectx->object);
else if (engine->func->cclass)
ret = nvkm_object_new_(engine->func->cclass, &cclass, NULL, 0, &ectx->object);
if (ret)
nvkm_cgrp_ectx_put(cgrp, pectx);
return ret;
}
......@@ -81,6 +99,8 @@ nvkm_cgrp_vctx_put(struct nvkm_cgrp *cgrp, struct nvkm_vctx **pvctx)
if (refcount_dec_and_test(&vctx->refs)) {
CGRP_TRACE(cgrp, "dtor vctx %d[%s]", engn->id, engn->engine->subdev.name);
nvkm_vmm_put(vctx->vmm, &vctx->vma);
nvkm_gpuobj_del(&vctx->inst);
nvkm_cgrp_ectx_put(cgrp, &vctx->ectx);
if (vctx->vmm) {
......@@ -130,6 +150,21 @@ nvkm_cgrp_vctx_get(struct nvkm_cgrp *cgrp, struct nvkm_engn *engn, struct nvkm_c
vctx->vmm = nvkm_vmm_ref(chan->vmm);
refcount_set(&vctx->refs, 1);
list_add_tail(&vctx->head, &cgrp->vctxs);
/* MMU on some GPUs needs to know engine usage for TLB invalidation. */
if (vctx->vmm)
atomic_inc(&vctx->vmm->engref[engn->engine->subdev.type]);
/* Allocate the HW structures. */
if (engn->func->bind) {
ret = nvkm_object_bind(vctx->ectx->object, NULL, 0, &vctx->inst);
if (ret == 0 && engn->func->ctor)
ret = engn->func->ctor(engn, vctx);
}
if (ret)
nvkm_cgrp_vctx_put(cgrp, pvctx);
return ret;
}
......
......@@ -10,12 +10,18 @@ struct nvkm_vctx {
struct nvkm_vmm *vmm;
refcount_t refs;
struct nvkm_gpuobj *inst;
struct nvkm_vma *vma;
struct list_head head;
};
struct nvkm_ectx {
struct nvkm_engn *engn;
refcount_t refs;
refcount_t uses;
struct nvkm_object *object;
struct list_head head;
};
......
......@@ -28,7 +28,6 @@
#include "runl.h"
#include "priv.h"
#include <core/client.h>
#include <core/oproxy.h>
#include <core/ramht.h>
#include <subdev/mmu.h>
......@@ -46,94 +45,15 @@ struct nvkm_fifo_chan_object {
int hash;
};
static struct nvkm_fifo_engn *
nvkm_fifo_chan_engn(struct nvkm_fifo_chan *chan, struct nvkm_engine *engine)
{
int engi = chan->fifo->func->engine_id(chan->fifo, engine);
if (engi >= 0)
return &chan->engn[engi];
return NULL;
}
/* Detach a channel's engine context object: first ask the chan
 * implementation to unbind it from HW (engine_fini), then quiesce the
 * engine context object itself.  Returns 0 on success or a negative
 * error code; fini failure of the object only aborts on suspend.
 */
static int
nvkm_fifo_chan_child_fini(struct nvkm_oproxy *base, bool suspend)
{
	struct nvkm_fifo_chan_object *object =
		container_of(base, typeof(*object), oproxy);
	struct nvkm_engine *engine = object->oproxy.object->engine;
	struct nvkm_fifo_chan *chan = object->chan;
	struct nvkm_fifo_engn *engn = nvkm_fifo_chan_engn(chan, engine);
	const char *name = engine->subdev.name;
	int ret = 0;

	/* HW-level detach must happen before the object is finalised. */
	if (chan->func->engine_fini) {
		ret = chan->func->engine_fini(chan, engine, suspend);
		if (ret) {
			nvif_error(&chan->object,
				   "detach %s failed, %d\n", name, ret);
			return ret;
		}
	}

	if (engn->object) {
		ret = nvkm_object_fini(engn->object, suspend);
		/* Only fatal on suspend; on teardown we proceed anyway. */
		if (ret && suspend)
			return ret;
	}

	nvif_trace(&chan->object, "detached %s\n", name);
	return ret;
}
/* Attach a channel's engine context object: initialise the object first,
 * then let the chan implementation bind it to HW (engine_init) — the
 * mirror order of nvkm_fifo_chan_child_fini().
 */
static int
nvkm_fifo_chan_child_init(struct nvkm_oproxy *base)
{
	struct nvkm_fifo_chan_object *object =
		container_of(base, typeof(*object), oproxy);
	struct nvkm_engine *engine = object->oproxy.object->engine;
	struct nvkm_fifo_chan *chan = object->chan;
	struct nvkm_fifo_engn *engn = nvkm_fifo_chan_engn(chan, engine);
	const char *name = engine->subdev.name;
	int ret;

	if (engn->object) {
		ret = nvkm_object_init(engn->object);
		if (ret)
			return ret;
	}

	if (chan->func->engine_init) {
		ret = chan->func->engine_init(chan, engine);
		if (ret) {
			nvif_error(&chan->object,
				   "attach %s failed, %d\n", name, ret);
			return ret;
		}
	}

	nvif_trace(&chan->object, "attached %s\n", name);
	return 0;
}
/* Destroy a channel child object.  The engine context is refcounted per
 * engine slot; only when the last user goes away is the HW context torn
 * down, the object deleted and the VMM engine reference dropped.
 */
static void
nvkm_fifo_chan_child_del(struct nvkm_oproxy *base)
{
	struct nvkm_fifo_chan_object *object =
		container_of(base, typeof(*object), oproxy);
	/* NOTE(review): uses oproxy.base.engine here (not oproxy.object),
	 * presumably because the proxied object may already be gone.
	 */
	struct nvkm_engine *engine = object->oproxy.base.engine;
	struct nvkm_fifo_chan *chan = object->chan;
	struct nvkm_fifo_engn *engn = nvkm_fifo_chan_engn(chan, engine);

	if (chan->func->object_dtor)
		chan->func->object_dtor(chan, object->hash);

	/* Last reference: tear down the shared engine context. */
	if (!--engn->refcount) {
		if (chan->func->engine_dtor)
			chan->func->engine_dtor(chan, engine);
		nvkm_object_del(&engn->object);
		if (chan->vmm)
			atomic_dec(&chan->vmm->engref[engine->subdev.type]);
	}
}
static const struct nvkm_oproxy_func
......@@ -147,7 +67,8 @@ nvkm_fifo_chan_child_new(const struct nvkm_oclass *oclass, void *data, u32 size,
{
struct nvkm_engine *engine = oclass->engine;
struct nvkm_fifo_chan *chan = nvkm_fifo_chan(oclass->parent);
struct nvkm_fifo_engn *engn = nvkm_fifo_chan_engn(chan, engine);
struct nvkm_ectx *engn = nvkm_list_find(engn, &chan->cgrp->ectxs, head,
engn->engn->engine == engine);
struct nvkm_fifo_chan_object *object;
int ret = 0;
......@@ -157,33 +78,6 @@ nvkm_fifo_chan_child_new(const struct nvkm_oclass *oclass, void *data, u32 size,
object->chan = chan;
*pobject = &object->oproxy.base;
if (!engn->refcount++) {
struct nvkm_oclass cclass = {
.client = oclass->client,
.engine = oclass->engine,
};
if (chan->vmm)
atomic_inc(&chan->vmm->engref[engine->subdev.type]);
if (engine->func->fifo.cclass) {
ret = engine->func->fifo.cclass(chan, &cclass,
&engn->object);
} else
if (engine->func->cclass) {
ret = nvkm_object_new_(engine->func->cclass, &cclass,
NULL, 0, &engn->object);
}
if (ret)
return ret;
if (chan->func->engine_ctor) {
ret = chan->func->engine_ctor(chan, oclass->engine,
engn->object);
if (ret)
return ret;
}
}
ret = oclass->base.ctor(&(const struct nvkm_oclass) {
.base = oclass->base,
......@@ -210,10 +104,16 @@ nvkm_fifo_chan_child_new(const struct nvkm_oclass *oclass, void *data, u32 size,
}
void
nvkm_chan_cctx_bind(struct nvkm_chan *chan, struct nvkm_oproxy *oproxy, struct nvkm_cctx *cctx)
nvkm_chan_cctx_bind(struct nvkm_chan *chan, struct nvkm_engn *engn, struct nvkm_cctx *cctx)
{
struct nvkm_cgrp *cgrp = chan->cgrp;
struct nvkm_runl *runl = cgrp->runl;
struct nvkm_engine *engine = engn->engine;
if (!engn->func->bind)
return;
CHAN_TRACE(chan, "%sbind cctx %d[%s]", cctx ? "" : "un", engn->id, engine->subdev.name);
/* Prevent any channel in channel group from being rescheduled, kick them
* off host and any engine(s) they're loaded on.
......@@ -225,10 +125,7 @@ nvkm_chan_cctx_bind(struct nvkm_chan *chan, struct nvkm_oproxy *oproxy, struct n
nvkm_chan_preempt(chan, true);
/* Update context pointer. */
if (cctx)
nvkm_fifo_chan_child_init(nvkm_oproxy(oproxy->object));
else
nvkm_fifo_chan_child_fini(nvkm_oproxy(oproxy->object), false);
engn->func->bind(engn, cctx, chan);
/* Resume normal operation. */
if (cgrp->hw)
......@@ -558,10 +455,6 @@ nvkm_fifo_chan_ctor(const struct nvkm_fifo_chan_func *fn,
*func = *fifo->func->chan.func;
func->dtor = fn->dtor;
func->engine_ctor = fn->engine_ctor;
func->engine_dtor = fn->engine_dtor;
func->engine_init = fn->engine_init;
func->engine_fini = fn->engine_fini;
func->object_ctor = fn->object_ctor;
func->object_dtor = fn->object_dtor;
......@@ -572,7 +465,6 @@ nvkm_fifo_chan_ctor(const struct nvkm_fifo_chan_func *fn,
atomic_set(&chan->errored, 0);
nvkm_object_ctor(&nvkm_fifo_chan_func, oclass, &chan->object);
chan->fifo = fifo;
INIT_LIST_HEAD(&chan->cctxs);
INIT_LIST_HEAD(&chan->head);
......
......@@ -52,12 +52,6 @@ struct nvkm_chan_func {
u32 (*doorbell_handle)(struct nvkm_chan *);
void *(*dtor)(struct nvkm_fifo_chan *);
int (*engine_ctor)(struct nvkm_fifo_chan *, struct nvkm_engine *,
struct nvkm_object *);
void (*engine_dtor)(struct nvkm_fifo_chan *, struct nvkm_engine *);
int (*engine_init)(struct nvkm_fifo_chan *, struct nvkm_engine *);
int (*engine_fini)(struct nvkm_fifo_chan *, struct nvkm_engine *,
bool suspend);
int (*object_ctor)(struct nvkm_fifo_chan *, struct nvkm_object *);
void (*object_dtor)(struct nvkm_fifo_chan *, int);
};
......@@ -78,8 +72,7 @@ int nvkm_chan_preempt_locked(struct nvkm_chan *, bool wait);
int nvkm_chan_cctx_get(struct nvkm_chan *, struct nvkm_engn *, struct nvkm_cctx **,
struct nvkm_client * /*TODO: remove need for this */);
void nvkm_chan_cctx_put(struct nvkm_chan *, struct nvkm_cctx **);
struct nvkm_oproxy;
void nvkm_chan_cctx_bind(struct nvkm_chan *, struct nvkm_oproxy *, struct nvkm_cctx *);
void nvkm_chan_cctx_bind(struct nvkm_chan *, struct nvkm_engn *, struct nvkm_cctx *);
#define CHAN_PRCLI(c,l,p,f,a...) CGRP_PRINT((c)->cgrp, l, p, "%04x:[%s]"f, (c)->id, (c)->name, ##a)
#define CHAN_PRINT(c,l,p,f,a...) CGRP_PRINT((c)->cgrp, l, p, "%04x:"f, (c)->id, ##a)
......
......@@ -30,111 +30,6 @@
#include <nvif/cl826e.h>
/* Map an engine type to the offset of its context pointer within the
 * channel's engine context table; -1 means the engine has no slot.
 */
static int
g84_fifo_chan_engine_addr(struct nvkm_engine *engine)
{
	int addr;

	switch (engine->subdev.type) {
	case NVKM_ENGINE_GR    : addr = 0x0020; break;
	case NVKM_ENGINE_VP    :
	case NVKM_ENGINE_MSPDEC: addr = 0x0040; break;
	case NVKM_ENGINE_MPEG  :
	case NVKM_ENGINE_MSPPP : addr = 0x0060; break;
	case NVKM_ENGINE_BSP   :
	case NVKM_ENGINE_MSVLD : addr = 0x0080; break;
	case NVKM_ENGINE_CIPHER:
	case NVKM_ENGINE_SEC   : addr = 0x00a0; break;
	case NVKM_ENGINE_CE    : addr = 0x00c0; break;
	case NVKM_ENGINE_DMAOBJ:
	case NVKM_ENGINE_SW    :
		addr = -1;
		break;
	default:
		WARN_ON(1);
		addr = -1;
		break;
	}

	return addr;
}
/* Unbind an engine context from a G84 channel.  The channel must first
 * be unloaded from the engine: 0x002520 selects the engine, writing the
 * channel instance address to 0x0032fc triggers the unload, which is
 * then polled for completion before the context pointer is cleared.
 */
static int
g84_fifo_chan_engine_fini(struct nvkm_fifo_chan *base,
			  struct nvkm_engine *engine, bool suspend)
{
	struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
	struct nv50_fifo *fifo = chan->fifo;
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 engn, save;
	int offset;
	bool done;

	offset = g84_fifo_chan_engine_addr(engine);
	if (offset < 0)
		return 0;	/* engine has no context slot, nothing to do */

	/* Select the engine in the kickoff mask, saving the old value. */
	engn = fifo->base.func->engine_id(&fifo->base, engine) - 1;
	save = nvkm_mask(device, 0x002520, 0x0000003f, 1 << engn);
	/* Kick the channel off the engine and wait for completion. */
	nvkm_wr32(device, 0x0032fc, chan->base.inst->addr >> 12);
	done = nvkm_msec(device, 2000,
		if (nvkm_rd32(device, 0x0032fc) != 0xffffffff)
			break;
	) >= 0;
	nvkm_wr32(device, 0x002520, save);
	if (!done) {
		nvkm_error(subdev, "channel %d [%s] unload timeout\n",
			   chan->base.chid, chan->base.object.client->name);
		/* On suspend a hung unload is fatal; otherwise carry on. */
		if (suspend)
			return -EBUSY;
	}

	/* Clear the six-word context pointer entry for this engine. */
	nvkm_kmap(chan->eng);
	nvkm_wo32(chan->eng, offset + 0x00, 0x00000000);
	nvkm_wo32(chan->eng, offset + 0x04, 0x00000000);
	nvkm_wo32(chan->eng, offset + 0x08, 0x00000000);
	nvkm_wo32(chan->eng, offset + 0x0c, 0x00000000);
	nvkm_wo32(chan->eng, offset + 0x10, 0x00000000);
	nvkm_wo32(chan->eng, offset + 0x14, 0x00000000);
	nvkm_done(chan->eng);
	return 0;
}
/* Bind an engine context into a G84 channel: write the context's start
 * and limit addresses into the engine's slot in the channel's context
 * table (0x00190000 presumably marks the entry valid — unverified here).
 */
static int
g84_fifo_chan_engine_init(struct nvkm_fifo_chan *base,
			  struct nvkm_engine *engine)
{
	struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
	struct nvkm_gpuobj *engn = *nv50_fifo_chan_engine(chan, engine);
	u64 limit, start;
	int offset;

	offset = g84_fifo_chan_engine_addr(engine);
	if (offset < 0)
		return 0;	/* engine has no context slot */

	limit = engn->addr + engn->size - 1;
	start = engn->addr;

	nvkm_kmap(chan->eng);
	nvkm_wo32(chan->eng, offset + 0x00, 0x00190000);
	nvkm_wo32(chan->eng, offset + 0x04, lower_32_bits(limit));
	nvkm_wo32(chan->eng, offset + 0x08, lower_32_bits(start));
	/* Word 3 packs the high bits of both limit and start. */
	nvkm_wo32(chan->eng, offset + 0x0c, upper_32_bits(limit) << 24 |
					    upper_32_bits(start));
	nvkm_wo32(chan->eng, offset + 0x10, 0x00000000);
	nvkm_wo32(chan->eng, offset + 0x14, 0x00000000);
	nvkm_done(chan->eng);
	return 0;
}
/* Create the instance backing for an engine context on a G84 channel by
 * binding the engine context object; engines without a context slot are
 * silently skipped.
 */
static int
g84_fifo_chan_engine_ctor(struct nvkm_fifo_chan *base,
			  struct nvkm_engine *engine,
			  struct nvkm_object *object)
{
	struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
	struct nvkm_gpuobj **pgpuobj;

	if (g84_fifo_chan_engine_addr(engine) >= 0) {
		pgpuobj = nv50_fifo_chan_engine(chan, engine);
		return nvkm_object_bind(object, NULL, 0, pgpuobj);
	}

	return 0;
}
static int
g84_fifo_chan_object_ctor(struct nvkm_fifo_chan *base,
struct nvkm_object *object)
......@@ -169,10 +64,6 @@ g84_fifo_chan_object_ctor(struct nvkm_fifo_chan *base,
static const struct nvkm_fifo_chan_func
g84_fifo_chan_func = {
.dtor = nv50_fifo_chan_dtor,
.engine_ctor = g84_fifo_chan_engine_ctor,
.engine_dtor = nv50_fifo_chan_engine_dtor,
.engine_init = g84_fifo_chan_engine_init,
.engine_fini = g84_fifo_chan_engine_fini,
.object_ctor = g84_fifo_chan_object_ctor,
.object_dtor = nv50_fifo_chan_object_dtor,
};
......
......@@ -16,10 +16,6 @@ struct gf100_fifo_chan {
#define GF100_FIFO_ENGN_CE0 4
#define GF100_FIFO_ENGN_CE1 5
#define GF100_FIFO_ENGN_SW 15
struct gf100_fifo_engn {
struct nvkm_gpuobj *inst;
struct nvkm_vma *vma;
} engn[NVKM_FIFO_ENGN_NR];
};
extern const struct nvkm_fifo_chan_oclass gf100_fifo_gpfifo_oclass;
......
......@@ -11,10 +11,6 @@ struct gk104_fifo_chan {
int runl;
#define GK104_FIFO_ENGN_SW 15
struct gk104_fifo_engn {
struct nvkm_gpuobj *inst;
struct nvkm_vma *vma;
} engn[NVKM_FIFO_ENGN_NR];
};
extern const struct nvkm_fifo_chan_func gk104_fifo_gpfifo_func;
......@@ -22,11 +18,6 @@ extern const struct nvkm_fifo_chan_func gk104_fifo_gpfifo_func;
int gk104_fifo_gpfifo_new(struct gk104_fifo *, const struct nvkm_oclass *,
void *data, u32 size, struct nvkm_object **);
void *gk104_fifo_gpfifo_dtor(struct nvkm_fifo_chan *);
struct gk104_fifo_engn *gk104_fifo_gpfifo_engine(struct gk104_fifo_chan *, struct nvkm_engine *);
int gk104_fifo_gpfifo_engine_ctor(struct nvkm_fifo_chan *, struct nvkm_engine *,
struct nvkm_object *);
void gk104_fifo_gpfifo_engine_dtor(struct nvkm_fifo_chan *,
struct nvkm_engine *);
int gv100_fifo_gpfifo_new(struct gk104_fifo *, const struct nvkm_oclass *,
void *data, u32 size, struct nvkm_object **);
......@@ -34,10 +25,6 @@ int gv100_fifo_gpfifo_new_(const struct nvkm_fifo_chan_func *,
struct gk104_fifo *, u64 *, u16 *, u64, u64, u64,
u64 *, bool, u32 *, const struct nvkm_oclass *,
struct nvkm_object **);
int gv100_fifo_gpfifo_engine_init(struct nvkm_fifo_chan *,
struct nvkm_engine *);
int gv100_fifo_gpfifo_engine_fini(struct nvkm_fifo_chan *,
struct nvkm_engine *, bool);
int tu102_fifo_gpfifo_new(struct gk104_fifo *, const struct nvkm_oclass *,
void *data, u32 size, struct nvkm_object **);
......
......@@ -8,12 +8,10 @@
struct nv04_fifo_chan {
struct nvkm_fifo_chan base;
struct nv04_fifo *fifo;
u32 ramfc;
#define NV04_FIFO_ENGN_SW 0
#define NV04_FIFO_ENGN_GR 1
#define NV04_FIFO_ENGN_MPEG 2
#define NV04_FIFO_ENGN_DMA 3
struct nvkm_gpuobj *engn[NVKM_FIFO_ENGN_NR];
};
extern const struct nvkm_fifo_chan_func nv04_fifo_dma_func;
......
......@@ -28,133 +28,6 @@
#include <subdev/mmu.h>
#include <subdev/timer.h>
/* Offset of an engine's context pointer entry in the channel's engine
 * context table on NV50; -1 for engines without a slot.
 */
static int
nv50_fifo_chan_engine_addr(struct nvkm_engine *engine)
{
	int addr;

	switch (engine->subdev.type) {
	case NVKM_ENGINE_GR  : addr = 0x0000; break;
	case NVKM_ENGINE_MPEG: addr = 0x0060; break;
	case NVKM_ENGINE_DMAOBJ:
	case NVKM_ENGINE_SW:
		addr = -1;
		break;
	default:
		WARN_ON(1);
		addr = -1;
		break;
	}

	return addr;
}
struct nvkm_gpuobj **
nv50_fifo_chan_engine(struct nv50_fifo_chan *chan, struct nvkm_engine *engine)
{
int engi = chan->base.fifo->func->engine_id(chan->base.fifo, engine);
if (engi >= 0)
return &chan->engn[engi];
return NULL;
}
/* Unbind an engine context from an NV50 channel, kicking the channel off
 * the engine first and clearing the context pointer on success.
 */
static int
nv50_fifo_chan_engine_fini(struct nvkm_fifo_chan *base,
			   struct nvkm_engine *engine, bool suspend)
{
	struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
	struct nv50_fifo *fifo = chan->fifo;
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	int offset, ret = 0;
	u32 me;

	offset = nv50_fifo_chan_engine_addr(engine);
	if (offset < 0)
		return 0;	/* engine has no context slot */

	/* HW bug workaround:
	 *
	 * PFIFO will hang forever if the connected engines don't report
	 * that they've processed the context switch request.
	 *
	 * In order for the kickoff to work, we need to ensure all the
	 * connected engines are in a state where they can answer.
	 *
	 * Newer chipsets don't seem to suffer from this issue, and well,
	 * there's also a "ignore these engines" bitmask reg we can use
	 * if we hit the issue there..
	 */
	me = nvkm_mask(device, 0x00b860, 0x00000001, 0x00000001);

	/* do the kickoff... */
	nvkm_wr32(device, 0x0032fc, chan->base.inst->addr >> 12);
	if (nvkm_msec(device, 2000,
		if (nvkm_rd32(device, 0x0032fc) != 0xffffffff)
			break;
	) < 0) {
		nvkm_error(subdev, "channel %d [%s] unload timeout\n",
			   chan->base.chid, chan->base.object.client->name);
		/* Timeout is only fatal on suspend. */
		if (suspend)
			ret = -EBUSY;
	}
	/* Restore the register modified by the workaround above. */
	nvkm_wr32(device, 0x00b860, me);

	/* Only clear the context pointer after a clean unload. */
	if (ret == 0) {
		nvkm_kmap(chan->eng);
		nvkm_wo32(chan->eng, offset + 0x00, 0x00000000);
		nvkm_wo32(chan->eng, offset + 0x04, 0x00000000);
		nvkm_wo32(chan->eng, offset + 0x08, 0x00000000);
		nvkm_wo32(chan->eng, offset + 0x0c, 0x00000000);
		nvkm_wo32(chan->eng, offset + 0x10, 0x00000000);
		nvkm_wo32(chan->eng, offset + 0x14, 0x00000000);
		nvkm_done(chan->eng);
	}

	return ret;
}
/* Bind an engine context into an NV50 channel by writing its start and
 * limit addresses into the engine's slot of the context table.
 */
static int
nv50_fifo_chan_engine_init(struct nvkm_fifo_chan *base,
			   struct nvkm_engine *engine)
{
	struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
	struct nvkm_gpuobj *engn = *nv50_fifo_chan_engine(chan, engine);
	u64 limit, start;
	int offset;

	offset = nv50_fifo_chan_engine_addr(engine);
	if (offset < 0)
		return 0;	/* engine has no context slot */

	limit = engn->addr + engn->size - 1;
	start = engn->addr;

	nvkm_kmap(chan->eng);
	nvkm_wo32(chan->eng, offset + 0x00, 0x00190000);
	nvkm_wo32(chan->eng, offset + 0x04, lower_32_bits(limit));
	nvkm_wo32(chan->eng, offset + 0x08, lower_32_bits(start));
	/* Word 3 packs the high bits of both limit and start. */
	nvkm_wo32(chan->eng, offset + 0x0c, upper_32_bits(limit) << 24 |
					    upper_32_bits(start));
	nvkm_wo32(chan->eng, offset + 0x10, 0x00000000);
	nvkm_wo32(chan->eng, offset + 0x14, 0x00000000);
	nvkm_done(chan->eng);
	return 0;
}
/* Destroy the instance object backing 'engine's context on this channel. */
void
nv50_fifo_chan_engine_dtor(struct nvkm_fifo_chan *base,
			   struct nvkm_engine *engine)
{
	struct nvkm_gpuobj **pgpuobj =
		nv50_fifo_chan_engine(nv50_fifo_chan(base), engine);

	nvkm_gpuobj_del(pgpuobj);
}
/* Create the instance backing for an engine context on an NV50 channel;
 * engines without a context slot are silently skipped.
 */
static int
nv50_fifo_chan_engine_ctor(struct nvkm_fifo_chan *base,
			   struct nvkm_engine *engine,
			   struct nvkm_object *object)
{
	struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
	struct nvkm_gpuobj **pgpuobj;

	if (nv50_fifo_chan_engine_addr(engine) >= 0) {
		pgpuobj = nv50_fifo_chan_engine(chan, engine);
		return nvkm_object_bind(object, NULL, 0, pgpuobj);
	}

	return 0;
}
void
nv50_fifo_chan_object_dtor(struct nvkm_fifo_chan *base, int cookie)
{
......@@ -193,10 +66,6 @@ nv50_fifo_chan_dtor(struct nvkm_fifo_chan *base)
static const struct nvkm_fifo_chan_func
nv50_fifo_chan_func = {
.dtor = nv50_fifo_chan_dtor,
.engine_ctor = nv50_fifo_chan_engine_ctor,
.engine_dtor = nv50_fifo_chan_engine_dtor,
.engine_init = nv50_fifo_chan_engine_init,
.engine_fini = nv50_fifo_chan_engine_fini,
.object_ctor = nv50_fifo_chan_object_ctor,
.object_dtor = nv50_fifo_chan_object_dtor,
};
......
......@@ -9,7 +9,6 @@ struct nv50_fifo_chan {
struct nv50_fifo *fifo;
struct nvkm_fifo_chan base;
struct nvkm_gpuobj *eng;
struct nvkm_ramht *ramht;
#define NV50_FIFO_ENGN_SW 0
......@@ -31,14 +30,11 @@ struct nv50_fifo_chan {
#define G84_FIFO_ENGN_BSP 6
#define G84_FIFO_ENGN_MSVLD 6
#define G84_FIFO_ENGN_DMA 7
struct nvkm_gpuobj *engn[NVKM_FIFO_ENGN_NR];
};
int nv50_fifo_chan_ctor(struct nv50_fifo *, u64 vmm, u64 push,
const struct nvkm_oclass *, struct nv50_fifo_chan *);
void *nv50_fifo_chan_dtor(struct nvkm_fifo_chan *);
struct nvkm_gpuobj **nv50_fifo_chan_engine(struct nv50_fifo_chan *, struct nvkm_engine *);
void nv50_fifo_chan_engine_dtor(struct nvkm_fifo_chan *, struct nvkm_engine *);
void nv50_fifo_chan_object_dtor(struct nvkm_fifo_chan *, int);
int g84_fifo_chan_ctor(struct nv50_fifo *, u64 vmm, u64 push,
......
......@@ -31,121 +31,6 @@
#include <nvif/cl006b.h>
#include <nvif/unpack.h>
/* Report whether 'engine' has a context pointer on NV40, and if so fill
 * in the PFIFO shadow register (*reg) and RAMFC word offset (*ctx).
 */
static bool
nv40_fifo_dma_engine(struct nvkm_engine *engine, u32 *reg, u32 *ctx)
{
	const enum nvkm_subdev_type type = engine->subdev.type;

	if (type == NVKM_ENGINE_GR) {
		*reg = 0x0032e0;
		*ctx = 0x38;
		return true;
	}

	if (type == NVKM_ENGINE_MPEG) {
		/* PMPEG context slot only exists from chipset 0x44 on. */
		if (engine->subdev.device->chipset < 0x44)
			return false;
		*reg = 0x00330c;
		*ctx = 0x54;
		return true;
	}

	if (type == NVKM_ENGINE_DMAOBJ || type == NVKM_ENGINE_SW)
		return false;

	WARN_ON(1);
	return false;
}
static struct nvkm_gpuobj **
nv40_fifo_dma_engn(struct nv04_fifo_chan *chan, struct nvkm_engine *engine)
{
int engi = chan->base.fifo->func->engine_id(chan->base.fifo, engine);
if (engi >= 0)
return &chan->engn[engi];
return NULL;
}
/* Unbind an engine context from an NV40 DMA channel: with PFIFO pulling
 * disabled, clear the live shadow register if this channel is currently
 * active, and clear the channel's RAMFC context word.
 */
static int
nv40_fifo_dma_engine_fini(struct nvkm_fifo_chan *base,
			  struct nvkm_engine *engine, bool suspend)
{
	struct nv04_fifo_chan *chan = nv04_fifo_chan(base);
	struct nv04_fifo *fifo = chan->fifo;
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	struct nvkm_instmem *imem = device->imem;
	unsigned long flags;
	u32 reg, ctx;
	int chid;

	if (!nv40_fifo_dma_engine(engine, &reg, &ctx))
		return 0;	/* engine has no context pointer on NV40 */

	spin_lock_irqsave(&fifo->base.lock, flags);
	/* Disable PFIFO pulling while the registers are rewritten. */
	nvkm_mask(device, 0x002500, 0x00000001, 0x00000000);

	/* Only the currently-active channel's shadow register is live. */
	chid = nvkm_rd32(device, 0x003204) & (fifo->base.nr - 1);
	if (chid == chan->base.chid)
		nvkm_wr32(device, reg, 0x00000000);
	nvkm_kmap(imem->ramfc);
	nvkm_wo32(imem->ramfc, chan->ramfc + ctx, 0x00000000);
	nvkm_done(imem->ramfc);

	nvkm_mask(device, 0x002500, 0x00000001, 0x00000001);
	spin_unlock_irqrestore(&fifo->base.lock, flags);
	return 0;
}
/* Bind an engine context to an NV40 DMA channel: mirror of the fini
 * path, writing the context's instance address instead of zero.
 */
static int
nv40_fifo_dma_engine_init(struct nvkm_fifo_chan *base,
			  struct nvkm_engine *engine)
{
	struct nv04_fifo_chan *chan = nv04_fifo_chan(base);
	struct nv04_fifo *fifo = chan->fifo;
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	struct nvkm_instmem *imem = device->imem;
	unsigned long flags;
	u32 inst, reg, ctx;
	int chid;

	if (!nv40_fifo_dma_engine(engine, &reg, &ctx))
		return 0;	/* engine has no context pointer on NV40 */

	/* Context address is stored as a 16-byte-unit instance pointer. */
	inst = (*nv40_fifo_dma_engn(chan, engine))->addr >> 4;

	spin_lock_irqsave(&fifo->base.lock, flags);
	/* Disable PFIFO pulling while the registers are rewritten. */
	nvkm_mask(device, 0x002500, 0x00000001, 0x00000000);

	/* Only the currently-active channel's shadow register is live. */
	chid = nvkm_rd32(device, 0x003204) & (fifo->base.nr - 1);
	if (chid == chan->base.chid)
		nvkm_wr32(device, reg, inst);
	nvkm_kmap(imem->ramfc);
	nvkm_wo32(imem->ramfc, chan->ramfc + ctx, inst);
	nvkm_done(imem->ramfc);

	nvkm_mask(device, 0x002500, 0x00000001, 0x00000001);
	spin_unlock_irqrestore(&fifo->base.lock, flags);
	return 0;
}
/* Destroy the instance object backing 'engine's context on this channel. */
static void
nv40_fifo_dma_engine_dtor(struct nvkm_fifo_chan *base,
			  struct nvkm_engine *engine)
{
	struct nvkm_gpuobj **pgpuobj =
		nv40_fifo_dma_engn(nv04_fifo_chan(base), engine);

	nvkm_gpuobj_del(pgpuobj);
}
/* Create the instance backing for an engine context on an NV40 DMA
 * channel; engines without a context pointer are silently skipped.
 */
static int
nv40_fifo_dma_engine_ctor(struct nvkm_fifo_chan *base,
			  struct nvkm_engine *engine,
			  struct nvkm_object *object)
{
	struct nv04_fifo_chan *chan = nv04_fifo_chan(base);
	u32 reg, ctx;

	if (nv40_fifo_dma_engine(engine, &reg, &ctx))
		return nvkm_object_bind(object, NULL, 0,
					nv40_fifo_dma_engn(chan, engine));

	return 0;
}
static int
nv40_fifo_dma_object_ctor(struct nvkm_fifo_chan *base,
struct nvkm_object *object)
......@@ -176,10 +61,6 @@ nv40_fifo_dma_object_ctor(struct nvkm_fifo_chan *base,
static const struct nvkm_fifo_chan_func
nv40_fifo_dma_func = {
.dtor = nv04_fifo_dma_dtor,
.engine_ctor = nv40_fifo_dma_engine_ctor,
.engine_dtor = nv40_fifo_dma_engine_dtor,
.engine_init = nv40_fifo_dma_engine_init,
.engine_fini = nv40_fifo_dma_engine_fini,
.object_ctor = nv40_fifo_dma_object_ctor,
.object_dtor = nv04_fifo_dma_object_dtor,
};
......
......@@ -26,6 +26,7 @@
#include "runl.h"
#include <core/ramht.h>
#include <subdev/timer.h>
#include "nv50.h"
#include "channv50.h"
......@@ -67,7 +68,6 @@ g84_chan_ramfc_write(struct nvkm_chan *chan, u64 offset, u64 length, u32 devm, b
if (ret)
return ret;
nv50_fifo_chan(chan)->eng = chan->eng;
nv50_fifo_chan(chan)->ramht = chan->ramht;
nvkm_kmap(chan->ramfc);
......@@ -106,8 +106,58 @@ g84_chan = {
.stop = nv50_chan_stop,
};
/* Bind (cctx != NULL) or unbind (cctx == NULL) an engine context in a
 * G84 channel's engine context table.
 *
 * Unbinding first kicks the channel off the engine: 0x002520 selects
 * the engine, writing the channel instance address to 0x0032fc starts
 * the unload, which is polled for completion.
 */
static void
g84_ectx_bind(struct nvkm_engn *engn, struct nvkm_cctx *cctx, struct nvkm_chan *chan)
{
	struct nvkm_subdev *subdev = &chan->cgrp->runl->fifo->engine.subdev;
	struct nvkm_device *device = subdev->device;
	u64 start = 0, limit = 0;
	u32 flags = 0, ptr0, save;

	/* Offset of this engine's slot in the channel's context table. */
	switch (engn->engine->subdev.type) {
	case NVKM_ENGINE_GR    : ptr0 = 0x0020; break;
	case NVKM_ENGINE_VP    :
	case NVKM_ENGINE_MSPDEC: ptr0 = 0x0040; break;
	case NVKM_ENGINE_MPEG  :
	case NVKM_ENGINE_MSPPP : ptr0 = 0x0060; break;
	case NVKM_ENGINE_BSP   :
	case NVKM_ENGINE_MSVLD : ptr0 = 0x0080; break;
	case NVKM_ENGINE_CIPHER:
	case NVKM_ENGINE_SEC   : ptr0 = 0x00a0; break;
	case NVKM_ENGINE_CE    : ptr0 = 0x00c0; break;
	default:
		WARN_ON(1);
		return;
	}

	if (!cctx) {
		/* Kick the channel off the engine before clearing the slot. */
		save = nvkm_mask(device, 0x002520, 0x0000003f, BIT(engn->id - 1));
		nvkm_wr32(device, 0x0032fc, chan->inst->addr >> 12);
		nvkm_msec(device, 2000,
			if (nvkm_rd32(device, 0x0032fc) != 0xffffffff)
				break;
		);
		nvkm_wr32(device, 0x002520, save);
	} else {
		flags = 0x00190000;
		start = cctx->vctx->inst->addr;
		limit = start + cctx->vctx->inst->size - 1;
	}

	nvkm_kmap(chan->eng);
	nvkm_wo32(chan->eng, ptr0 + 0x00, flags);
	nvkm_wo32(chan->eng, ptr0 + 0x04, lower_32_bits(limit));
	nvkm_wo32(chan->eng, ptr0 + 0x08, lower_32_bits(start));
	/* Word 3 packs the HIGH bits of both limit and start.  The code
	 * this replaced (g84_fifo_chan_engine_init) wrote
	 * upper_32_bits(start) here; lower_32_bits(start) would drop the
	 * start address' high bits for contexts above 4GiB.
	 */
	nvkm_wo32(chan->eng, ptr0 + 0x0c, upper_32_bits(limit) << 24 |
					  upper_32_bits(start));
	nvkm_wo32(chan->eng, ptr0 + 0x10, 0x00000000);
	nvkm_wo32(chan->eng, ptr0 + 0x14, 0x00000000);
	nvkm_done(chan->eng);
}
/* G84 engine functions: only context binding is implemented here. */
const struct nvkm_engn_func
g84_engn = {
	.bind = g84_ectx_bind,
};
static void
......@@ -138,28 +188,6 @@ g84_fifo_nonstall = {
.fini = g84_fifo_nonstall_block,
};
/* Map an engine type to its G84 FIFO engine ID; -1 (with a warning) for
 * engines unknown to this FIFO.
 */
int
g84_fifo_engine_id(struct nvkm_fifo *base, struct nvkm_engine *engine)
{
	int id;

	switch (engine->subdev.type) {
	case NVKM_ENGINE_SW    : id = G84_FIFO_ENGN_SW; break;
	case NVKM_ENGINE_GR    : id = G84_FIFO_ENGN_GR; break;
	case NVKM_ENGINE_MPEG  :
	case NVKM_ENGINE_MSPPP : id = G84_FIFO_ENGN_MPEG; break;
	case NVKM_ENGINE_CE    : id = G84_FIFO_ENGN_CE0; break;
	case NVKM_ENGINE_VP    :
	case NVKM_ENGINE_MSPDEC: id = G84_FIFO_ENGN_VP; break;
	case NVKM_ENGINE_CIPHER:
	case NVKM_ENGINE_SEC   : id = G84_FIFO_ENGN_CIPHER; break;
	case NVKM_ENGINE_BSP   :
	case NVKM_ENGINE_MSVLD : id = G84_FIFO_ENGN_BSP; break;
	case NVKM_ENGINE_DMAOBJ: id = G84_FIFO_ENGN_DMA; break;
	default:
		WARN_ON(1);
		id = -1;
		break;
	}

	return id;
}
static int
g84_fifo_runl_ctor(struct nvkm_fifo *fifo)
{
......@@ -188,7 +216,6 @@ g84_fifo = {
.runl_ctor = g84_fifo_runl_ctor,
.init = nv50_fifo_init,
.intr = nv04_fifo_intr,
.engine_id = g84_fifo_engine_id,
.pause = nv04_fifo_pause,
.start = nv04_fifo_start,
.nonstall = &g84_fifo_nonstall,
......
......@@ -55,7 +55,6 @@ g98_fifo = {
.runl_ctor = g98_fifo_runl_ctor,
.init = nv50_fifo_init,
.intr = nv04_fifo_intr,
.engine_id = g84_fifo_engine_id,
.pause = nv04_fifo_pause,
.start = nv04_fifo_start,
.nonstall = &g84_fifo_nonstall,
......
......@@ -158,6 +158,47 @@ gf100_chan = {
.preempt = gf100_chan_preempt,
};
/* Bind (cctx != NULL) or unbind (cctx == NULL) an engine context in a
 * GF100 channel: write the context's virtual address (low bit 2 set,
 * presumably a valid flag — unverified here) into the engine's pointer
 * pair in the channel instance block.
 */
static void
gf100_ectx_bind(struct nvkm_engn *engn, struct nvkm_cctx *cctx, struct nvkm_chan *chan)
{
	u64 addr = 0ULL;
	u32 ptr0;

	/* Offset of this engine's pointer pair in the instance block. */
	switch (engn->engine->subdev.type) {
	case NVKM_ENGINE_SW    : return;	/* SW engine has no HW context */
	case NVKM_ENGINE_GR    : ptr0 = 0x0210; break;
	case NVKM_ENGINE_CE    : ptr0 = 0x0230 + (engn->engine->subdev.inst * 0x10); break;
	case NVKM_ENGINE_MSPDEC: ptr0 = 0x0250; break;
	case NVKM_ENGINE_MSPPP : ptr0 = 0x0260; break;
	case NVKM_ENGINE_MSVLD : ptr0 = 0x0270; break;
	default:
		WARN_ON(1);
		return;
	}

	if (cctx) {
		addr = cctx->vctx->vma->addr;
		addr |= 4ULL;
	}

	nvkm_kmap(chan->inst);
	nvkm_wo32(chan->inst, ptr0 + 0, lower_32_bits(addr));
	nvkm_wo32(chan->inst, ptr0 + 4, upper_32_bits(addr));
	nvkm_done(chan->inst);
}
/* Allocate VMM space for an engine context and map the context's
 * instance memory there (no map args on GF100).
 */
static int
gf100_ectx_ctor(struct nvkm_engn *engn, struct nvkm_vctx *vctx)
{
	int ret = nvkm_vmm_get(vctx->vmm, 12, vctx->inst->size, &vctx->vma);

	if (ret == 0)
		ret = nvkm_memory_map(vctx->inst, 0, vctx->vmm, vctx->vma,
				      NULL, 0);
	return ret;
}
bool
gf100_engn_mmu_fault_triggered(struct nvkm_engn *engn)
{
......@@ -250,6 +291,8 @@ gf100_engn = {
.cxid = gf100_engn_cxid,
.mmu_fault_trigger = gf100_engn_mmu_fault_trigger,
.mmu_fault_triggered = gf100_engn_mmu_fault_triggered,
.ctor = gf100_ectx_ctor,
.bind = gf100_ectx_bind,
};
const struct nvkm_engn_func
......@@ -422,22 +465,6 @@ gf100_fifo_nonstall = {
.fini = gf100_fifo_nonstall_block,
};
/* Map an engine type to its GF100 FIFO engine ID; -1 (with a warning)
 * for engines unknown to this FIFO.
 */
static int
gf100_fifo_engine_id(struct nvkm_fifo *base, struct nvkm_engine *engine)
{
	int id;

	switch (engine->subdev.type) {
	case NVKM_ENGINE_GR    : id = GF100_FIFO_ENGN_GR; break;
	case NVKM_ENGINE_MSPDEC: id = GF100_FIFO_ENGN_MSPDEC; break;
	case NVKM_ENGINE_MSPPP : id = GF100_FIFO_ENGN_MSPPP; break;
	case NVKM_ENGINE_MSVLD : id = GF100_FIFO_ENGN_MSVLD; break;
	case NVKM_ENGINE_CE    : id = GF100_FIFO_ENGN_CE0 + engine->subdev.inst; break;
	case NVKM_ENGINE_SW    : id = GF100_FIFO_ENGN_SW; break;
	default:
		WARN_ON(1);
		id = -1;
		break;
	}

	return id;
}
static const struct nvkm_enum
gf100_fifo_mmu_fault_engine[] = {
{ 0x00, "PGRAPH", NULL, NVKM_ENGINE_GR },
......@@ -935,7 +962,6 @@ gf100_fifo = {
.intr_mmu_fault_unit = gf100_fifo_intr_mmu_fault_unit,
.intr_ctxsw_timeout = gf100_fifo_intr_ctxsw_timeout,
.mmu_fault = &gf100_fifo_mmu_fault,
.engine_id = gf100_fifo_engine_id,
.nonstall = &gf100_fifo_nonstall,
.runl = &gf100_runl,
.runq = &gf100_runq,
......
......@@ -35,6 +35,7 @@
#include <subdev/top.h>
#include <nvif/class.h>
#include <nvif/if900d.h>
void
gk104_chan_stop(struct nvkm_chan *chan)
......@@ -130,6 +131,63 @@ gk104_chan = {
.preempt = gf100_chan_preempt,
};
/* Bind (cctx != NULL) or unbind (cctx == NULL) an engine context in a
 * GK104+ channel: write the context's virtual address (low bit 2 set,
 * presumably a valid flag — unverified here) into the engine's pointer
 * pair(s) in the channel instance block.  NVDEC, and the first NVENC
 * instance, mirror the address into a second slot (ptr1).
 */
static void
gk104_ectx_bind(struct nvkm_engn *engn, struct nvkm_cctx *cctx, struct nvkm_chan *chan)
{
	u32 ptr0, ptr1 = 0;
	u64 addr = 0ULL;

	/* Offset(s) of this engine's pointer pair in the instance block. */
	switch (engn->engine->subdev.type) {
	case NVKM_ENGINE_SW    : return;	/* SW engine has no HW context */
	case NVKM_ENGINE_GR    : ptr0 = 0x0210; break;
	case NVKM_ENGINE_SEC   : ptr0 = 0x0220; break;
	case NVKM_ENGINE_MSPDEC: ptr0 = 0x0250; break;
	case NVKM_ENGINE_MSPPP : ptr0 = 0x0260; break;
	case NVKM_ENGINE_MSVLD : ptr0 = 0x0270; break;
	case NVKM_ENGINE_VIC   : ptr0 = 0x0280; break;
	case NVKM_ENGINE_MSENC : ptr0 = 0x0290; break;
	case NVKM_ENGINE_NVDEC :
		ptr1 = 0x0270;
		ptr0 = 0x0210;
		break;
	case NVKM_ENGINE_NVENC :
		/* Only instance 0 gets the secondary slot. */
		if (!engn->engine->subdev.inst)
			ptr1 = 0x0290;
		ptr0 = 0x0210;
		break;
	default:
		WARN_ON(1);
		return;
	}

	if (cctx) {
		addr = cctx->vctx->vma->addr;
		addr |= 4ULL;
	}

	nvkm_kmap(chan->inst);
	nvkm_wo32(chan->inst, ptr0 + 0, lower_32_bits(addr));
	nvkm_wo32(chan->inst, ptr0 + 4, upper_32_bits(addr));
	if (ptr1) {
		nvkm_wo32(chan->inst, ptr1 + 0, lower_32_bits(addr));
		nvkm_wo32(chan->inst, ptr1 + 4, upper_32_bits(addr));
	}
	nvkm_done(chan->inst);
}
/* Allocate VMM space for an engine context and map its instance memory
 * there, requesting privileged PTEs via the map arguments.
 */
int
gk104_ectx_ctor(struct nvkm_engn *engn, struct nvkm_vctx *vctx)
{
	struct gf100_vmm_map_v0 args = { .priv = 1 };
	int ret = nvkm_vmm_get(vctx->vmm, 12, vctx->inst->size, &vctx->vma);

	if (ret == 0)
		ret = nvkm_memory_map(vctx->inst, 0, vctx->vmm, vctx->vma,
				      &args, sizeof(args));
	return ret;
}
/*TODO: clean this up */
struct gk104_engn_status {
bool busy;
......@@ -216,6 +274,8 @@ gk104_engn = {
.cxid = gk104_engn_cxid,
.mmu_fault_trigger = gf100_engn_mmu_fault_trigger,
.mmu_fault_triggered = gf100_engn_mmu_fault_triggered,
.ctor = gk104_ectx_ctor,
.bind = gk104_ectx_bind,
};
const struct nvkm_engn_func
......@@ -410,24 +470,6 @@ gk104_runl = {
.preempt_pending = gf100_runl_preempt_pending,
};
/* Map an engine to its GK104 FIFO engine ID by scanning the table built
 * at oneinit time; SW is a fixed ID, unknown engines warn and return -1.
 */
int
gk104_fifo_engine_id(struct nvkm_fifo *base, struct nvkm_engine *engine)
{
	struct gk104_fifo *fifo = gk104_fifo(base);
	int i;

	if (engine->subdev.type == NVKM_ENGINE_SW)
		return GK104_FIFO_ENGN_SW;

	for (i = 0; i < fifo->engine_nr && engine; i++) {
		if (fifo->engine[i].engine == engine)
			return i;
	}

	WARN_ON(1);
	return -1;
}
static const struct nvkm_enum
gk104_fifo_mmu_fault_engine[] = {
{ 0x00, "GR", NULL, NVKM_ENGINE_GR },
......@@ -778,8 +820,6 @@ gk104_fifo_oneinit(struct nvkm_fifo *base)
if (engn < 0)
continue;
fifo->engine[engn].engine = nvkm_device_engine(device, tdev->type, tdev->inst);
fifo->engine_nr = max(fifo->engine_nr, engn + 1);
fifo->runlist[tdev->runlist].engm |= BIT(engn);
fifo->runlist[tdev->runlist].engm_sw |= BIT(engn);
if (tdev->type == NVKM_ENGINE_GR)
......@@ -825,7 +865,6 @@ gk104_fifo = {
.intr_mmu_fault_unit = gf100_fifo_intr_mmu_fault_unit,
.intr_ctxsw_timeout = gf100_fifo_intr_ctxsw_timeout,
.mmu_fault = &gk104_fifo_mmu_fault,
.engine_id = gk104_fifo_engine_id,
.nonstall = &gf100_fifo_nonstall,
.runl = &gk104_runl,
.runq = &gk104_runq,
......
......@@ -14,11 +14,6 @@ struct gk104_fifo {
const struct gk104_fifo_func *func;
struct nvkm_fifo base;
struct {
struct nvkm_engine *engine;
} engine[16];
int engine_nr;
struct {
u32 engm;
u32 engm_sw;
......
......@@ -119,7 +119,6 @@ gk110_fifo = {
.intr_mmu_fault_unit = gf100_fifo_intr_mmu_fault_unit,
.intr_ctxsw_timeout = gf100_fifo_intr_ctxsw_timeout,
.mmu_fault = &gk104_fifo_mmu_fault,
.engine_id = gk104_fifo_engine_id,
.nonstall = &gf100_fifo_nonstall,
.runl = &gk110_runl,
.runq = &gk104_runq,
......
......@@ -63,7 +63,6 @@ gk208_fifo = {
.intr_mmu_fault_unit = gf100_fifo_intr_mmu_fault_unit,
.intr_ctxsw_timeout = gf100_fifo_intr_ctxsw_timeout,
.mmu_fault = &gk104_fifo_mmu_fault,
.engine_id = gk104_fifo_engine_id,
.nonstall = &gf100_fifo_nonstall,
.runl = &gk110_runl,
.runq = &gk208_runq,
......
......@@ -38,7 +38,6 @@ gk20a_fifo = {
.intr_mmu_fault_unit = gf100_fifo_intr_mmu_fault_unit,
.intr_ctxsw_timeout = gf100_fifo_intr_ctxsw_timeout,
.mmu_fault = &gk104_fifo_mmu_fault,
.engine_id = gk104_fifo_engine_id,
.nonstall = &gf100_fifo_nonstall,
.runl = &gk110_runl,
.runq = &gk208_runq,
......
......@@ -143,7 +143,6 @@ gm107_fifo = {
.intr_mmu_fault_unit = gm107_fifo_intr_mmu_fault_unit,
.intr_ctxsw_timeout = gf100_fifo_intr_ctxsw_timeout,
.mmu_fault = &gm107_fifo_mmu_fault,
.engine_id = gk104_fifo_engine_id,
.nonstall = &gf100_fifo_nonstall,
.runl = &gm107_runl,
.runq = &gk208_runq,
......
......@@ -52,7 +52,6 @@ gm200_fifo = {
.intr_mmu_fault_unit = gm107_fifo_intr_mmu_fault_unit,
.intr_ctxsw_timeout = gf100_fifo_intr_ctxsw_timeout,
.mmu_fault = &gm107_fifo_mmu_fault,
.engine_id = gk104_fifo_engine_id,
.nonstall = &gf100_fifo_nonstall,
.runl = &gm107_runl,
.runq = &gk208_runq,
......
......@@ -125,7 +125,6 @@ gp100_fifo = {
.intr_mmu_fault_unit = gp100_fifo_intr_mmu_fault_unit,
.intr_ctxsw_timeout = gf100_fifo_intr_ctxsw_timeout,
.mmu_fault = &gp100_fifo_mmu_fault,
.engine_id = gk104_fifo_engine_id,
.nonstall = &gf100_fifo_nonstall,
.runl = &gp100_runl,
.runq = &gk208_runq,
......
......@@ -31,102 +31,6 @@
#include <nvif/cl906f.h>
#include <nvif/unpack.h>
/* Instance-block offset of an engine's context pointer; 0 for engines
 * that have none.  CE units are spaced 0x10 apart starting at 0x0230.
 */
static u32
gf100_fifo_gpfifo_engine_addr(struct nvkm_engine *engine)
{
	u32 addr;

	switch (engine->subdev.type) {
	case NVKM_ENGINE_SW:
		addr = 0;
		break;
	case NVKM_ENGINE_GR:
		addr = 0x0210;
		break;
	case NVKM_ENGINE_CE:
		addr = 0x0230 + (engine->subdev.inst * 0x10);
		break;
	case NVKM_ENGINE_MSPDEC:
		addr = 0x0250;
		break;
	case NVKM_ENGINE_MSPPP:
		addr = 0x0260;
		break;
	case NVKM_ENGINE_MSVLD:
		addr = 0x0270;
		break;
	default:
		WARN_ON(1);
		addr = 0;
		break;
	}

	return addr;
}
static struct gf100_fifo_engn *
gf100_fifo_gpfifo_engine(struct gf100_fifo_chan *chan, struct nvkm_engine *engine)
{
int engi = chan->base.fifo->func->engine_id(chan->base.fifo, engine);
if (engi >= 0)
return &chan->engn[engi];
return NULL;
}
/* Unbind an engine context from the channel by zeroing its context
 * pointer in the instance block.  Engines with no context pointer
 * (offset 0) are left untouched.  Always succeeds.
 */
static int
gf100_fifo_gpfifo_engine_fini(struct nvkm_fifo_chan *base,
			      struct nvkm_engine *engine, bool suspend)
{
	struct gf100_fifo_chan *chan = gf100_fifo_chan(base);
	struct nvkm_gpuobj *inst = chan->base.inst;
	const u32 offset = gf100_fifo_gpfifo_engine_addr(engine);

	if (offset) {
		nvkm_kmap(inst);
		nvkm_wo32(inst, offset + 0x00, 0x00000000);
		nvkm_wo32(inst, offset + 0x04, 0x00000000);
		nvkm_done(inst);
	}

	return 0;
}
/* Bind an engine context to the channel: write its VMM address (with bit
 * 2 set -- presumably a valid/target flag, TODO confirm) into the
 * instance block.
 *
 * NOTE(review): engn may be NULL when engine_id() fails; this relies on
 * offset being 0 for exactly those engines so engn->vma is never read --
 * verify engine_id/engine_addr stay in agreement.
 */
static int
gf100_fifo_gpfifo_engine_init(struct nvkm_fifo_chan *base,
			      struct nvkm_engine *engine)
{
	const u32 offset = gf100_fifo_gpfifo_engine_addr(engine);
	struct gf100_fifo_chan *chan = gf100_fifo_chan(base);
	struct gf100_fifo_engn *engn = gf100_fifo_gpfifo_engine(chan, engine);
	struct nvkm_gpuobj *inst = chan->base.inst;

	if (offset) {
		nvkm_kmap(inst);
		nvkm_wo32(inst, offset + 0x00, lower_32_bits(engn->vma->addr) | 4);
		nvkm_wo32(inst, offset + 0x04, upper_32_bits(engn->vma->addr));
		nvkm_done(inst);
	}

	return 0;
}
/* Release an engine context's VMM range and backing gpuobj.  Both calls
 * tolerate NULL/empty handles, so this is safe even if ctor failed early.
 */
static void
gf100_fifo_gpfifo_engine_dtor(struct nvkm_fifo_chan *base,
			      struct nvkm_engine *engine)
{
	struct gf100_fifo_chan *chan = gf100_fifo_chan(base);
	struct gf100_fifo_engn *engn = gf100_fifo_gpfifo_engine(chan, engine);

	nvkm_vmm_put(chan->base.vmm, &engn->vma);
	nvkm_gpuobj_del(&engn->inst);
}
/* Create the backing state for an engine context: bind the object into a
 * gpuobj, then allocate and map a 4KiB-aligned VMM range for it in the
 * channel's address space.  Engines without a context pointer are a no-op.
 *
 * Returns 0 on success or a negative errno; partial allocations are
 * cleaned up later by the dtor.
 */
static int
gf100_fifo_gpfifo_engine_ctor(struct nvkm_fifo_chan *base,
			      struct nvkm_engine *engine,
			      struct nvkm_object *object)
{
	struct gf100_fifo_chan *chan = gf100_fifo_chan(base);
	struct gf100_fifo_engn *engn = gf100_fifo_gpfifo_engine(chan, engine);
	int ret;

	if (!gf100_fifo_gpfifo_engine_addr(engine))
		return 0;

	ret = nvkm_object_bind(object, NULL, 0, &engn->inst);
	if (ret)
		return ret;

	ret = nvkm_vmm_get(chan->base.vmm, 12, engn->inst->size, &engn->vma);
	if (ret)
		return ret;

	return nvkm_memory_map(engn->inst, 0, chan->base.vmm, engn->vma, NULL, 0);
}
static void *
gf100_fifo_gpfifo_dtor(struct nvkm_fifo_chan *base)
{
......@@ -136,10 +40,6 @@ gf100_fifo_gpfifo_dtor(struct nvkm_fifo_chan *base)
static const struct nvkm_fifo_chan_func
gf100_fifo_gpfifo_func = {
.dtor = gf100_fifo_gpfifo_dtor,
.engine_ctor = gf100_fifo_gpfifo_engine_ctor,
.engine_dtor = gf100_fifo_gpfifo_engine_dtor,
.engine_init = gf100_fifo_gpfifo_engine_init,
.engine_fini = gf100_fifo_gpfifo_engine_fini,
};
static int
......
......@@ -33,125 +33,6 @@
#include <nvif/cla06f.h>
#include <nvif/unpack.h>
/* Instance-block offset of an engine's context pointer, or 0 for engines
 * without one.
 *
 * NVDEC and (first-instance) NVENC pack TWO offsets into the return
 * value: callers extract the first with "& 0xffff" and the mirror with
 * ">> 16" (e.g. 0x02100270 -> 0x0270 and 0x0210).
 */
static u32
gk104_fifo_gpfifo_engine_addr(struct nvkm_engine *engine)
{
	switch (engine->subdev.type) {
	case NVKM_ENGINE_SW :
	case NVKM_ENGINE_CE : return 0;
	case NVKM_ENGINE_GR : return 0x0210;
	case NVKM_ENGINE_SEC : return 0x0220;
	case NVKM_ENGINE_MSPDEC: return 0x0250;
	case NVKM_ENGINE_MSPPP : return 0x0260;
	case NVKM_ENGINE_MSVLD : return 0x0270;
	case NVKM_ENGINE_VIC : return 0x0280;
	case NVKM_ENGINE_MSENC : return 0x0290;
	case NVKM_ENGINE_NVDEC : return 0x02100270;
	case NVKM_ENGINE_NVENC :
		/* only the first NVENC instance has the mirror slot */
		if (engine->subdev.inst)
			return 0x0210;
		return 0x02100290;
	default:
		WARN_ON(1);
		return 0;
	}
}
struct gk104_fifo_engn *
gk104_fifo_gpfifo_engine(struct gk104_fifo_chan *chan, struct nvkm_engine *engine)
{
int engi = chan->base.fifo->func->engine_id(chan->base.fifo, engine);
if (engi >= 0)
return &chan->engn[engi];
return NULL;
}
/* Unbind an engine context by zeroing its context pointer(s) in the
 * channel's instance block.  The packed return of engine_addr() holds the
 * primary offset in the low halfword and an optional mirror in the high
 * halfword.  Always succeeds.
 */
static int
gk104_fifo_gpfifo_engine_fini(struct nvkm_fifo_chan *base,
			      struct nvkm_engine *engine, bool suspend)
{
	struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
	struct nvkm_gpuobj *inst = chan->base.inst;
	u32 offset = gk104_fifo_gpfifo_engine_addr(engine);

	if (offset) {
		nvkm_kmap(inst);
		nvkm_wo32(inst, (offset & 0xffff) + 0x00, 0x00000000);
		nvkm_wo32(inst, (offset & 0xffff) + 0x04, 0x00000000);
		/* zero the mirror slot too, if the engine has one */
		if ((offset >>= 16)) {
			nvkm_wo32(inst, offset + 0x00, 0x00000000);
			nvkm_wo32(inst, offset + 0x04, 0x00000000);
		}
		nvkm_done(inst);
	}

	return 0;
}
/* Bind an engine context: write its VMM address (bit 2 set -- presumably
 * a valid/target flag, TODO confirm) into the primary and, if present,
 * mirror context-pointer slots of the instance block.
 *
 * NOTE(review): engn may be NULL for unknown engines; relies on offset
 * being 0 in that case so engn->vma is never dereferenced.
 */
static int
gk104_fifo_gpfifo_engine_init(struct nvkm_fifo_chan *base,
			      struct nvkm_engine *engine)
{
	struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
	struct gk104_fifo_engn *engn = gk104_fifo_gpfifo_engine(chan, engine);
	struct nvkm_gpuobj *inst = chan->base.inst;
	u32 offset = gk104_fifo_gpfifo_engine_addr(engine);

	if (offset) {
		u32 datalo = lower_32_bits(engn->vma->addr) | 0x00000004;
		u32 datahi = upper_32_bits(engn->vma->addr);
		nvkm_kmap(inst);
		nvkm_wo32(inst, (offset & 0xffff) + 0x00, datalo);
		nvkm_wo32(inst, (offset & 0xffff) + 0x04, datahi);
		/* write the mirror slot too, if the engine has one */
		if ((offset >>= 16)) {
			nvkm_wo32(inst, offset + 0x00, datalo);
			nvkm_wo32(inst, offset + 0x04, datahi);
		}
		nvkm_done(inst);
	}

	return 0;
}
/* Release an engine context's VMM range and backing gpuobj.  Both calls
 * tolerate NULL/empty handles, so this is safe after a partial ctor.
 */
void
gk104_fifo_gpfifo_engine_dtor(struct nvkm_fifo_chan *base,
			      struct nvkm_engine *engine)
{
	struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
	struct gk104_fifo_engn *engn = gk104_fifo_gpfifo_engine(chan, engine);

	nvkm_vmm_put(chan->base.vmm, &engn->vma);
	nvkm_gpuobj_del(&engn->inst);
}
/* Create the backing state for an engine context.
 *
 * Engines with no context pointer are skipped entirely -- EXCEPT copy
 * engines on GV100+, which still need their object bound to a gpuobj
 * (its BAR2 address is used at init time) but get no VMM mapping.
 *
 * Returns 0 on success or a negative errno.
 */
int
gk104_fifo_gpfifo_engine_ctor(struct nvkm_fifo_chan *base,
			      struct nvkm_engine *engine,
			      struct nvkm_object *object)
{
	struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
	struct gk104_fifo_engn *engn = gk104_fifo_gpfifo_engine(chan, engine);
	int ret;

	if (!gk104_fifo_gpfifo_engine_addr(engine)) {
		if (engine->subdev.type != NVKM_ENGINE_CE ||
		    engine->subdev.device->card_type < GV100)
			return 0;
	}

	ret = nvkm_object_bind(object, NULL, 0, &engn->inst);
	if (ret)
		return ret;

	/* GV100+ CE path: inst bound above, but no context pointer to map. */
	if (!gk104_fifo_gpfifo_engine_addr(engine))
		return 0;

	ret = nvkm_vmm_get(chan->base.vmm, 12, engn->inst->size, &engn->vma);
	if (ret)
		return ret;

	return nvkm_memory_map(engn->inst, 0, chan->base.vmm, engn->vma, NULL, 0);
}
void *
gk104_fifo_gpfifo_dtor(struct nvkm_fifo_chan *base)
{
......@@ -162,10 +43,6 @@ gk104_fifo_gpfifo_dtor(struct nvkm_fifo_chan *base)
const struct nvkm_fifo_chan_func
gk104_fifo_gpfifo_func = {
.dtor = gk104_fifo_gpfifo_dtor,
.engine_ctor = gk104_fifo_gpfifo_engine_ctor,
.engine_dtor = gk104_fifo_gpfifo_engine_dtor,
.engine_init = gk104_fifo_gpfifo_engine_init,
.engine_fini = gk104_fifo_gpfifo_engine_fini,
};
static int
......
......@@ -28,87 +28,9 @@
#include <nvif/clc36f.h>
#include <nvif/unpack.h>
/* Update the engine-context validity bits in the channel's instance
 * block: bit 17 gates the CE context, bit 16 everything else.  'valid'
 * sets the bit, otherwise it is cleared.  Always returns 0 (the int
 * return is kept for callers that propagate it).
 */
static int
gv100_fifo_gpfifo_engine_valid(struct gk104_fifo_chan *chan, bool ce, bool valid)
{
	const u32 mask = ce ? 0x00020000 : 0x00010000;
	const u32 data = valid ? mask : 0x00000000;

	/* Update engine context validity.  (The old `if (1)` wrapper was a
	 * leftover from a removed kickoff path and has been dropped.)
	 */
	nvkm_kmap(chan->base.inst);
	nvkm_mo32(chan->base.inst, 0x0ac, mask, data);
	nvkm_done(chan->base.inst);

	return 0;
}
/* Disconnect an engine context from the channel.
 *
 * CE contexts live at 0x220/0x224 (BAR2 address); all other engines use
 * the VMM-mapped pointer at 0x210/0x214.  The validity bit is cleared
 * first; on suspend, a failure there aborts before the pointers are
 * zeroed.  (Cleanup: the CE branch now uses the `inst` local throughout
 * instead of mixing it with the equivalent chan->base.inst.)
 */
int
gv100_fifo_gpfifo_engine_fini(struct nvkm_fifo_chan *base,
			      struct nvkm_engine *engine, bool suspend)
{
	struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
	struct nvkm_gpuobj *inst = chan->base.inst;
	int ret;

	if (engine->subdev.type == NVKM_ENGINE_CE) {
		ret = gv100_fifo_gpfifo_engine_valid(chan, true, false);
		if (ret && suspend)
			return ret;

		nvkm_kmap(inst);
		nvkm_wo32(inst, 0x220, 0x00000000);
		nvkm_wo32(inst, 0x224, 0x00000000);
		nvkm_done(inst);
		return ret;
	}

	ret = gv100_fifo_gpfifo_engine_valid(chan, false, false);
	if (ret && suspend)
		return ret;

	nvkm_kmap(inst);
	nvkm_wo32(inst, 0x0210, 0x00000000);
	nvkm_wo32(inst, 0x0214, 0x00000000);
	nvkm_done(inst);
	return ret;
}
/* Connect an engine context to the channel.
 *
 * CE contexts are referenced by BAR2 address at 0x220/0x224; all other
 * engines get the VMM address (bit 2 set -- presumably a valid/target
 * flag, TODO confirm) at 0x210/0x214.  The matching validity bit is set
 * afterwards.  (Cleanup: the CE branch now uses the `inst` local
 * throughout instead of mixing it with the equivalent chan->base.inst.)
 *
 * NOTE(review): engn may be NULL for engines the fifo doesn't know;
 * assumes this function is only reached for known engines -- verify.
 */
int
gv100_fifo_gpfifo_engine_init(struct nvkm_fifo_chan *base,
			      struct nvkm_engine *engine)
{
	struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
	struct gk104_fifo_engn *engn = gk104_fifo_gpfifo_engine(chan, engine);
	struct nvkm_gpuobj *inst = chan->base.inst;

	if (engine->subdev.type == NVKM_ENGINE_CE) {
		const u64 bar2 = nvkm_memory_bar2(engn->inst->memory);

		nvkm_kmap(inst);
		nvkm_wo32(inst, 0x220, lower_32_bits(bar2));
		nvkm_wo32(inst, 0x224, upper_32_bits(bar2));
		nvkm_done(inst);
		return gv100_fifo_gpfifo_engine_valid(chan, true, true);
	}

	nvkm_kmap(inst);
	nvkm_wo32(inst, 0x210, lower_32_bits(engn->vma->addr) | 0x00000004);
	nvkm_wo32(inst, 0x214, upper_32_bits(engn->vma->addr));
	nvkm_done(inst);
	return gv100_fifo_gpfifo_engine_valid(chan, false, true);
}
static const struct nvkm_fifo_chan_func
gv100_fifo_gpfifo = {
.dtor = gk104_fifo_gpfifo_dtor,
.engine_ctor = gk104_fifo_gpfifo_engine_ctor,
.engine_dtor = gk104_fifo_gpfifo_engine_dtor,
.engine_init = gv100_fifo_gpfifo_engine_init,
.engine_fini = gv100_fifo_gpfifo_engine_fini,
};
int
......
......@@ -31,10 +31,6 @@
static const struct nvkm_fifo_chan_func
tu102_fifo_gpfifo = {
.dtor = gk104_fifo_gpfifo_dtor,
.engine_ctor = gk104_fifo_gpfifo_engine_ctor,
.engine_dtor = gk104_fifo_gpfifo_engine_dtor,
.engine_init = gv100_fifo_gpfifo_engine_init,
.engine_fini = gv100_fifo_gpfifo_engine_fini,
};
int
......
......@@ -89,16 +89,58 @@ gv100_chan = {
.doorbell_handle = gv100_chan_doorbell_handle,
};
/* Bind (cctx != NULL) or unbind an engine context: write its VMM address
 * (bit 2 set -- presumably a valid/target flag, TODO confirm) at
 * 0x210/0x214 and toggle the validity bit 16 at 0x0ac accordingly.
 */
void
gv100_ectx_bind(struct nvkm_engn *engn, struct nvkm_cctx *cctx, struct nvkm_chan *chan)
{
	u64 addr = 0ULL;

	if (cctx) {
		addr = cctx->vctx->vma->addr;
		addr |= 4ULL;
	}

	nvkm_kmap(chan->inst);
	nvkm_wo32(chan->inst, 0x210, lower_32_bits(addr));
	nvkm_wo32(chan->inst, 0x214, upper_32_bits(addr));
	nvkm_mo32(chan->inst, 0x0ac, 0x00010000, cctx ? 0x00010000 : 0x00000000);
	nvkm_done(chan->inst);
}
const struct nvkm_engn_func
gv100_engn = {
.chsw = gk104_engn_chsw,
.cxid = gk104_engn_cxid,
.ctor = gk104_ectx_ctor,
.bind = gv100_ectx_bind,
};
/* Bind (cctx != NULL) or unbind a copy-engine context: CE contexts are
 * referenced by BAR2 address at 0x220/0x224, with validity bit 17 at
 * 0x0ac toggled to match.
 */
void
gv100_ectx_ce_bind(struct nvkm_engn *engn, struct nvkm_cctx *cctx, struct nvkm_chan *chan)
{
	const u64 bar2 = cctx ? nvkm_memory_bar2(cctx->vctx->inst->memory) : 0ULL;

	nvkm_kmap(chan->inst);
	nvkm_wo32(chan->inst, 0x220, lower_32_bits(bar2));
	nvkm_wo32(chan->inst, 0x224, upper_32_bits(bar2));
	nvkm_mo32(chan->inst, 0x0ac, 0x00020000, cctx ? 0x00020000 : 0x00000000);
	nvkm_done(chan->inst);
}
int
gv100_ectx_ce_ctor(struct nvkm_engn *engn, struct nvkm_vctx *vctx)
{
if (nvkm_memory_bar2(vctx->inst->memory) == ~0ULL)
return -EFAULT;
return 0;
}
const struct nvkm_engn_func
gv100_engn_ce = {
.chsw = gk104_engn_chsw,
.cxid = gk104_engn_cxid,
.ctor = gv100_ectx_ce_ctor,
.bind = gv100_ectx_ce_bind,
};
static bool
......@@ -436,7 +478,6 @@ gv100_fifo = {
.intr = gk104_fifo_intr,
.intr_ctxsw_timeout = gv100_fifo_intr_ctxsw_timeout,
.mmu_fault = &gv100_fifo_mmu_fault,
.engine_id = gk104_fifo_engine_id,
.nonstall = &gf100_fifo_nonstall,
.runl = &gv100_runl,
.runq = &gv100_runq,
......
......@@ -234,20 +234,6 @@ const struct nvkm_runl_func
nv04_runl = {
};
/* Map an engine type to its fixed NV04 fifo engine slot.
 *
 * NOTE(review): unknown engines WARN but return 0, which aliases
 * NV04_FIFO_ENGN_SW -- the gk104 counterpart returns -1.  Callers that
 * treat >= 0 as valid would silently use the SW slot here; confirm this
 * is intentional before relying on it.
 */
int
nv04_fifo_engine_id(struct nvkm_fifo *base, struct nvkm_engine *engine)
{
	switch (engine->subdev.type) {
	case NVKM_ENGINE_SW : return NV04_FIFO_ENGN_SW;
	case NVKM_ENGINE_GR : return NV04_FIFO_ENGN_GR;
	case NVKM_ENGINE_MPEG : return NV04_FIFO_ENGN_MPEG;
	case NVKM_ENGINE_DMAOBJ: return NV04_FIFO_ENGN_DMA;
	default:
		WARN_ON(1);
		return 0;
	}
}
static const char *
nv_dma_state_err(u32 state)
{
......@@ -533,7 +519,6 @@ nv04_fifo = {
.runl_ctor = nv04_fifo_runl_ctor,
.init = nv04_fifo_init,
.intr = nv04_fifo_intr,
.engine_id = nv04_fifo_engine_id,
.pause = nv04_fifo_pause,
.start = nv04_fifo_start,
.runl = &nv04_runl,
......
......@@ -97,7 +97,6 @@ nv10_fifo = {
.runl_ctor = nv04_fifo_runl_ctor,
.init = nv04_fifo_init,
.intr = nv04_fifo_intr,
.engine_id = nv04_fifo_engine_id,
.pause = nv04_fifo_pause,
.start = nv04_fifo_start,
.runl = &nv04_runl,
......
......@@ -126,7 +126,6 @@ nv17_fifo = {
.runl_ctor = nv04_fifo_runl_ctor,
.init = nv17_fifo_init,
.intr = nv04_fifo_intr,
.engine_id = nv04_fifo_engine_id,
.pause = nv04_fifo_pause,
.start = nv04_fifo_start,
.runl = &nv04_runl,
......
......@@ -43,7 +43,6 @@ nv40_chan_ramfc_write(struct nvkm_chan *chan, u64 offset, u64 length, u32 devm,
const u32 base = chan->id * 128;
chan->ramfc_offset = base;
nv04_fifo_chan(chan)->ramfc = base;
nvkm_kmap(ramfc);
nvkm_wo32(ramfc, base + 0x00, offset);
......@@ -109,8 +108,52 @@ nv40_chan = {
.stop = nv04_chan_stop,
};
/* Bind (cctx != NULL) or unbind an engine context on NV40-class HW.
 *
 * The context instance address is written into the channel's RAMFC slot
 * and, if the channel is currently active on PFIFO, also into the
 * engine's current-context register.  PFIFO pushbuffer fetch is disabled
 * around the update to avoid racing a context switch.
 */
static void
nv40_ectx_bind(struct nvkm_engn *engn, struct nvkm_cctx *cctx, struct nvkm_chan *chan)
{
	struct nvkm_fifo *fifo = chan->cgrp->runl->fifo;
	struct nvkm_device *device = fifo->engine.subdev.device;
	struct nvkm_memory *ramfc = device->imem->ramfc;
	u32 inst = 0x00000000, reg, ctx;	/* reg: MMIO ctx reg; ctx: RAMFC offset */
	int chid;

	switch (engn->engine->subdev.type) {
	case NVKM_ENGINE_GR:
		reg = 0x0032e0;
		ctx = 0x38;
		break;
	case NVKM_ENGINE_MPEG:
		/* PMPEG context switching only exists from NV44 onwards. */
		if (WARN_ON(device->chipset < 0x44))
			return;
		reg = 0x00330c;
		ctx = 0x54;
		break;
	default:
		WARN_ON(1);
		return;
	}

	if (cctx)
		inst = cctx->vctx->inst->addr >> 4;

	spin_lock_irq(&fifo->lock);
	/* Halt PFIFO pushbuffer fetch while swapping the context pointer. */
	nvkm_mask(device, 0x002500, 0x00000001, 0x00000000);

	/* If this channel is the one currently on HW, update the engine's
	 * active-context register directly too.
	 */
	chid = nvkm_rd32(device, 0x003204) & (fifo->chid->nr - 1);
	if (chid == chan->id)
		nvkm_wr32(device, reg, inst);

	nvkm_kmap(ramfc);
	nvkm_wo32(ramfc, chan->ramfc_offset + ctx, inst);
	nvkm_done(ramfc);

	nvkm_mask(device, 0x002500, 0x00000001, 0x00000001);
	spin_unlock_irq(&fifo->lock);
}
static const struct nvkm_engn_func
nv40_engn = {
.bind = nv40_ectx_bind,
};
static const struct nvkm_engn_func
......@@ -175,7 +218,6 @@ nv40_fifo = {
.runl_ctor = nv04_fifo_runl_ctor,
.init = nv40_fifo_init,
.intr = nv04_fifo_intr,
.engine_id = nv04_fifo_engine_id,
.pause = nv04_fifo_pause,
.start = nv04_fifo_start,
.runl = &nv04_runl,
......
......@@ -89,7 +89,6 @@ nv50_chan_ramfc_write(struct nvkm_chan *chan, u64 offset, u64 length, u32 devm,
if (ret)
return ret;
nv50_fifo_chan(chan)->eng = chan->eng;
nv50_fifo_chan(chan)->ramht = chan->ramht;
nvkm_kmap(chan->ramfc);
......@@ -139,8 +138,64 @@ nv50_chan = {
.stop = nv50_chan_stop,
};
/* Bind (cctx != NULL) or unbind an engine context on NV50-class HW.
 *
 * The engine-context table entry holds flags, and the context's start
 * and limit addresses: low 32 bits of each at +0x04/+0x08, with the
 * high (39:32) bits of both packed into +0x0c.
 *
 * Fix: the +0x0c word previously OR'd in lower_32_bits(start), which
 * duplicated the low word already written at +0x08 and dropped start's
 * upper address bits; it must be upper_32_bits(start).
 */
static void
nv50_ectx_bind(struct nvkm_engn *engn, struct nvkm_cctx *cctx, struct nvkm_chan *chan)
{
	struct nvkm_subdev *subdev = &chan->cgrp->runl->fifo->engine.subdev;
	struct nvkm_device *device = subdev->device;
	u64 start = 0, limit = 0;
	u32 flags = 0, ptr0, save;

	switch (engn->engine->subdev.type) {
	case NVKM_ENGINE_GR : ptr0 = 0x0000; break;
	case NVKM_ENGINE_MPEG : ptr0 = 0x0060; break;
	default:
		WARN_ON(1);
		return;
	}

	if (!cctx) {
		/* HW bug workaround:
		 *
		 * PFIFO will hang forever if the connected engines don't report
		 * that they've processed the context switch request.
		 *
		 * In order for the kickoff to work, we need to ensure all the
		 * connected engines are in a state where they can answer.
		 *
		 * Newer chipsets don't seem to suffer from this issue, and well,
		 * there's also a "ignore these engines" bitmask reg we can use
		 * if we hit the issue there..
		 */
		save = nvkm_mask(device, 0x00b860, 0x00000001, 0x00000001);

		/* Tell engines to save out contexts. */
		nvkm_wr32(device, 0x0032fc, chan->inst->addr >> 12);
		nvkm_msec(device, 2000,
			if (nvkm_rd32(device, 0x0032fc) != 0xffffffff)
				break;
		);
		nvkm_wr32(device, 0x00b860, save);
	} else {
		flags = 0x00190000;
		start = cctx->vctx->inst->addr;
		limit = start + cctx->vctx->inst->size - 1;
	}

	nvkm_kmap(chan->eng);
	nvkm_wo32(chan->eng, ptr0 + 0x00, flags);
	nvkm_wo32(chan->eng, ptr0 + 0x04, lower_32_bits(limit));
	nvkm_wo32(chan->eng, ptr0 + 0x08, lower_32_bits(start));
	nvkm_wo32(chan->eng, ptr0 + 0x0c, upper_32_bits(limit) << 24 |
					  upper_32_bits(start));
	nvkm_wo32(chan->eng, ptr0 + 0x10, 0x00000000);
	nvkm_wo32(chan->eng, ptr0 + 0x14, 0x00000000);
	nvkm_done(chan->eng);
}
static const struct nvkm_engn_func
nv50_engn = {
.bind = nv50_ectx_bind,
};
const struct nvkm_engn_func
......@@ -340,7 +395,6 @@ nv50_fifo = {
.runl_ctor = nv04_fifo_runl_ctor,
.init = nv50_fifo_init,
.intr = nv04_fifo_intr,
.engine_id = nv04_fifo_engine_id,
.pause = nv04_fifo_pause,
.start = nv04_fifo_start,
.runl = &nv50_runl,
......
......@@ -12,6 +12,4 @@ int nv50_fifo_new_(const struct nvkm_fifo_func *, struct nvkm_device *, enum nvk
struct nvkm_fifo **);
void *nv50_fifo_dtor(struct nvkm_fifo *);
int g84_fifo_engine_id(struct nvkm_fifo *, struct nvkm_engine *);
#endif
......@@ -4,11 +4,13 @@
#define nvkm_fifo(p) container_of((p), struct nvkm_fifo, engine)
#include <engine/fifo.h>
#include <core/enum.h>
struct nvkm_cctx;
struct nvkm_cgrp;
struct nvkm_engn;
struct nvkm_memory;
struct nvkm_runl;
struct nvkm_runq;
struct nvkm_vctx;
struct gk104_fifo;
struct nvkm_fifo_chan_oclass;
......@@ -37,7 +39,6 @@ struct nvkm_fifo_func {
const struct nvkm_enum *gpcclient;
} *mmu_fault;
int (*engine_id)(struct nvkm_fifo *, struct nvkm_engine *);
void (*pause)(struct nvkm_fifo *, unsigned long *);
void (*start)(struct nvkm_fifo *, unsigned long *);
......@@ -74,7 +75,6 @@ int nv04_fifo_chid_ctor(struct nvkm_fifo *, int);
int nv04_fifo_runl_ctor(struct nvkm_fifo *);
void nv04_fifo_init(struct nvkm_fifo *);
irqreturn_t nv04_fifo_intr(struct nvkm_inth *);
int nv04_fifo_engine_id(struct nvkm_fifo *, struct nvkm_engine *);
void nv04_fifo_pause(struct nvkm_fifo *, unsigned long *);
void nv04_fifo_start(struct nvkm_fifo *, unsigned long *);
extern const struct nvkm_runl_func nv04_runl;
......@@ -138,7 +138,6 @@ extern const struct nvkm_fifo_func_mmu_fault gk104_fifo_mmu_fault;
extern const struct nvkm_enum gk104_fifo_mmu_fault_reason[];
extern const struct nvkm_enum gk104_fifo_mmu_fault_hubclient[];
extern const struct nvkm_enum gk104_fifo_mmu_fault_gpcclient[];
int gk104_fifo_engine_id(struct nvkm_fifo *, struct nvkm_engine *);
void gk104_runl_insert_chan(struct nvkm_chan *, struct nvkm_memory *, u64);
void gk104_runl_commit(struct nvkm_runl *, struct nvkm_memory *, u32, int);
bool gk104_runl_pending(struct nvkm_runl *);
......@@ -153,6 +152,7 @@ bool gk104_runq_idle(struct nvkm_runq *);
extern const struct nvkm_engn_func gk104_engn;
bool gk104_engn_chsw(struct nvkm_engn *);
int gk104_engn_cxid(struct nvkm_engn *, bool *cgid);
int gk104_ectx_ctor(struct nvkm_engn *, struct nvkm_vctx *);
extern const struct nvkm_engn_func gk104_engn_ce;
extern const struct nvkm_chan_func_userd gk104_chan_userd;
extern const struct nvkm_chan_func_ramfc gk104_chan_ramfc;
......@@ -189,7 +189,10 @@ void gv100_runl_insert_chan(struct nvkm_chan *, struct nvkm_memory *, u64);
void gv100_runl_preempt(struct nvkm_runl *);
extern const struct nvkm_runq_func gv100_runq;
extern const struct nvkm_engn_func gv100_engn;
void gv100_ectx_bind(struct nvkm_engn *, struct nvkm_cctx *, struct nvkm_chan *);
extern const struct nvkm_engn_func gv100_engn_ce;
int gv100_ectx_ce_ctor(struct nvkm_engn *, struct nvkm_vctx *);
void gv100_ectx_ce_bind(struct nvkm_engn *, struct nvkm_cctx *, struct nvkm_chan *);
extern const struct nvkm_chan_func_userd gv100_chan_userd;
extern const struct nvkm_chan_func_ramfc gv100_chan_ramfc;
......
#ifndef __NVKM_RUNL_H__
#define __NVKM_RUNL_H__
#include <core/os.h>
struct nvkm_cctx;
struct nvkm_cgrp;
struct nvkm_chan;
struct nvkm_memory;
struct nvkm_vctx;
enum nvkm_subdev_type;
struct nvkm_engn {
......@@ -12,6 +14,8 @@ struct nvkm_engn {
int (*cxid)(struct nvkm_engn *, bool *cgid);
void (*mmu_fault_trigger)(struct nvkm_engn *);
bool (*mmu_fault_triggered)(struct nvkm_engn *);
int (*ctor)(struct nvkm_engn *, struct nvkm_vctx *);
void (*bind)(struct nvkm_engn *, struct nvkm_cctx *, struct nvkm_chan *);
} *func;
struct nvkm_runl *runl;
int id;
......
......@@ -273,7 +273,6 @@ tu102_fifo = {
.init_pbdmas = tu102_fifo_init_pbdmas,
.intr = tu102_fifo_intr,
.mmu_fault = &tu102_fifo_mmu_fault,
.engine_id = gk104_fifo_engine_id,
.nonstall = &gf100_fifo_nonstall,
.runl = &tu102_runl,
.runq = &gv100_runq,
......
......@@ -25,6 +25,7 @@
#include "chid.h"
#include "runl.h"
#include <core/gpuobj.h>
#include <core/oproxy.h>
#include <nvif/if0020.h>
......@@ -74,10 +75,17 @@ nvkm_uchan_object_fini_1(struct nvkm_oproxy *oproxy, bool suspend)
struct nvkm_uobj *uobj = container_of(oproxy, typeof(*uobj), oproxy);
struct nvkm_chan *chan = uobj->chan;
struct nvkm_cctx *cctx = uobj->cctx;
struct nvkm_ectx *ectx = cctx->vctx->ectx;
if (!ectx->object)
return 0;
/* Unbind engine context from channel, if no longer required. */
if (refcount_dec_and_mutex_lock(&cctx->uses, &chan->cgrp->mutex)) {
nvkm_chan_cctx_bind(chan, oproxy, NULL);
nvkm_chan_cctx_bind(chan, ectx->engn, NULL);
if (refcount_dec_and_test(&ectx->uses))
nvkm_object_fini(ectx->object, false);
mutex_unlock(&chan->cgrp->mutex);
}
......@@ -90,14 +98,24 @@ nvkm_uchan_object_init_0(struct nvkm_oproxy *oproxy)
struct nvkm_uobj *uobj = container_of(oproxy, typeof(*uobj), oproxy);
struct nvkm_chan *chan = uobj->chan;
struct nvkm_cctx *cctx = uobj->cctx;
struct nvkm_ectx *ectx = cctx->vctx->ectx;
int ret = 0;
if (!ectx->object)
return 0;
/* Bind engine context to channel, if it hasn't been already. */
if (!refcount_inc_not_zero(&cctx->uses)) {
mutex_lock(&chan->cgrp->mutex);
if (!refcount_inc_not_zero(&cctx->uses)) {
if (!refcount_inc_not_zero(&ectx->uses)) {
ret = nvkm_object_init(ectx->object);
if (ret == 0)
refcount_set(&ectx->uses, 1);
}
if (ret == 0) {
nvkm_chan_cctx_bind(chan, oproxy, cctx);
nvkm_chan_cctx_bind(chan, ectx->engn, cctx);
refcount_set(&cctx->uses, 1);
}
}
......@@ -112,6 +130,9 @@ nvkm_uchan_object_dtor(struct nvkm_oproxy *oproxy)
{
struct nvkm_uobj *uobj = container_of(oproxy, typeof(*uobj), oproxy);
if (!uobj->cctx)
return;
nvkm_chan_cctx_put(uobj->chan, &uobj->cctx);
}
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment