Commit b084fff2 authored by Ben Skeggs

drm/nouveau/fifo: add common runlist control

- less dependence on waiting for runlist updates, on GPUs that allow it
- supports runqueue selector in RAMRL entries
- completes switch to common runl/cgrp/chan topology info
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
Reviewed-by: Lyude Paul <lyude@redhat.com>
parent 4d60100a
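The net effect, as a minimal sketch (editorial illustration, not an excerpt; error handling omitted): runlist membership is now driven through common helpers, and the hardware runlist is rebuilt once per batch of topology edits instead of on every channel init/fini.

	/* batch topology edits under runl->mutex, then commit once */
	mutex_lock(&runl->mutex);
	nvkm_chan_remove_locked(chan);        /* unlink chan, drop empty cgrp, set runl->changed */
	nvkm_runl_update_locked(runl, true);  /* rebuild + commit RAMRL only if changed */
	mutex_unlock(&runl->mutex);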
@@ -17,6 +17,7 @@ struct nvkm_chan {
const struct nvkm_chan_func *func;
char name[64];
struct nvkm_cgrp *cgrp;
int runq;
union { int id; int chid; }; /*FIXME: remove later */
@@ -63,7 +64,6 @@ struct nvkm_fifo {
} timeout;
int nr;
struct list_head chan;
spinlock_t lock;
struct mutex mutex;
};
......
@@ -310,8 +310,6 @@ nvkm_fifo_ctor(const struct nvkm_fifo_func *func, struct nvkm_device *device,
if (ret)
return ret;
INIT_LIST_HEAD(&fifo->chan);
nr = func->chid_nr(fifo);
fifo->nr = nr;
......
@@ -196,7 +196,7 @@ nvkm_cgrp_new(struct nvkm_runl *runl, const char *name, struct nvkm_vmm *vmm, bo
cgrp->hw = hw;
cgrp->id = -1;
kref_init(&cgrp->kref);
cgrp->chans = NULL;
INIT_LIST_HEAD(&cgrp->chans);
cgrp->chan_nr = 0;
spin_lock_init(&cgrp->lock);
INIT_LIST_HEAD(&cgrp->ectxs);
......
@@ -31,7 +31,7 @@ struct nvkm_cgrp {
int id;
struct kref kref;
struct nvkm_chan *chans;
struct list_head chans;
int chan_nr;
spinlock_t lock; /* protects irq handler channel (group) lookup */
@@ -46,7 +46,6 @@ struct nvkm_cgrp {
atomic_t rc;
struct list_head head;
struct list_head chan;
};
int nvkm_cgrp_new(struct nvkm_runl *, const char *name, struct nvkm_vmm *, bool hw,
@@ -59,9 +58,9 @@ void nvkm_cgrp_vctx_put(struct nvkm_cgrp *, struct nvkm_vctx **);
void nvkm_cgrp_put(struct nvkm_cgrp **, unsigned long irqflags);
#define nvkm_cgrp_foreach_chan(chan,cgrp) for ((chan) = (cgrp)->chans; (chan); (chan) = NULL)
#define nvkm_cgrp_foreach_chan(chan,cgrp) list_for_each_entry((chan), &(cgrp)->chans, head)
#define nvkm_cgrp_foreach_chan_safe(chan,ctmp,cgrp) \
(void)(ctmp); nvkm_cgrp_foreach_chan((chan), (cgrp))
list_for_each_entry_safe((chan), (ctmp), &(cgrp)->chans, head)
#define CGRP_PRCLI(c,l,p,f,a...) RUNL_PRINT((c)->runl, l, p, "%04x:[%s]"f, (c)->id, (c)->name, ##a)
#define CGRP_PRINT(c,l,p,f,a...) RUNL_PRINT((c)->runl, l, p, "%04x:"f, (c)->id, ##a)
......
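With cgrp->chans now a real list_head, iteration follows the usual list_for_each_entry pattern. A minimal usage sketch (chan and ctmp are hypothetical locals):

	struct nvkm_chan *chan, *ctmp;

	nvkm_cgrp_foreach_chan(chan, cgrp)
		CHAN_TRACE(chan, "visit");

	nvkm_cgrp_foreach_chan_safe(chan, ctmp, cgrp)
		nvkm_chan_remove_locked(chan);    /* _safe allows unlinking while walking */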
@@ -339,22 +339,60 @@ nvkm_fifo_chan_map(struct nvkm_object *object, void *argv, u32 argc,
return 0;
}
static int
nvkm_fifo_chan_fini(struct nvkm_object *object, bool suspend)
void
nvkm_chan_remove_locked(struct nvkm_chan *chan)
{
struct nvkm_fifo_chan *chan = nvkm_fifo_chan(object);
if (chan->func->fini)
chan->func->fini(chan);
return 0;
struct nvkm_cgrp *cgrp = chan->cgrp;
struct nvkm_runl *runl = cgrp->runl;
if (list_empty(&chan->head))
return;
CHAN_TRACE(chan, "remove");
if (!--cgrp->chan_nr) {
runl->cgrp_nr--;
list_del(&cgrp->head);
}
runl->chan_nr--;
list_del_init(&chan->head);
atomic_set(&runl->changed, 1);
}
static int
nvkm_fifo_chan_init(struct nvkm_object *object)
void
nvkm_chan_remove(struct nvkm_chan *chan, bool preempt)
{
struct nvkm_fifo_chan *chan = nvkm_fifo_chan(object);
if (chan->func->init)
chan->func->init(chan);
return 0;
struct nvkm_runl *runl = chan->cgrp->runl;
mutex_lock(&runl->mutex);
if (preempt && chan->func->preempt)
nvkm_chan_preempt_locked(chan, true);
nvkm_chan_remove_locked(chan);
nvkm_runl_update_locked(runl, true);
mutex_unlock(&runl->mutex);
}
void
nvkm_chan_insert(struct nvkm_chan *chan)
{
struct nvkm_cgrp *cgrp = chan->cgrp;
struct nvkm_runl *runl = cgrp->runl;
mutex_lock(&runl->mutex);
if (WARN_ON(!list_empty(&chan->head))) {
mutex_unlock(&runl->mutex);
return;
}
CHAN_TRACE(chan, "insert");
list_add_tail(&chan->head, &cgrp->chans);
runl->chan_nr++;
if (!cgrp->chan_nr++) {
list_add_tail(&cgrp->head, &cgrp->runl->cgrps);
runl->cgrp_nr++;
}
atomic_set(&runl->changed, 1);
nvkm_runl_update_locked(runl, true);
mutex_unlock(&runl->mutex);
}
static void
@@ -420,15 +458,7 @@ static void *
nvkm_fifo_chan_dtor(struct nvkm_object *object)
{
struct nvkm_fifo_chan *chan = nvkm_fifo_chan(object);
struct nvkm_fifo *fifo = chan->fifo;
void *data = chan->func->dtor(chan);
unsigned long flags;
spin_lock_irqsave(&fifo->lock, flags);
if (!list_empty(&chan->head)) {
list_del(&chan->head);
}
spin_unlock_irqrestore(&fifo->lock, flags);
if (chan->vmm) {
nvkm_vmm_part(chan->vmm, chan->inst->memory);
@@ -494,8 +524,6 @@ nvkm_chan_get_chid(struct nvkm_engine *engine, int id, unsigned long *pirqflags)
static const struct nvkm_object_func
nvkm_fifo_chan_func = {
.dtor = nvkm_fifo_chan_dtor,
.init = nvkm_fifo_chan_init,
.fini = nvkm_fifo_chan_fini,
.map = nvkm_fifo_chan_map,
};
@@ -514,7 +542,6 @@ nvkm_fifo_chan_ctor(const struct nvkm_fifo_chan_func *fn,
struct nvkm_runl *runl;
struct nvkm_engn *engn = NULL;
struct nvkm_vmm *vmm = NULL;
unsigned long flags;
int ret;
nvkm_runl_foreach(runl, fifo) {
@@ -532,8 +559,6 @@ nvkm_fifo_chan_ctor(const struct nvkm_fifo_chan_func *fn,
*func = *fifo->func->chan.func;
func->dtor = fn->dtor;
func->init = fn->init;
func->fini = fn->fini;
func->engine_ctor = fn->engine_ctor;
func->engine_dtor = fn->engine_dtor;
func->engine_init = fn->engine_init;
@@ -611,23 +636,14 @@ nvkm_fifo_chan_ctor(const struct nvkm_fifo_chan_func *fn,
}
/* Allocate channel ID. */
if (runl->cgid) {
chan->id = chan->cgrp->id;
runl->chid->data[chan->id] = chan;
set_bit(chan->id, runl->chid->used);
goto temp_hack_until_no_chid_eq_cgid_req;
}
chan->id = nvkm_chid_get(runl->chid, chan);
if (chan->id < 0) {
RUNL_ERROR(runl, "!chids");
return -ENOSPC;
}
temp_hack_until_no_chid_eq_cgid_req:
spin_lock_irqsave(&fifo->lock, flags);
list_add(&chan->head, &fifo->chan);
spin_unlock_irqrestore(&fifo->lock, flags);
if (cgrp->id < 0)
cgrp->id = chan->id;
/* determine address of this channel's user registers */
chan->addr = device->func->resource_addr(device, bar) +
......
@@ -24,8 +24,6 @@ struct nvkm_chan_func {
u32 (*doorbell_handle)(struct nvkm_chan *);
void *(*dtor)(struct nvkm_fifo_chan *);
void (*init)(struct nvkm_fifo_chan *);
void (*fini)(struct nvkm_fifo_chan *);
int (*engine_ctor)(struct nvkm_fifo_chan *, struct nvkm_engine *,
struct nvkm_object *);
void (*engine_dtor)(struct nvkm_fifo_chan *, struct nvkm_engine *);
@@ -44,6 +42,9 @@ void nvkm_chan_del(struct nvkm_chan **);
void nvkm_chan_allow(struct nvkm_chan *);
void nvkm_chan_block(struct nvkm_chan *);
void nvkm_chan_error(struct nvkm_chan *, bool preempt);
void nvkm_chan_insert(struct nvkm_chan *);
void nvkm_chan_remove(struct nvkm_chan *, bool preempt);
void nvkm_chan_remove_locked(struct nvkm_chan *);
int nvkm_chan_preempt(struct nvkm_chan *, bool wait);
int nvkm_chan_preempt_locked(struct nvkm_chan *, bool wait);
int nvkm_chan_cctx_get(struct nvkm_chan *, struct nvkm_engn *, struct nvkm_cctx **,
......
@@ -169,8 +169,6 @@ g84_fifo_chan_object_ctor(struct nvkm_fifo_chan *base,
static const struct nvkm_fifo_chan_func
g84_fifo_chan_func = {
.dtor = nv50_fifo_chan_dtor,
.init = nv50_fifo_chan_init,
.fini = nv50_fifo_chan_fini,
.engine_ctor = g84_fifo_chan_engine_ctor,
.engine_dtor = nv50_fifo_chan_engine_dtor,
.engine_init = g84_fifo_chan_engine_init,
......
@@ -9,9 +9,6 @@ struct gf100_fifo_chan {
struct nvkm_fifo_chan base;
struct gf100_fifo *fifo;
struct list_head head;
bool killed;
#define GF100_FIFO_ENGN_GR 0
#define GF100_FIFO_ENGN_MSPDEC 1
#define GF100_FIFO_ENGN_MSPPP 2
......
@@ -10,10 +10,6 @@ struct gk104_fifo_chan {
struct gk104_fifo *fifo;
int runl;
struct nvkm_fifo_cgrp *cgrp;
struct list_head head;
bool killed;
#define GK104_FIFO_ENGN_SW 15
struct gk104_fifo_engn {
struct nvkm_gpuobj *inst;
@@ -26,8 +22,6 @@ extern const struct nvkm_fifo_chan_func gk104_fifo_gpfifo_func;
int gk104_fifo_gpfifo_new(struct gk104_fifo *, const struct nvkm_oclass *,
void *data, u32 size, struct nvkm_object **);
void *gk104_fifo_gpfifo_dtor(struct nvkm_fifo_chan *);
void gk104_fifo_gpfifo_init(struct nvkm_fifo_chan *);
void gk104_fifo_gpfifo_fini(struct nvkm_fifo_chan *);
struct gk104_fifo_engn *gk104_fifo_gpfifo_engine(struct gk104_fifo_chan *, struct nvkm_engine *);
int gk104_fifo_gpfifo_engine_ctor(struct nvkm_fifo_chan *, struct nvkm_engine *,
struct nvkm_object *);
......
@@ -183,24 +183,6 @@ nv50_fifo_chan_object_ctor(struct nvkm_fifo_chan *base,
return nvkm_ramht_insert(chan->ramht, object, 0, 4, handle, context);
}
void
nv50_fifo_chan_fini(struct nvkm_fifo_chan *base)
{
struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
struct nv50_fifo *fifo = chan->fifo;
nv50_fifo_runlist_update(fifo);
}
void
nv50_fifo_chan_init(struct nvkm_fifo_chan *base)
{
struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
struct nv50_fifo *fifo = chan->fifo;
nv50_fifo_runlist_update(fifo);
}
void *
nv50_fifo_chan_dtor(struct nvkm_fifo_chan *base)
{
@@ -216,8 +198,6 @@ nv50_fifo_chan_dtor(struct nvkm_fifo_chan *base)
static const struct nvkm_fifo_chan_func
nv50_fifo_chan_func = {
.dtor = nv50_fifo_chan_dtor,
.init = nv50_fifo_chan_init,
.fini = nv50_fifo_chan_fini,
.engine_ctor = nv50_fifo_chan_engine_ctor,
.engine_dtor = nv50_fifo_chan_engine_dtor,
.engine_init = nv50_fifo_chan_engine_init,
......
@@ -40,8 +40,6 @@ struct nv50_fifo_chan {
int nv50_fifo_chan_ctor(struct nv50_fifo *, u64 vmm, u64 push,
const struct nvkm_oclass *, struct nv50_fifo_chan *);
void *nv50_fifo_chan_dtor(struct nvkm_fifo_chan *);
void nv50_fifo_chan_init(struct nvkm_fifo_chan *);
void nv50_fifo_chan_fini(struct nvkm_fifo_chan *);
struct nvkm_gpuobj **nv50_fifo_chan_engine(struct nv50_fifo_chan *, struct nvkm_engine *);
void nv50_fifo_chan_engine_dtor(struct nvkm_fifo_chan *, struct nvkm_engine *);
void nv50_fifo_chan_object_dtor(struct nvkm_fifo_chan *, int);
......
@@ -125,7 +125,6 @@ g84_fifo_runl_ctor(struct nvkm_fifo *fifo)
static const struct nvkm_fifo_func
g84_fifo = {
.dtor = nv50_fifo_dtor,
.oneinit = nv50_fifo_oneinit,
.chid_nr = nv50_fifo_chid_nr,
.chid_ctor = nv50_fifo_chid_ctor,
.runl_ctor = g84_fifo_runl_ctor,
......
@@ -50,7 +50,6 @@ g98_fifo_runl_ctor(struct nvkm_fifo *fifo)
static const struct nvkm_fifo_func
g98_fifo = {
.dtor = nv50_fifo_dtor,
.oneinit = nv50_fifo_oneinit,
.chid_nr = nv50_fifo_chid_nr,
.chid_ctor = nv50_fifo_chid_ctor,
.runl_ctor = g98_fifo_runl_ctor,
......
@@ -287,64 +287,38 @@ gf100_runl_pending(struct nvkm_runl *runl)
return nvkm_rd32(runl->fifo->engine.subdev.device, 0x00227c) & 0x00100000;
}
void
gf100_fifo_runlist_commit(struct gf100_fifo *fifo)
static void
gf100_runl_commit(struct nvkm_runl *runl, struct nvkm_memory *memory, u32 start, int count)
{
struct gf100_fifo_chan *chan;
struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
struct nvkm_device *device = subdev->device;
struct nvkm_runl *runl = nvkm_runl_first(&fifo->base);
struct nvkm_memory *cur;
int nr = 0;
struct nvkm_device *device = runl->fifo->engine.subdev.device;
u64 addr = nvkm_memory_addr(memory) + start;
int target;
mutex_lock(&fifo->base.mutex);
cur = fifo->runlist.mem[fifo->runlist.active];
fifo->runlist.active = !fifo->runlist.active;
nvkm_kmap(cur);
list_for_each_entry(chan, &fifo->chan, head) {
nvkm_wo32(cur, (nr * 8) + 0, chan->base.chid);
nvkm_wo32(cur, (nr * 8) + 4, 0x00000004);
nr++;
}
nvkm_done(cur);
switch (nvkm_memory_target(cur)) {
switch (nvkm_memory_target(memory)) {
case NVKM_MEM_TARGET_VRAM: target = 0; break;
case NVKM_MEM_TARGET_NCOH: target = 3; break;
default:
mutex_unlock(&fifo->base.mutex);
WARN_ON(1);
return;
}
nvkm_wr32(device, 0x002270, (nvkm_memory_addr(cur) >> 12) |
(target << 28));
nvkm_wr32(device, 0x002274, 0x01f00000 | nr);
runl->func->wait(runl);
mutex_unlock(&fifo->base.mutex);
}
void
gf100_fifo_runlist_remove(struct gf100_fifo *fifo, struct gf100_fifo_chan *chan)
{
mutex_lock(&fifo->base.mutex);
list_del_init(&chan->head);
mutex_unlock(&fifo->base.mutex);
nvkm_wr32(device, 0x002270, (target << 28) | (addr >> 12));
nvkm_wr32(device, 0x002274, 0x01f00000 | count);
}
void
gf100_fifo_runlist_insert(struct gf100_fifo *fifo, struct gf100_fifo_chan *chan)
static void
gf100_runl_insert_chan(struct nvkm_chan *chan, struct nvkm_memory *memory, u64 offset)
{
mutex_lock(&fifo->base.mutex);
list_add_tail(&chan->head, &fifo->chan);
mutex_unlock(&fifo->base.mutex);
nvkm_wo32(memory, offset + 0, chan->id);
nvkm_wo32(memory, offset + 4, 0x00000004);
}
static const struct nvkm_runl_func
gf100_runl = {
.size = 8,
.update = nv50_runl_update,
.insert_chan = gf100_runl_insert_chan,
.commit = gf100_runl_commit,
.wait = nv50_runl_wait,
.pending = gf100_runl_pending,
.block = gf100_runl_block,
@@ -884,16 +858,6 @@ gf100_fifo_oneinit(struct nvkm_fifo *base)
struct nvkm_vmm *bar = nvkm_bar_bar1_vmm(device);
int ret;
ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 0x1000,
false, &fifo->runlist.mem[0]);
if (ret)
return ret;
ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 0x1000,
false, &fifo->runlist.mem[1]);
if (ret)
return ret;
ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 128 * 0x1000,
0x1000, false, &fifo->user.mem);
if (ret)
@@ -914,8 +878,6 @@ gf100_fifo_dtor(struct nvkm_fifo *base)
struct nvkm_device *device = fifo->base.engine.subdev.device;
nvkm_vmm_put(nvkm_bar_bar1_vmm(device), &fifo->user.bar);
nvkm_memory_unref(&fifo->user.mem);
nvkm_memory_unref(&fifo->runlist.mem[0]);
nvkm_memory_unref(&fifo->runlist.mem[1]);
return fifo;
}
@@ -950,7 +912,6 @@ gf100_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
if (!(fifo = kzalloc(sizeof(*fifo), GFP_KERNEL)))
return -ENOMEM;
INIT_LIST_HEAD(&fifo->chan);
*pfifo = &fifo->base;
return nvkm_fifo_ctor(&gf100_fifo, device, type, inst, &fifo->base);
......
@@ -6,24 +6,12 @@
#include <subdev/mmu.h>
struct gf100_fifo_chan;
struct gf100_fifo {
struct nvkm_fifo base;
struct list_head chan;
struct {
struct nvkm_memory *mem[2];
int active;
} runlist;
struct {
struct nvkm_memory *mem;
struct nvkm_vma *bar;
} user;
};
void gf100_fifo_runlist_insert(struct gf100_fifo *, struct gf100_fifo_chan *);
void gf100_fifo_runlist_remove(struct gf100_fifo *, struct gf100_fifo_chan *);
void gf100_fifo_runlist_commit(struct gf100_fifo *);
#endif
@@ -326,15 +326,14 @@ gk104_runl_pending(struct nvkm_runl *runl)
}
void
gk104_fifo_runlist_commit(struct gk104_fifo *fifo, int runl,
struct nvkm_memory *mem, int nr)
gk104_runl_commit(struct nvkm_runl *runl, struct nvkm_memory *memory, u32 start, int count)
{
struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
struct nvkm_device *device = subdev->device;
struct nvkm_runl *rl = nvkm_runl_get(&fifo->base, runl, 0);
struct nvkm_fifo *fifo = runl->fifo;
struct nvkm_device *device = fifo->engine.subdev.device;
u64 addr = nvkm_memory_addr(memory) + start;
int target;
switch (nvkm_memory_target(mem)) {
switch (nvkm_memory_target(memory)) {
case NVKM_MEM_TARGET_VRAM: target = 0; break;
case NVKM_MEM_TARGET_NCOH: target = 3; break;
default:
@@ -342,88 +341,25 @@ gk104_fifo_runlist_commit(struct gk104_fifo *fifo, int runl,
return;
}
nvkm_wr32(device, 0x002270, (nvkm_memory_addr(mem) >> 12) |
(target << 28));
nvkm_wr32(device, 0x002274, (runl << 20) | nr);
rl->func->wait(rl);
}
void
gk104_fifo_runlist_update(struct gk104_fifo *fifo, int runl)
{
const struct gk104_fifo_runlist_func *func = fifo->func->runlist;
struct gk104_fifo_chan *chan;
struct nvkm_memory *mem;
struct nvkm_fifo_cgrp *cgrp;
int nr = 0;
mutex_lock(&fifo->base.mutex);
mem = fifo->runlist[runl].mem[fifo->runlist[runl].next];
fifo->runlist[runl].next = !fifo->runlist[runl].next;
nvkm_kmap(mem);
list_for_each_entry(chan, &fifo->runlist[runl].chan, head) {
func->chan(chan, mem, nr++ * func->size);
}
list_for_each_entry(cgrp, &fifo->runlist[runl].cgrp, head) {
func->cgrp(cgrp, mem, nr++ * func->size);
list_for_each_entry(chan, &cgrp->chan, head) {
func->chan(chan, mem, nr++ * func->size);
}
}
nvkm_done(mem);
func->commit(fifo, runl, mem, nr);
mutex_unlock(&fifo->base.mutex);
}
void
gk104_fifo_runlist_remove(struct gk104_fifo *fifo, struct gk104_fifo_chan *chan)
{
struct nvkm_fifo_cgrp *cgrp = chan->cgrp;
mutex_lock(&fifo->base.mutex);
if (!list_empty(&chan->head)) {
list_del_init(&chan->head);
if (cgrp && !--cgrp->chan_nr)
list_del_init(&cgrp->head);
}
mutex_unlock(&fifo->base.mutex);
}
void
gk104_fifo_runlist_insert(struct gk104_fifo *fifo, struct gk104_fifo_chan *chan)
{
struct nvkm_fifo_cgrp *cgrp = chan->cgrp;
mutex_lock(&fifo->base.mutex);
if (cgrp) {
if (!cgrp->chan_nr++)
list_add_tail(&cgrp->head, &fifo->runlist[chan->runl].cgrp);
list_add_tail(&chan->head, &cgrp->chan);
} else {
list_add_tail(&chan->head, &fifo->runlist[chan->runl].chan);
}
mutex_unlock(&fifo->base.mutex);
spin_lock_irq(&fifo->lock);
nvkm_wr32(device, 0x002270, (target << 28) | (addr >> 12));
nvkm_wr32(device, 0x002274, (runl->id << 20) | count);
spin_unlock_irq(&fifo->lock);
}
void
gk104_fifo_runlist_chan(struct gk104_fifo_chan *chan,
struct nvkm_memory *memory, u32 offset)
gk104_runl_insert_chan(struct nvkm_chan *chan, struct nvkm_memory *memory, u64 offset)
{
nvkm_wo32(memory, offset + 0, chan->base.chid);
nvkm_wo32(memory, offset + 0, chan->id);
nvkm_wo32(memory, offset + 4, 0x00000000);
}
const struct gk104_fifo_runlist_func
gk104_fifo_runlist = {
.size = 8,
.chan = gk104_fifo_runlist_chan,
.commit = gk104_fifo_runlist_commit,
};
static const struct nvkm_runl_func
gk104_runl = {
.size = 8,
.update = nv50_runl_update,
.insert_chan = gk104_runl_insert_chan,
.commit = gk104_runl_commit,
.wait = nv50_runl_wait,
.pending = gk104_runl_pending,
.block = gk104_runl_block,
@@ -793,7 +729,7 @@ gk104_fifo_oneinit(struct nvkm_fifo *base)
struct nvkm_device *device = subdev->device;
struct nvkm_vmm *bar = nvkm_bar_bar1_vmm(device);
struct nvkm_top_device *tdev;
int ret, i, j;
int ret;
/* Determine runlist configuration from topology device info. */
list_for_each_entry(tdev, &device->top->device, head) {
@@ -811,21 +747,6 @@ gk104_fifo_oneinit(struct nvkm_fifo *base)
fifo->runlist_nr = max(fifo->runlist_nr, tdev->runlist + 1);
}
for (i = 0; i < fifo->runlist_nr; i++) {
for (j = 0; j < ARRAY_SIZE(fifo->runlist[i].mem); j++) {
ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
fifo->base.nr * 2/* TSG+chan */ *
fifo->func->runlist->size,
0x1000, false,
&fifo->runlist[i].mem[j]);
if (ret)
return ret;
}
INIT_LIST_HEAD(&fifo->runlist[i].cgrp);
INIT_LIST_HEAD(&fifo->runlist[i].chan);
}
ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
fifo->base.nr * 0x200, 0x1000, true,
&fifo->user.mem);
@@ -845,16 +766,9 @@ gk104_fifo_dtor(struct nvkm_fifo *base)
{
struct gk104_fifo *fifo = gk104_fifo(base);
struct nvkm_device *device = fifo->base.engine.subdev.device;
int i;
nvkm_vmm_put(nvkm_bar_bar1_vmm(device), &fifo->user.bar);
nvkm_memory_unref(&fifo->user.mem);
for (i = 0; i < fifo->runlist_nr; i++) {
nvkm_memory_unref(&fifo->runlist[i].mem[1]);
nvkm_memory_unref(&fifo->runlist[i].mem[0]);
}
return fifo;
}
@@ -887,7 +801,6 @@ gk104_fifo = {
.intr_ctxsw_timeout = gf100_fifo_intr_ctxsw_timeout,
.mmu_fault = &gk104_fifo_mmu_fault,
.engine_id = gk104_fifo_engine_id,
.runlist = &gk104_fifo_runlist,
.nonstall = &gf100_fifo_nonstall,
.runl = &gk104_runl,
.runq = &gk104_runq,
......
@@ -20,10 +20,6 @@ struct gk104_fifo {
int engine_nr;
struct {
struct nvkm_memory *mem[2];
int next;
struct list_head cgrp;
struct list_head chan;
u32 engm;
u32 engm_sw;
} runlist[16];
@@ -37,27 +33,7 @@ struct gk104_fifo {
int gk104_fifo_new_(const struct gk104_fifo_func *, struct nvkm_device *, enum nvkm_subdev_type,
int index, int nr, struct nvkm_fifo **);
void gk104_fifo_runlist_insert(struct gk104_fifo *, struct gk104_fifo_chan *);
void gk104_fifo_runlist_remove(struct gk104_fifo *, struct gk104_fifo_chan *);
void gk104_fifo_runlist_update(struct gk104_fifo *, int runl);
void *gk104_fifo_dtor(struct nvkm_fifo *base);
int gk104_fifo_oneinit(struct nvkm_fifo *);
void gk104_fifo_init(struct nvkm_fifo *base);
extern const struct gk104_fifo_runlist_func gk104_fifo_runlist;
void gk104_fifo_runlist_chan(struct gk104_fifo_chan *,
struct nvkm_memory *, u32);
void gk104_fifo_runlist_commit(struct gk104_fifo *, int runl,
struct nvkm_memory *, int);
extern const struct gk104_fifo_runlist_func gk110_fifo_runlist;
void gk110_fifo_runlist_cgrp(struct nvkm_fifo_cgrp *,
struct nvkm_memory *, u32);
extern const struct gk104_fifo_runlist_func gm107_fifo_runlist;
void gv100_fifo_runlist_cgrp(struct nvkm_fifo_cgrp *,
struct nvkm_memory *, u32);
void gv100_fifo_runlist_chan(struct gk104_fifo_chan *,
struct nvkm_memory *, u32);
#endif
@@ -68,24 +68,20 @@ gk110_cgrp = {
};
void
gk110_fifo_runlist_cgrp(struct nvkm_fifo_cgrp *cgrp,
struct nvkm_memory *memory, u32 offset)
gk110_runl_insert_cgrp(struct nvkm_cgrp *cgrp, struct nvkm_memory *memory, u64 offset)
{
nvkm_wo32(memory, offset + 0, (cgrp->chan_nr << 26) | (128 << 18) |
(3 << 14) | 0x00002000 | cgrp->id);
nvkm_wo32(memory, offset + 4, 0x00000000);
}
const struct gk104_fifo_runlist_func
gk110_fifo_runlist = {
.size = 8,
.cgrp = gk110_fifo_runlist_cgrp,
.chan = gk104_fifo_runlist_chan,
.commit = gk104_fifo_runlist_commit,
};
const struct nvkm_runl_func
gk110_runl = {
.size = 8,
.update = nv50_runl_update,
.insert_cgrp = gk110_runl_insert_cgrp,
.insert_chan = gk104_runl_insert_chan,
.commit = gk104_runl_commit,
.wait = nv50_runl_wait,
.pending = gk104_runl_pending,
.block = gk104_runl_block,
@@ -121,7 +117,6 @@ gk110_fifo = {
.intr_ctxsw_timeout = gf100_fifo_intr_ctxsw_timeout,
.mmu_fault = &gk104_fifo_mmu_fault,
.engine_id = gk104_fifo_engine_id,
.runlist = &gk110_fifo_runlist,
.nonstall = &gf100_fifo_nonstall,
.runl = &gk110_runl,
.runq = &gk104_runq,
......
@@ -64,7 +64,6 @@ gk208_fifo = {
.intr_ctxsw_timeout = gf100_fifo_intr_ctxsw_timeout,
.mmu_fault = &gk104_fifo_mmu_fault,
.engine_id = gk104_fifo_engine_id,
.runlist = &gk110_fifo_runlist,
.nonstall = &gf100_fifo_nonstall,
.runl = &gk110_runl,
.runq = &gk208_runq,
......
@@ -39,7 +39,6 @@ gk20a_fifo = {
.intr_ctxsw_timeout = gf100_fifo_intr_ctxsw_timeout,
.mmu_fault = &gk104_fifo_mmu_fault,
.engine_id = gk104_fifo_engine_id,
.runlist = &gk110_fifo_runlist,
.nonstall = &gf100_fifo_nonstall,
.runl = &gk110_runl,
.runq = &gk208_runq,
......
@@ -42,23 +42,19 @@ gm107_chan = {
};
static void
gm107_fifo_runlist_chan(struct gk104_fifo_chan *chan,
struct nvkm_memory *memory, u32 offset)
gm107_runl_insert_chan(struct nvkm_chan *chan, struct nvkm_memory *memory, u64 offset)
{
nvkm_wo32(memory, offset + 0, chan->base.chid);
nvkm_wo32(memory, offset + 4, chan->base.inst->addr >> 12);
nvkm_wo32(memory, offset + 0, chan->id);
nvkm_wo32(memory, offset + 4, chan->inst->addr >> 12);
}
const struct gk104_fifo_runlist_func
gm107_fifo_runlist = {
.size = 8,
.cgrp = gk110_fifo_runlist_cgrp,
.chan = gm107_fifo_runlist_chan,
.commit = gk104_fifo_runlist_commit,
};
const struct nvkm_runl_func
gm107_runl = {
.size = 8,
.update = nv50_runl_update,
.insert_cgrp = gk110_runl_insert_cgrp,
.insert_chan = gm107_runl_insert_chan,
.commit = gk104_runl_commit,
.wait = nv50_runl_wait,
.pending = gk104_runl_pending,
.block = gk104_runl_block,
@@ -145,7 +141,6 @@ gm107_fifo = {
.intr_ctxsw_timeout = gf100_fifo_intr_ctxsw_timeout,
.mmu_fault = &gm107_fifo_mmu_fault,
.engine_id = gk104_fifo_engine_id,
.runlist = &gm107_fifo_runlist,
.nonstall = &gf100_fifo_nonstall,
.runl = &gm107_runl,
.runq = &gk208_runq,
......
@@ -53,7 +53,6 @@ gm200_fifo = {
.intr_ctxsw_timeout = gf100_fifo_intr_ctxsw_timeout,
.mmu_fault = &gm107_fifo_mmu_fault,
.engine_id = gk104_fifo_engine_id,
.runlist = &gm107_fifo_runlist,
.nonstall = &gf100_fifo_nonstall,
.runl = &gm107_runl,
.runq = &gk208_runq,
......
@@ -25,12 +25,26 @@
#include "gk104.h"
#include "changk104.h"
#include <core/gpuobj.h>
#include <subdev/fault.h>
#include <nvif/class.h>
static void
gp100_runl_insert_chan(struct nvkm_chan *chan, struct nvkm_memory *memory, u64 offset)
{
nvkm_wo32(memory, offset + 0, chan->id | chan->runq << 14);
nvkm_wo32(memory, offset + 4, chan->inst->addr >> 12);
}
static const struct nvkm_runl_func
gp100_runl = {
.runqs = 2,
.size = 8,
.update = nv50_runl_update,
.insert_cgrp = gk110_runl_insert_cgrp,
.insert_chan = gp100_runl_insert_chan,
.commit = gk104_runl_commit,
.wait = nv50_runl_wait,
.pending = gk104_runl_pending,
.block = gk104_runl_block,
@@ -112,7 +126,6 @@ gp100_fifo = {
.intr_ctxsw_timeout = gf100_fifo_intr_ctxsw_timeout,
.mmu_fault = &gp100_fifo_mmu_fault,
.engine_id = gk104_fifo_engine_id,
.runlist = &gm107_fifo_runlist,
.nonstall = &gf100_fifo_nonstall,
.runl = &gp100_runl,
.runq = &gk208_runq,
......
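This is where the "runqueue selector in RAMRL entries" from the commit message lands for gp100: the selector is folded into the first word of the entry. A worked example of the packing (values illustrative):

	/* chan->id = 0x2a, chan->runq = 1  ->  word 0 = 0x0000402a */
	nvkm_wo32(memory, offset + 0, chan->id | chan->runq << 14);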
@@ -127,30 +127,6 @@ gf100_fifo_gpfifo_engine_ctor(struct nvkm_fifo_chan *base,
return nvkm_memory_map(engn->inst, 0, chan->base.vmm, engn->vma, NULL, 0);
}
static void
gf100_fifo_gpfifo_fini(struct nvkm_fifo_chan *base)
{
struct gf100_fifo_chan *chan = gf100_fifo_chan(base);
struct gf100_fifo *fifo = chan->fifo;
if (!list_empty(&chan->head) && !chan->killed) {
gf100_fifo_runlist_remove(fifo, chan);
gf100_fifo_runlist_commit(fifo);
}
}
static void
gf100_fifo_gpfifo_init(struct nvkm_fifo_chan *base)
{
struct gf100_fifo_chan *chan = gf100_fifo_chan(base);
struct gf100_fifo *fifo = chan->fifo;
if (list_empty(&chan->head) && !chan->killed) {
gf100_fifo_runlist_insert(fifo, chan);
gf100_fifo_runlist_commit(fifo);
}
}
static void *
gf100_fifo_gpfifo_dtor(struct nvkm_fifo_chan *base)
{
@@ -160,8 +136,6 @@ gf100_fifo_gpfifo_dtor(struct nvkm_fifo_chan *base)
static const struct nvkm_fifo_chan_func
gf100_fifo_gpfifo_func = {
.dtor = gf100_fifo_gpfifo_dtor,
.init = gf100_fifo_gpfifo_init,
.fini = gf100_fifo_gpfifo_fini,
.engine_ctor = gf100_fifo_gpfifo_engine_ctor,
.engine_dtor = gf100_fifo_gpfifo_engine_dtor,
.engine_init = gf100_fifo_gpfifo_engine_init,
@@ -197,7 +171,6 @@ gf100_fifo_gpfifo_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
return -ENOMEM;
*pobject = &chan->base.object;
chan->fifo = fifo;
INIT_LIST_HEAD(&chan->head);
ret = nvkm_fifo_chan_ctor(&gf100_fifo_gpfifo_func, &fifo->base,
0x1000, 0x1000, true, args->v0.vmm, 0,
......
@@ -152,43 +152,16 @@ gk104_fifo_gpfifo_engine_ctor(struct nvkm_fifo_chan *base,
return nvkm_memory_map(engn->inst, 0, chan->base.vmm, engn->vma, NULL, 0);
}
void
gk104_fifo_gpfifo_fini(struct nvkm_fifo_chan *base)
{
struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
struct gk104_fifo *fifo = chan->fifo;
if (!list_empty(&chan->head)) {
gk104_fifo_runlist_remove(fifo, chan);
gk104_fifo_runlist_update(fifo, chan->runl);
}
}
void
gk104_fifo_gpfifo_init(struct nvkm_fifo_chan *base)
{
struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
struct gk104_fifo *fifo = chan->fifo;
if (list_empty(&chan->head) && !chan->killed) {
gk104_fifo_runlist_insert(fifo, chan);
gk104_fifo_runlist_update(fifo, chan->runl);
}
}
void *
gk104_fifo_gpfifo_dtor(struct nvkm_fifo_chan *base)
{
struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
kfree(chan->cgrp);
return chan;
}
const struct nvkm_fifo_chan_func
gk104_fifo_gpfifo_func = {
.dtor = gk104_fifo_gpfifo_dtor,
.init = gk104_fifo_gpfifo_init,
.fini = gk104_fifo_gpfifo_fini,
.engine_ctor = gk104_fifo_gpfifo_engine_ctor,
.engine_dtor = gk104_fifo_gpfifo_engine_dtor,
.engine_init = gk104_fifo_gpfifo_engine_init,
@@ -215,7 +188,6 @@ gk104_fifo_gpfifo_new_(struct gk104_fifo *fifo, u64 *runlists, u16 *chid,
*pobject = &chan->base.object;
chan->fifo = fifo;
chan->runl = runlist;
INIT_LIST_HEAD(&chan->head);
ret = nvkm_fifo_chan_ctor(&gk104_fifo_gpfifo_func, &fifo->base,
0x1000, 0x1000, true, vmm, 0, fifo->runlist[runlist].engm_sw,
@@ -227,18 +199,6 @@ gk104_fifo_gpfifo_new_(struct gk104_fifo *fifo, u64 *runlists, u16 *chid,
*chid = chan->base.chid;
*inst = chan->base.inst->addr;
/* Hack to support GPUs where even individual channels should be
* part of a channel group.
*/
if (fifo->func->cgrp.force) {
if (!(chan->cgrp = kmalloc(sizeof(*chan->cgrp), GFP_KERNEL)))
return -ENOMEM;
chan->cgrp->id = chan->base.chid;
INIT_LIST_HEAD(&chan->cgrp->head);
INIT_LIST_HEAD(&chan->cgrp->chan);
chan->cgrp->chan_nr = 0;
}
/* Clear channel control registers. */
usermem = chan->base.chid * 0x200;
ilength = order_base_2(ilength / 8);
......
@@ -105,8 +105,6 @@ gv100_fifo_gpfifo_engine_init(struct nvkm_fifo_chan *base,
static const struct nvkm_fifo_chan_func
gv100_fifo_gpfifo = {
.dtor = gk104_fifo_gpfifo_dtor,
.init = gk104_fifo_gpfifo_init,
.fini = gk104_fifo_gpfifo_fini,
.engine_ctor = gk104_fifo_gpfifo_engine_ctor,
.engine_dtor = gk104_fifo_gpfifo_engine_dtor,
.engine_init = gv100_fifo_gpfifo_engine_init,
@@ -134,7 +132,6 @@ gv100_fifo_gpfifo_new_(const struct nvkm_fifo_chan_func *func,
*pobject = &chan->base.object;
chan->fifo = fifo;
chan->runl = runlist;
INIT_LIST_HEAD(&chan->head);
ret = nvkm_fifo_chan_ctor(func, &fifo->base, 0x1000, 0x1000, true, vmm,
0, fifo->runlist[runlist].engm, 1, fifo->user.bar->addr, 0x200,
@@ -146,18 +143,6 @@ gv100_fifo_gpfifo_new_(const struct nvkm_fifo_chan_func *func,
*inst = chan->base.inst->addr;
*token = chan->base.func->doorbell_handle(&chan->base);
/* Hack to support GPUs where even individual channels should be
* part of a channel group.
*/
if (fifo->func->cgrp.force) {
if (!(chan->cgrp = kmalloc(sizeof(*chan->cgrp), GFP_KERNEL)))
return -ENOMEM;
chan->cgrp->id = chan->base.chid;
INIT_LIST_HEAD(&chan->cgrp->head);
INIT_LIST_HEAD(&chan->cgrp->chan);
chan->cgrp->chan_nr = 0;
}
/* Clear channel control registers. */
usermem = chan->base.chid * 0x200;
ilength = order_base_2(ilength / 8);
......
@@ -31,8 +31,6 @@
static const struct nvkm_fifo_chan_func
tu102_fifo_gpfifo = {
.dtor = gk104_fifo_gpfifo_dtor,
.init = gk104_fifo_gpfifo_init,
.fini = gk104_fifo_gpfifo_fini,
.engine_ctor = gk104_fifo_gpfifo_engine_ctor,
.engine_dtor = gk104_fifo_gpfifo_engine_dtor,
.engine_init = gv100_fifo_gpfifo_engine_init,
......
@@ -98,22 +98,20 @@ gv100_runl_preempt(struct nvkm_runl *runl)
}
void
gv100_fifo_runlist_chan(struct gk104_fifo_chan *chan,
struct nvkm_memory *memory, u32 offset)
gv100_runl_insert_chan(struct nvkm_chan *chan, struct nvkm_memory *memory, u64 offset)
{
struct nvkm_memory *usermem = chan->fifo->user.mem;
const u64 user = nvkm_memory_addr(usermem) + (chan->base.chid * 0x200);
const u64 inst = chan->base.inst->addr;
struct nvkm_memory *usermem = gk104_fifo(chan->cgrp->runl->fifo)->user.mem;
const u64 user = nvkm_memory_addr(usermem) + (chan->id * 0x200);
const u64 inst = chan->inst->addr;
nvkm_wo32(memory, offset + 0x0, lower_32_bits(user));
nvkm_wo32(memory, offset + 0x0, lower_32_bits(user) | chan->runq << 1);
nvkm_wo32(memory, offset + 0x4, upper_32_bits(user));
nvkm_wo32(memory, offset + 0x8, lower_32_bits(inst) | chan->base.chid);
nvkm_wo32(memory, offset + 0x8, lower_32_bits(inst) | chan->id);
nvkm_wo32(memory, offset + 0xc, upper_32_bits(inst));
}
void
gv100_fifo_runlist_cgrp(struct nvkm_fifo_cgrp *cgrp,
struct nvkm_memory *memory, u32 offset)
gv100_runl_insert_cgrp(struct nvkm_cgrp *cgrp, struct nvkm_memory *memory, u64 offset)
{
nvkm_wo32(memory, offset + 0x0, (128 << 24) | (3 << 16) | 0x00000001);
nvkm_wo32(memory, offset + 0x4, cgrp->chan_nr);
@@ -121,16 +119,14 @@ gv100_fifo_runlist_cgrp(struct nvkm_fifo_cgrp *cgrp,
nvkm_wo32(memory, offset + 0xc, 0x00000000);
}
static const struct gk104_fifo_runlist_func
gv100_fifo_runlist = {
.size = 16,
.cgrp = gv100_fifo_runlist_cgrp,
.chan = gv100_fifo_runlist_chan,
.commit = gk104_fifo_runlist_commit,
};
static const struct nvkm_runl_func
gv100_runl = {
.runqs = 2,
.size = 16,
.update = nv50_runl_update,
.insert_cgrp = gv100_runl_insert_cgrp,
.insert_chan = gv100_runl_insert_chan,
.commit = gk104_runl_commit,
.wait = nv50_runl_wait,
.pending = gk104_runl_pending,
.block = gk104_runl_block,
@@ -401,7 +397,6 @@ gv100_fifo = {
.intr_ctxsw_timeout = gv100_fifo_intr_ctxsw_timeout,
.mmu_fault = &gv100_fifo_mmu_fault,
.engine_id = gk104_fifo_engine_id,
.runlist = &gv100_fifo_runlist,
.nonstall = &gf100_fifo_nonstall,
.runl = &gv100_runl,
.runq = &gv100_runq,
......
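For comparison, gv100's 16-byte entries put the runqueue selector in bit 1 of the USERD address word. A sketch of the layout written by gv100_runl_insert_chan() above:

	/* w0: lower_32_bits(user) | runq << 1   (USERD address + runq select)
	 * w1: upper_32_bits(user)
	 * w2: lower_32_bits(inst) | chid        (instance address + channel id)
	 * w3: upper_32_bits(inst)
	 */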
@@ -82,36 +82,6 @@ const struct nvkm_engn_func
nv50_engn_sw = {
};
static void
nv50_fifo_runlist_update_locked(struct nv50_fifo *fifo)
{
struct nvkm_device *device = fifo->base.engine.subdev.device;
struct nvkm_memory *cur;
int i, p;
cur = fifo->runlist[fifo->cur_runlist];
fifo->cur_runlist = !fifo->cur_runlist;
nvkm_kmap(cur);
for (i = 0, p = 0; i < fifo->base.nr; i++) {
if (nvkm_rd32(device, 0x002600 + (i * 4)) & 0x80000000)
nvkm_wo32(cur, p++ * 4, i);
}
nvkm_done(cur);
nvkm_wr32(device, 0x0032f4, nvkm_memory_addr(cur) >> 12);
nvkm_wr32(device, 0x0032ec, p);
nvkm_wr32(device, 0x002500, 0x00000101);
}
void
nv50_fifo_runlist_update(struct nv50_fifo *fifo)
{
mutex_lock(&fifo->base.mutex);
nv50_fifo_runlist_update_locked(fifo);
mutex_unlock(&fifo->base.mutex);
}
static bool
nv50_runl_pending(struct nvkm_runl *runl)
{
@@ -132,17 +102,112 @@ nv50_runl_wait(struct nvkm_runl *runl)
return -ETIMEDOUT;
}
static void
nv50_runl_commit(struct nvkm_runl *runl, struct nvkm_memory *memory, u32 start, int count)
{
struct nvkm_device *device = runl->fifo->engine.subdev.device;
u64 addr = nvkm_memory_addr(memory) + start;
nvkm_wr32(device, 0x0032f4, addr >> 12);
nvkm_wr32(device, 0x0032ec, count);
}
static void
nv50_runl_insert_chan(struct nvkm_chan *chan, struct nvkm_memory *memory, u64 offset)
{
nvkm_wo32(memory, offset, chan->id);
}
static struct nvkm_memory *
nv50_runl_alloc(struct nvkm_runl *runl, u32 *offset)
{
const u32 segment = ALIGN((runl->cgrp_nr + runl->chan_nr) * runl->func->size, 0x1000);
const u32 maxsize = (runl->cgid ? runl->cgid->nr : 0) + runl->chid->nr;
int ret;
if (unlikely(!runl->mem)) {
ret = nvkm_memory_new(runl->fifo->engine.subdev.device, NVKM_MEM_TARGET_INST,
maxsize * 2 * runl->func->size, 0, false, &runl->mem);
if (ret) {
RUNL_ERROR(runl, "alloc %d\n", ret);
return ERR_PTR(ret);
}
} else {
if (runl->offset + segment >= nvkm_memory_size(runl->mem)) {
ret = runl->func->wait(runl);
if (ret) {
RUNL_DEBUG(runl, "rewind timeout");
return ERR_PTR(ret);
}
runl->offset = 0;
}
}
*offset = runl->offset;
runl->offset += segment;
return runl->mem;
}
int
nv50_runl_update(struct nvkm_runl *runl)
{
struct nvkm_memory *memory;
struct nvkm_cgrp *cgrp;
struct nvkm_chan *chan;
u32 start, offset, count;
/*TODO: prio, interleaving. */
RUNL_TRACE(runl, "RAMRL: update cgrps:%d chans:%d", runl->cgrp_nr, runl->chan_nr);
memory = nv50_runl_alloc(runl, &start);
if (IS_ERR(memory))
return PTR_ERR(memory);
RUNL_TRACE(runl, "RAMRL: update start:%08x", start);
offset = start;
nvkm_kmap(memory);
nvkm_runl_foreach_cgrp(cgrp, runl) {
if (cgrp->hw) {
CGRP_TRACE(cgrp, " RAMRL+%08x: chans:%d", offset, cgrp->chan_nr);
runl->func->insert_cgrp(cgrp, memory, offset);
offset += runl->func->size;
}
nvkm_cgrp_foreach_chan(chan, cgrp) {
CHAN_TRACE(chan, "RAMRL+%08x: [%s]", offset, chan->name);
runl->func->insert_chan(chan, memory, offset);
offset += runl->func->size;
}
}
nvkm_done(memory);
/*TODO: look into using features on newer HW to guarantee forward progress. */
list_rotate_left(&runl->cgrps);
count = (offset - start) / runl->func->size;
RUNL_TRACE(runl, "RAMRL: commit start:%08x count:%d", start, count);
runl->func->commit(runl, memory, start, count);
return 0;
}
const struct nvkm_runl_func
nv50_runl = {
.size = 4,
.update = nv50_runl_update,
.insert_chan = nv50_runl_insert_chan,
.commit = nv50_runl_commit,
.wait = nv50_runl_wait,
.pending = nv50_runl_pending,
};
void
nv50_fifo_init(struct nvkm_fifo *base)
nv50_fifo_init(struct nvkm_fifo *fifo)
{
struct nv50_fifo *fifo = nv50_fifo(base);
struct nvkm_device *device = fifo->base.engine.subdev.device;
struct nvkm_runl *runl = nvkm_runl_first(fifo);
struct nvkm_device *device = fifo->engine.subdev.device;
int i;
nvkm_mask(device, 0x000200, 0x00000100, 0x00000000);
@@ -155,7 +220,9 @@ nv50_fifo_init(struct nvkm_fifo *base)
for (i = 0; i < 128; i++)
nvkm_wr32(device, 0x002600 + (i * 4), 0x00000000);
nv50_fifo_runlist_update_locked(fifo);
atomic_set(&runl->changed, 1);
runl->func->update(runl);
nvkm_wr32(device, 0x003200, 0x00000001);
nvkm_wr32(device, 0x003250, 0x00000001);
@@ -175,28 +242,10 @@ nv50_fifo_chid_nr(struct nvkm_fifo *fifo)
return 128;
}
int
nv50_fifo_oneinit(struct nvkm_fifo *base)
{
struct nv50_fifo *fifo = nv50_fifo(base);
struct nvkm_device *device = fifo->base.engine.subdev.device;
int ret;
ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 128 * 4, 0x1000,
false, &fifo->runlist[0]);
if (ret)
return ret;
return nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 128 * 4, 0x1000,
false, &fifo->runlist[1]);
}
void *
nv50_fifo_dtor(struct nvkm_fifo *base)
{
struct nv50_fifo *fifo = nv50_fifo(base);
nvkm_memory_unref(&fifo->runlist[1]);
nvkm_memory_unref(&fifo->runlist[0]);
return fifo;
}
@@ -221,7 +270,6 @@ nv50_fifo_new_(const struct nvkm_fifo_func *func, struct nvkm_device *device,
static const struct nvkm_fifo_func
nv50_fifo = {
.dtor = nv50_fifo_dtor,
.oneinit = nv50_fifo_oneinit,
.chid_nr = nv50_fifo_chid_nr,
.chid_ctor = nv50_fifo_chid_ctor,
.runl_ctor = nv04_fifo_runl_ctor,
......
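The per-GPU double-buffered runlist memory is gone; nv50_runl_alloc() instead carves segments out of a single ring (runl->mem) and only waits for the hardware when it has to rewind. A worked sizing example under the arithmetic above (numbers illustrative):

	/* 8-byte entries, 3 cgrps + 10 chans currently resident:
	 *   segment = ALIGN((3 + 10) * 8, 0x1000) = 0x1000
	 * the backing buffer is maxsize * 2 * entry size, so a fresh
	 * segment is normally available without waiting on the fetch
	 * of the previously committed runlist
	 */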
@@ -6,17 +6,12 @@
struct nv50_fifo {
struct nvkm_fifo base;
struct nvkm_memory *runlist[2];
int cur_runlist;
};
int nv50_fifo_new_(const struct nvkm_fifo_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
struct nvkm_fifo **);
void *nv50_fifo_dtor(struct nvkm_fifo *);
int nv50_fifo_oneinit(struct nvkm_fifo *);
void nv50_fifo_init(struct nvkm_fifo *);
void nv50_fifo_runlist_update(struct nv50_fifo *);
int g84_fifo_engine_id(struct nvkm_fifo *, struct nvkm_engine *);
#endif
@@ -10,7 +10,6 @@ struct nvkm_memory;
struct nvkm_runl;
struct nvkm_runq;
struct gk104_fifo;
struct gk104_fifo_chan;
struct nvkm_fifo_chan_oclass;
struct nvkm_fifo_func {
@@ -42,16 +41,6 @@ struct nvkm_fifo_func {
void (*pause)(struct nvkm_fifo *, unsigned long *);
void (*start)(struct nvkm_fifo *, unsigned long *);
const struct gk104_fifo_runlist_func {
u8 size;
void (*cgrp)(struct nvkm_fifo_cgrp *,
struct nvkm_memory *, u32 offset);
void (*chan)(struct gk104_fifo_chan *,
struct nvkm_memory *, u32 offset);
void (*commit)(struct gk104_fifo *, int runl,
struct nvkm_memory *, int entries);
} *runlist;
const struct nvkm_event_func *nonstall;
const struct nvkm_runl_func *runl;
@@ -98,7 +87,9 @@ int nv10_fifo_chid_nr(struct nvkm_fifo *);
int nv50_fifo_chid_nr(struct nvkm_fifo *);
int nv50_fifo_chid_ctor(struct nvkm_fifo *, int);
void nv50_fifo_init(struct nvkm_fifo *);
extern const struct nvkm_runl_func nv50_runl;
int nv50_runl_update(struct nvkm_runl *);
int nv50_runl_wait(struct nvkm_runl *);
extern const struct nvkm_engn_func nv50_engn_sw;
void nv50_chan_unbind(struct nvkm_chan *);
@@ -140,6 +131,8 @@ extern const struct nvkm_enum gk104_fifo_mmu_fault_reason[];
extern const struct nvkm_enum gk104_fifo_mmu_fault_hubclient[];
extern const struct nvkm_enum gk104_fifo_mmu_fault_gpcclient[];
int gk104_fifo_engine_id(struct nvkm_fifo *, struct nvkm_engine *);
void gk104_runl_insert_chan(struct nvkm_chan *, struct nvkm_memory *, u64);
void gk104_runl_commit(struct nvkm_runl *, struct nvkm_memory *, u32, int);
bool gk104_runl_pending(struct nvkm_runl *);
void gk104_runl_block(struct nvkm_runl *, u32);
void gk104_runl_allow(struct nvkm_runl *, u32);
@@ -162,6 +155,7 @@ void gk104_chan_stop(struct nvkm_chan *);
int gk110_fifo_chid_ctor(struct nvkm_fifo *, int);
extern const struct nvkm_runl_func gk110_runl;
extern const struct nvkm_cgrp_func gk110_cgrp;
void gk110_runl_insert_cgrp(struct nvkm_cgrp *, struct nvkm_memory *, u64);
extern const struct nvkm_chan_func gk110_chan;
void gk110_chan_preempt(struct nvkm_chan *);
@@ -180,6 +174,8 @@ extern const struct nvkm_enum gv100_fifo_mmu_fault_access[];
extern const struct nvkm_enum gv100_fifo_mmu_fault_reason[];
extern const struct nvkm_enum gv100_fifo_mmu_fault_hubclient[];
extern const struct nvkm_enum gv100_fifo_mmu_fault_gpcclient[];
void gv100_runl_insert_cgrp(struct nvkm_cgrp *, struct nvkm_memory *, u64);
void gv100_runl_insert_chan(struct nvkm_chan *, struct nvkm_memory *, u64);
void gv100_runl_preempt(struct nvkm_runl *);
extern const struct nvkm_runq_func gv100_runq;
extern const struct nvkm_engn_func gv100_engn;
......
@@ -54,9 +54,6 @@ nvkm_engn_cgrp_get(struct nvkm_engn *engn, unsigned long *pirqflags)
return cgrp;
}
#include "gf100.h"
#include "gk104.h"
static void
nvkm_runl_rc(struct nvkm_runl *runl)
{
@@ -79,8 +76,10 @@ nvkm_runl_rc(struct nvkm_runl *runl)
state = atomic_cmpxchg(&cgrp->rc, NVKM_CGRP_RC_PENDING, NVKM_CGRP_RC_RUNNING);
if (state == NVKM_CGRP_RC_PENDING) {
/* Disable all channels in them, and remove from runlist. */
nvkm_cgrp_foreach_chan_safe(chan, ctmp, cgrp)
nvkm_cgrp_foreach_chan_safe(chan, ctmp, cgrp) {
nvkm_chan_error(chan, false);
nvkm_chan_remove_locked(chan);
}
}
}
@@ -119,16 +118,14 @@ nvkm_runl_rc(struct nvkm_runl *runl)
}
/* Submit runlist update, and clear any remaining exception state. */
if (runl->fifo->engine.subdev.device->card_type < NV_E0)
gf100_fifo_runlist_commit(gf100_fifo(runl->fifo));
else
gk104_fifo_runlist_update(gk104_fifo(runl->fifo), runl->id);
runl->func->update(runl);
if (runl->func->fault_clear)
runl->func->fault_clear(runl);
/* Unblock runlist processing. */
while (rc--)
nvkm_runl_allow(runl);
runl->func->wait(runl);
}
static void
@@ -270,6 +267,16 @@ nvkm_runl_update_pending(struct nvkm_runl *runl)
return true;
}
void
nvkm_runl_update_locked(struct nvkm_runl *runl, bool wait)
{
if (atomic_xchg(&runl->changed, 0) && runl->func->update) {
runl->func->update(runl);
if (wait)
runl->func->wait(runl);
}
}
void
nvkm_runl_allow(struct nvkm_runl *runl)
{
@@ -309,6 +316,8 @@ nvkm_runl_del(struct nvkm_runl *runl)
{
struct nvkm_engn *engn, *engt;
nvkm_memory_unref(&runl->mem);
list_for_each_entry_safe(engn, engt, &runl->engns, head) {
list_del(&engn->head);
kfree(engn);
@@ -395,6 +404,7 @@ nvkm_runl_new(struct nvkm_fifo *fifo, int runi, u32 addr, int id_nr)
runl->addr = addr;
INIT_LIST_HEAD(&runl->engns);
INIT_LIST_HEAD(&runl->cgrps);
atomic_set(&runl->changed, 0);
mutex_init(&runl->mutex);
INIT_WORK(&runl->work, nvkm_runl_work);
atomic_set(&runl->rc_triggered, 0);
......
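nvkm_runl_update_locked() is what implements the "less dependence on waiting" bullet: topology edits only flag runl->changed, and the rebuild/commit happens once, with the wait optional. A minimal sketch (assumes the caller holds runl->mutex; chan_a/chan_b are hypothetical):

	nvkm_chan_remove_locked(chan_a);       /* each edit sets runl->changed */
	nvkm_chan_remove_locked(chan_b);
	nvkm_runl_update_locked(runl, false);  /* one rebuild + commit, no wait */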
@@ -2,6 +2,7 @@
#define __NVKM_RUNL_H__
#include <core/os.h>
struct nvkm_cgrp;
struct nvkm_chan;
struct nvkm_memory;
enum nvkm_subdev_type;
@@ -28,6 +29,12 @@ struct nvkm_engn {
struct nvkm_runl {
const struct nvkm_runl_func {
int runqs;
u8 size;
int (*update)(struct nvkm_runl *);
void (*insert_cgrp)(struct nvkm_cgrp *, struct nvkm_memory *, u64 offset);
void (*insert_chan)(struct nvkm_chan *, struct nvkm_memory *, u64 offset);
void (*commit)(struct nvkm_runl *, struct nvkm_memory *, u32 start, int count);
int (*wait)(struct nvkm_runl *);
bool (*pending)(struct nvkm_runl *);
void (*block)(struct nvkm_runl *, u32 engm);
@@ -52,6 +59,9 @@ struct nvkm_runl {
struct list_head cgrps;
int cgrp_nr;
int chan_nr;
atomic_t changed;
struct nvkm_memory *mem;
u32 offset;
struct mutex mutex;
int blocked;
@@ -71,6 +81,7 @@ void nvkm_runl_del(struct nvkm_runl *);
void nvkm_runl_fini(struct nvkm_runl *);
void nvkm_runl_block(struct nvkm_runl *);
void nvkm_runl_allow(struct nvkm_runl *);
void nvkm_runl_update_locked(struct nvkm_runl *, bool wait);
bool nvkm_runl_update_pending(struct nvkm_runl *);
int nvkm_runl_preempt_wait(struct nvkm_runl *);
......
@@ -66,28 +66,25 @@ tu102_runl_pending(struct nvkm_runl *runl)
}
static void
tu102_fifo_runlist_commit(struct gk104_fifo *fifo, int runl,
struct nvkm_memory *mem, int nr)
tu102_runl_commit(struct nvkm_runl *runl, struct nvkm_memory *memory, u32 start, int count)
{
struct nvkm_device *device = fifo->base.engine.subdev.device;
u64 addr = nvkm_memory_addr(mem);
struct nvkm_device *device = runl->fifo->engine.subdev.device;
u64 addr = nvkm_memory_addr(memory) + start;
/*XXX: target? */
nvkm_wr32(device, 0x002b00 + (runl * 0x10), lower_32_bits(addr));
nvkm_wr32(device, 0x002b04 + (runl * 0x10), upper_32_bits(addr));
nvkm_wr32(device, 0x002b08 + (runl * 0x10), nr);
nvkm_wr32(device, 0x002b00 + (runl->id * 0x10), lower_32_bits(addr));
nvkm_wr32(device, 0x002b04 + (runl->id * 0x10), upper_32_bits(addr));
nvkm_wr32(device, 0x002b08 + (runl->id * 0x10), count);
}
static const struct gk104_fifo_runlist_func
tu102_fifo_runlist = {
.size = 16,
.cgrp = gv100_fifo_runlist_cgrp,
.chan = gv100_fifo_runlist_chan,
.commit = tu102_fifo_runlist_commit,
};
static const struct nvkm_runl_func
tu102_runl = {
.runqs = 2,
.size = 16,
.update = nv50_runl_update,
.insert_cgrp = gv100_runl_insert_cgrp,
.insert_chan = gv100_runl_insert_chan,
.commit = tu102_runl_commit,
.wait = nv50_runl_wait,
.pending = tu102_runl_pending,
.block = gk104_runl_block,
@@ -274,7 +271,6 @@ tu102_fifo = {
.intr = tu102_fifo_intr,
.mmu_fault = &tu102_fifo_mmu_fault,
.engine_id = gk104_fifo_engine_id,
.runlist = &tu102_fifo_runlist,
.nonstall = &gf100_fifo_nonstall,
.runl = &tu102_runl,
.runq = &gv100_runq,
......
@@ -213,14 +213,9 @@ static int
nvkm_uchan_fini(struct nvkm_object *object, bool suspend)
{
struct nvkm_chan *chan = nvkm_uchan(object)->chan;
int ret;
nvkm_chan_block(chan);
nvkm_chan_preempt(chan, true);
ret = chan->object.func->fini(&chan->object, suspend);
if (ret && suspend)
return ret;
nvkm_chan_remove(chan, true);
if (chan->func->unbind)
chan->func->unbind(chan);
@@ -240,8 +235,8 @@ nvkm_uchan_init(struct nvkm_object *object)
chan->func->bind(chan);
nvkm_chan_allow(chan);
return chan->object.func->init(&chan->object);
nvkm_chan_insert(chan);
return 0;
}
static void *
......