Commit 3647c53b authored by Ben Skeggs

drm/nouveau/fifo: add RAMFC info to nvkm_chan_func

- adds support for specifying SUBDEVICE_ID for channel
- rounds non-power-of-two GPFIFO sizes down, rather than up
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
Reviewed-by: Lyude Paul <lyude@redhat.com>
parent fbe9f433
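The size-rounding change comes from moving the ring-size maths into the new ramfc hooks: the old gpfifo paths computed the size exponent with order_base_2(ilength / 8), which rounds a non-power-of-two entry count up, while the ramfc_write() implementations below use ilog2(length / 8), which rounds down. A minimal standalone sketch of the difference, using userspace stand-ins for the kernel's ilog2()/order_base_2() helpers rather than the driver code itself:

#include <stdio.h>

/* Simplified stand-ins for ilog2()/order_base_2() from linux/log2.h. */
static unsigned int my_ilog2(unsigned long v)		/* floor(log2(v)) */
{
	unsigned int r = 0;
	while (v >>= 1)
		r++;
	return r;
}

static unsigned int my_order_base_2(unsigned long v)	/* ceil(log2(v)) */
{
	return v > 1 ? my_ilog2(v - 1) + 1 : 0;
}

int main(void)
{
	unsigned long ilength = 0x3000;	/* 12KiB ring: 1536 8-byte GPFIFO entries */

	printf("old: %u\n", my_order_base_2(ilength / 8));	/* 11 -> limit 2048 entries */
	printf("new: %u\n", my_ilog2(ilength / 8));		/* 10 -> limit 1024 entries */
	return 0;
}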
......@@ -28,6 +28,13 @@ struct nvkm_chan {
u32 base;
} userd;
u32 ramfc_offset;
struct nvkm_gpuobj *ramfc;
struct nvkm_gpuobj *cache;
struct nvkm_gpuobj *eng;
struct nvkm_gpuobj *pgd;
struct nvkm_ramht *ramht;
spinlock_t lock;
atomic_t blocked;
atomic_t errored;
......
......@@ -29,8 +29,8 @@
#include "priv.h"
#include <core/client.h>
#include <core/gpuobj.h>
#include <core/oproxy.h>
#include <core/ramht.h>
#include <subdev/mmu.h>
#include <engine/dma.h>
......@@ -434,6 +434,15 @@ nvkm_chan_del(struct nvkm_chan **pchan)
if (!chan)
return;
if (chan->func->ramfc->clear)
chan->func->ramfc->clear(chan);
nvkm_ramht_del(&chan->ramht);
nvkm_gpuobj_del(&chan->pgd);
nvkm_gpuobj_del(&chan->eng);
nvkm_gpuobj_del(&chan->cache);
nvkm_gpuobj_del(&chan->ramfc);
nvkm_memory_unref(&chan->userd.mem);
if (chan->cgrp) {
......@@ -618,16 +627,17 @@ nvkm_fifo_chan_ctor(const struct nvkm_fifo_chan_func *fn,
chan->vmm = nvkm_vmm_ref(vmm);
}
/* allocate push buffer ctxdma instance */
if (push) {
/* Allocate HW ctxdma for push buffer. */
if (func->ramfc->ctxdma) {
dmaobj = nvkm_dmaobj_search(client, push);
if (IS_ERR(dmaobj))
return PTR_ERR(dmaobj);
ret = nvkm_object_bind(&dmaobj->object, chan->inst, -16,
&chan->push);
if (ret)
ret = nvkm_object_bind(&dmaobj->object, chan->inst, -16, &chan->push);
if (ret) {
RUNL_DEBUG(runl, "bind %d", ret);
return ret;
}
}
/* Allocate channel ID. */
......
......@@ -29,6 +29,21 @@ struct nvkm_chan_func {
void (*clear)(struct nvkm_chan *);
} *userd;
const struct nvkm_chan_func_ramfc {
const struct nvkm_ramfc_layout {
unsigned bits:6;
unsigned ctxs:5;
unsigned ctxp:8;
unsigned regs:5;
unsigned regp;
} *layout;
int (*write)(struct nvkm_chan *, u64 offset, u64 length, u32 devm, bool priv);
void (*clear)(struct nvkm_chan *);
bool ctxdma;
u32 devm;
bool priv;
} *ramfc;
void (*bind)(struct nvkm_chan *);
void (*unbind)(struct nvkm_chan *);
void (*start)(struct nvkm_chan *);
......
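The hunk above is the core of the change: each chipset now describes its RAMFC entirely through nvkm_chan_func_ramfc (an optional register layout table, a write()/clear() pair, and defaults for ctxdma, devm and priv), and the rest of the patch converts the per-chipset channel code to fill these in. A rough standalone sketch of how such a hook table is consumed, with simplified stand-in types in place of the real nvkm structures:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct chan;	/* stand-in for struct nvkm_chan */

/* Simplified model of nvkm_chan_func_ramfc, for illustration only. */
struct chan_func_ramfc {
	int (*write)(struct chan *, uint64_t offset, uint64_t length,
		     uint32_t devm, bool priv);
	void (*clear)(struct chan *);
	bool ctxdma;	/* channel needs a push-buffer ctxdma bound first */
	uint32_t devm;	/* default engine/subdevice mask for the RAMFC */
	bool priv;	/* chipset supports privileged channels */
};

struct chan {
	const struct chan_func_ramfc *ramfc;
	int id;
};

static int demo_ramfc_write(struct chan *chan, uint64_t offset, uint64_t length,
			    uint32_t devm, bool priv)
{
	/* A real implementation would fill the channel's RAMFC here. */
	printf("chan %d: gpfifo %#llx+%#llx devm %#x priv %d\n", chan->id,
	       (unsigned long long)offset, (unsigned long long)length, devm, priv);
	return 0;
}

static const struct chan_func_ramfc demo_ramfc = {
	.write = demo_ramfc_write,
	.ctxdma = true,
	.devm = 0xfff,
};

int main(void)
{
	struct chan chan = { .ramfc = &demo_ramfc, .id = 3 };

	/* Dispatch through the hook; this demo just uses the table's defaults. */
	return chan.ramfc->write(&chan, 0x140000000ULL, 0x2000,
				 chan.ramfc->devm, chan.ramfc->priv);
}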
......@@ -182,7 +182,6 @@ g84_fifo_chan_ctor(struct nv50_fifo *fifo, u64 vmm, u64 push,
const struct nvkm_oclass *oclass,
struct nv50_fifo_chan *chan)
{
struct nvkm_device *device = fifo->base.engine.subdev.device;
int ret;
if (!vmm)
......@@ -206,28 +205,5 @@ g84_fifo_chan_ctor(struct nv50_fifo *fifo, u64 vmm, u64 push,
BIT(G84_FIFO_ENGN_DMA),
0, 0xc00000, 0x2000, oclass, &chan->base);
chan->fifo = fifo;
if (ret)
return ret;
ret = nvkm_gpuobj_new(device, 0x0200, 0, true, chan->base.inst,
&chan->eng);
if (ret)
return ret;
ret = nvkm_gpuobj_new(device, 0x4000, 0, false, chan->base.inst,
&chan->pgd);
if (ret)
return ret;
ret = nvkm_gpuobj_new(device, 0x1000, 0x400, true, chan->base.inst,
&chan->cache);
if (ret)
return ret;
ret = nvkm_gpuobj_new(device, 0x100, 0x100, true, chan->base.inst,
&chan->ramfc);
if (ret)
return ret;
return nvkm_ramht_new(device, 0x8000, 16, chan->base.inst, &chan->ramht);
return ret;
}
......@@ -187,11 +187,6 @@ void *
nv50_fifo_chan_dtor(struct nvkm_fifo_chan *base)
{
struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
nvkm_ramht_del(&chan->ramht);
nvkm_gpuobj_del(&chan->pgd);
nvkm_gpuobj_del(&chan->eng);
nvkm_gpuobj_del(&chan->cache);
nvkm_gpuobj_del(&chan->ramfc);
return chan;
}
......@@ -211,7 +206,6 @@ nv50_fifo_chan_ctor(struct nv50_fifo *fifo, u64 vmm, u64 push,
const struct nvkm_oclass *oclass,
struct nv50_fifo_chan *chan)
{
struct nvkm_device *device = fifo->base.engine.subdev.device;
int ret;
if (!vmm)
......@@ -225,23 +219,5 @@ nv50_fifo_chan_ctor(struct nv50_fifo *fifo, u64 vmm, u64 push,
BIT(NV50_FIFO_ENGN_DMA),
0, 0xc00000, 0x2000, oclass, &chan->base);
chan->fifo = fifo;
if (ret)
return ret;
ret = nvkm_gpuobj_new(device, 0x0200, 0x1000, true, chan->base.inst,
&chan->ramfc);
if (ret)
return ret;
ret = nvkm_gpuobj_new(device, 0x1200, 0, true, chan->base.inst,
&chan->eng);
if (ret)
return ret;
ret = nvkm_gpuobj_new(device, 0x4000, 0, false, chan->base.inst,
&chan->pgd);
if (ret)
return ret;
return nvkm_ramht_new(device, 0x8000, 16, chan->base.inst, &chan->ramht);
return ret;
}
......@@ -9,10 +9,7 @@ struct nv50_fifo_chan {
struct nv50_fifo *fifo;
struct nvkm_fifo_chan base;
struct nvkm_gpuobj *ramfc;
struct nvkm_gpuobj *cache;
struct nvkm_gpuobj *eng;
struct nvkm_gpuobj *pgd;
struct nvkm_ramht *ramht;
#define NV50_FIFO_ENGN_SW 0
......
......@@ -73,15 +73,6 @@ void *
nv04_fifo_dma_dtor(struct nvkm_fifo_chan *base)
{
struct nv04_fifo_chan *chan = nv04_fifo_chan(base);
struct nv04_fifo *fifo = chan->fifo;
struct nvkm_instmem *imem = fifo->base.engine.subdev.device->imem;
const struct nv04_fifo_ramfc *c = fifo->ramfc;
nvkm_kmap(imem->ramfc);
do {
nvkm_wo32(imem->ramfc, chan->ramfc + c->ctxp, 0x00000000);
} while ((++c)->bits);
nvkm_done(imem->ramfc);
return chan;
}
......@@ -102,8 +93,6 @@ nv04_fifo_dma_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
} *args = data;
struct nv04_fifo *fifo = nv04_fifo(base);
struct nv04_fifo_chan *chan = NULL;
struct nvkm_device *device = fifo->base.engine.subdev.device;
struct nvkm_instmem *imem = device->imem;
int ret = -ENOSYS;
nvif_ioctl(parent, "create channel dma size %d\n", size);
......@@ -131,20 +120,8 @@ nv04_fifo_dma_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
return ret;
args->v0.chid = chan->base.chid;
chan->ramfc = chan->base.chid * 32;
nvkm_kmap(imem->ramfc);
nvkm_wo32(imem->ramfc, chan->ramfc + 0x00, args->v0.offset);
nvkm_wo32(imem->ramfc, chan->ramfc + 0x04, args->v0.offset);
nvkm_wo32(imem->ramfc, chan->ramfc + 0x08, chan->base.push->addr >> 4);
nvkm_wo32(imem->ramfc, chan->ramfc + 0x10,
NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
#ifdef __BIG_ENDIAN
NV_PFIFO_CACHE1_BIG_ENDIAN |
#endif
NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
nvkm_done(imem->ramfc);
chan->base.func->ramfc->write(&chan->base, args->v0.offset, 0, BIT(0), false);
return 0;
}
......
......@@ -41,8 +41,6 @@ nv10_fifo_dma_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
} *args = data;
struct nv04_fifo *fifo = nv04_fifo(base);
struct nv04_fifo_chan *chan = NULL;
struct nvkm_device *device = fifo->base.engine.subdev.device;
struct nvkm_instmem *imem = device->imem;
int ret = -ENOSYS;
nvif_ioctl(parent, "create channel dma size %d\n", size);
......@@ -70,20 +68,8 @@ nv10_fifo_dma_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
return ret;
args->v0.chid = chan->base.chid;
chan->ramfc = chan->base.chid * 32;
nvkm_kmap(imem->ramfc);
nvkm_wo32(imem->ramfc, chan->ramfc + 0x00, args->v0.offset);
nvkm_wo32(imem->ramfc, chan->ramfc + 0x04, args->v0.offset);
nvkm_wo32(imem->ramfc, chan->ramfc + 0x0c, chan->base.push->addr >> 4);
nvkm_wo32(imem->ramfc, chan->ramfc + 0x14,
NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
#ifdef __BIG_ENDIAN
NV_PFIFO_CACHE1_BIG_ENDIAN |
#endif
NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
nvkm_done(imem->ramfc);
chan->base.func->ramfc->write(&chan->base, args->v0.offset, 0, BIT(0), false);
return 0;
}
......
......@@ -41,8 +41,6 @@ nv17_fifo_dma_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
} *args = data;
struct nv04_fifo *fifo = nv04_fifo(base);
struct nv04_fifo_chan *chan = NULL;
struct nvkm_device *device = fifo->base.engine.subdev.device;
struct nvkm_instmem *imem = device->imem;
int ret = -ENOSYS;
nvif_ioctl(parent, "create channel dma size %d\n", size);
......@@ -71,20 +69,8 @@ nv17_fifo_dma_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
return ret;
args->v0.chid = chan->base.chid;
chan->ramfc = chan->base.chid * 64;
nvkm_kmap(imem->ramfc);
nvkm_wo32(imem->ramfc, chan->ramfc + 0x00, args->v0.offset);
nvkm_wo32(imem->ramfc, chan->ramfc + 0x04, args->v0.offset);
nvkm_wo32(imem->ramfc, chan->ramfc + 0x0c, chan->base.push->addr >> 4);
nvkm_wo32(imem->ramfc, chan->ramfc + 0x14,
NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
#ifdef __BIG_ENDIAN
NV_PFIFO_CACHE1_BIG_ENDIAN |
#endif
NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
nvkm_done(imem->ramfc);
chan->base.func->ramfc->write(&chan->base, args->v0.offset, 0, BIT(0), false);
return 0;
}
......
......@@ -194,8 +194,6 @@ nv40_fifo_dma_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
} *args = data;
struct nv04_fifo *fifo = nv04_fifo(base);
struct nv04_fifo_chan *chan = NULL;
struct nvkm_device *device = fifo->base.engine.subdev.device;
struct nvkm_instmem *imem = device->imem;
int ret = -ENOSYS;
nvif_ioctl(parent, "create channel dma size %d\n", size);
......@@ -224,21 +222,8 @@ nv40_fifo_dma_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
return ret;
args->v0.chid = chan->base.chid;
chan->ramfc = chan->base.chid * 128;
nvkm_kmap(imem->ramfc);
nvkm_wo32(imem->ramfc, chan->ramfc + 0x00, args->v0.offset);
nvkm_wo32(imem->ramfc, chan->ramfc + 0x04, args->v0.offset);
nvkm_wo32(imem->ramfc, chan->ramfc + 0x0c, chan->base.push->addr >> 4);
nvkm_wo32(imem->ramfc, chan->ramfc + 0x18, 0x30000000 |
NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
#ifdef __BIG_ENDIAN
NV_PFIFO_CACHE1_BIG_ENDIAN |
#endif
NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
nvkm_wo32(imem->ramfc, chan->ramfc + 0x3c, 0x0001ffff);
nvkm_done(imem->ramfc);
chan->base.func->ramfc->write(&chan->base, args->v0.offset, 0, BIT(0), false);
return 0;
}
......
......@@ -25,7 +25,7 @@
#include "chan.h"
#include "runl.h"
#include <core/gpuobj.h>
#include <core/ramht.h>
#include "nv50.h"
#include "channv50.h"
......@@ -37,13 +37,69 @@ g84_chan_bind(struct nvkm_chan *chan)
{
struct nvkm_device *device = chan->cgrp->runl->fifo->engine.subdev.device;
nvkm_wr32(device, 0x002600 + (chan->id * 4), nv50_fifo_chan(chan)->ramfc->addr >> 8);
nvkm_wr32(device, 0x002600 + (chan->id * 4), chan->ramfc->addr >> 8);
}
static int
g84_chan_ramfc_write(struct nvkm_chan *chan, u64 offset, u64 length, u32 devm, bool priv)
{
struct nvkm_device *device = chan->cgrp->runl->fifo->engine.subdev.device;
const u32 limit2 = ilog2(length / 8);
int ret;
ret = nvkm_gpuobj_new(device, 0x0200, 0, true, chan->inst, &chan->eng);
if (ret)
return ret;
ret = nvkm_gpuobj_new(device, 0x4000, 0, false, chan->inst, &chan->pgd);
if (ret)
return ret;
ret = nvkm_gpuobj_new(device, 0x1000, 0x400, true, chan->inst, &chan->cache);
if (ret)
return ret;
ret = nvkm_gpuobj_new(device, 0x100, 0x100, true, chan->inst, &chan->ramfc);
if (ret)
return ret;
ret = nvkm_ramht_new(device, 0x8000, 16, chan->inst, &chan->ramht);
if (ret)
return ret;
nv50_fifo_chan(chan)->eng = chan->eng;
nv50_fifo_chan(chan)->ramht = chan->ramht;
nvkm_kmap(chan->ramfc);
nvkm_wo32(chan->ramfc, 0x3c, 0x403f6078);
nvkm_wo32(chan->ramfc, 0x44, 0x01003fff);
nvkm_wo32(chan->ramfc, 0x48, chan->push->node->offset >> 4);
nvkm_wo32(chan->ramfc, 0x50, lower_32_bits(offset));
nvkm_wo32(chan->ramfc, 0x54, upper_32_bits(offset) | (limit2 << 16));
nvkm_wo32(chan->ramfc, 0x60, 0x7fffffff);
nvkm_wo32(chan->ramfc, 0x78, 0x00000000);
nvkm_wo32(chan->ramfc, 0x7c, 0x30000000 | devm);
nvkm_wo32(chan->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
(4 << 24) /* SEARCH_FULL */ |
(chan->ramht->gpuobj->node->offset >> 4));
nvkm_wo32(chan->ramfc, 0x88, chan->cache->addr >> 10);
nvkm_wo32(chan->ramfc, 0x98, chan->inst->addr >> 12);
nvkm_done(chan->ramfc);
return 0;
}
static const struct nvkm_chan_func_ramfc
g84_chan_ramfc = {
.write = g84_chan_ramfc_write,
.ctxdma = true,
.devm = 0xfff,
};
const struct nvkm_chan_func
g84_chan = {
.inst = &nv50_chan_inst,
.userd = &nv50_chan_userd,
.ramfc = &g84_chan_ramfc,
.bind = g84_chan_bind,
.unbind = nv50_chan_unbind,
.start = nv50_chan_start,
......
......@@ -82,6 +82,39 @@ gf100_chan_bind(struct nvkm_chan *chan)
nvkm_wr32(device, 0x003000 + (chan->id * 8), 0xc0000000 | chan->inst->addr >> 12);
}
static int
gf100_chan_ramfc_write(struct nvkm_chan *chan, u64 offset, u64 length, u32 devm, bool priv)
{
const u64 userd = nvkm_memory_addr(chan->userd.mem) + chan->userd.base;
const u32 limit2 = ilog2(length / 8);
nvkm_kmap(chan->inst);
nvkm_wo32(chan->inst, 0x08, lower_32_bits(userd));
nvkm_wo32(chan->inst, 0x0c, upper_32_bits(userd));
nvkm_wo32(chan->inst, 0x10, 0x0000face);
nvkm_wo32(chan->inst, 0x30, 0xfffff902);
nvkm_wo32(chan->inst, 0x48, lower_32_bits(offset));
nvkm_wo32(chan->inst, 0x4c, upper_32_bits(offset) | (limit2 << 16));
nvkm_wo32(chan->inst, 0x54, 0x00000002);
nvkm_wo32(chan->inst, 0x84, 0x20400000);
nvkm_wo32(chan->inst, 0x94, 0x30000000 | devm);
nvkm_wo32(chan->inst, 0x9c, 0x00000100);
nvkm_wo32(chan->inst, 0xa4, 0x1f1f1f1f);
nvkm_wo32(chan->inst, 0xa8, 0x1f1f1f1f);
nvkm_wo32(chan->inst, 0xac, 0x0000001f);
nvkm_wo32(chan->inst, 0xb8, 0xf8000000);
nvkm_wo32(chan->inst, 0xf8, 0x10003080); /* 0x002310 */
nvkm_wo32(chan->inst, 0xfc, 0x10000010); /* 0x002350 */
nvkm_done(chan->inst);
return 0;
}
static const struct nvkm_chan_func_ramfc
gf100_chan_ramfc = {
.write = gf100_chan_ramfc_write,
.devm = 0xfff,
};
void
gf100_chan_userd_clear(struct nvkm_chan *chan)
{
......@@ -117,6 +150,7 @@ static const struct nvkm_chan_func
gf100_chan = {
.inst = &gf100_chan_inst,
.userd = &gf100_chan_userd,
.ramfc = &gf100_chan_ramfc,
.bind = gf100_chan_bind,
.unbind = gf100_chan_unbind,
.start = gf100_chan_start,
......
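In the Fermi RAMFC layout written above (and the Kepler/Volta variants below follow the same scheme), the push-buffer ring location and size share a pair of words: 0x48 holds the low 32 bits of the GPFIFO base, and 0x4c packs the high bits of the base with limit2 in bits 16 and up, where limit2 is ilog2 of the entry count (the length / 8 suggesting 8-byte entries). A small standalone illustration of that packing, plain C arithmetic rather than the kernel helpers:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t offset = 0x140000000ULL;	/* example GPFIFO ring base */
	uint64_t length = 0x2000;		/* ring bytes: 1024 8-byte entries */
	uint32_t limit2 = 0;
	uint64_t n = length / 8;

	while (n >>= 1)				/* ilog2(length / 8) */
		limit2++;

	uint32_t lo = (uint32_t)offset;				 /* RAMFC word 0x48 */
	uint32_t hi = (uint32_t)(offset >> 32) | (limit2 << 16); /* RAMFC word 0x4c */

	printf("0x48 = %#010x, 0x4c = %#010x (limit2 = %u)\n", lo, hi, limit2);
	return 0;
}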
......@@ -78,6 +78,39 @@ gk104_chan_bind(struct nvkm_chan *chan)
gk104_chan_bind_inst(chan);
}
static int
gk104_chan_ramfc_write(struct nvkm_chan *chan, u64 offset, u64 length, u32 devm, bool priv)
{
const u64 userd = nvkm_memory_addr(chan->userd.mem) + chan->userd.base;
const u32 limit2 = ilog2(length / 8);
nvkm_kmap(chan->inst);
nvkm_wo32(chan->inst, 0x08, lower_32_bits(userd));
nvkm_wo32(chan->inst, 0x0c, upper_32_bits(userd));
nvkm_wo32(chan->inst, 0x10, 0x0000face);
nvkm_wo32(chan->inst, 0x30, 0xfffff902);
nvkm_wo32(chan->inst, 0x48, lower_32_bits(offset));
nvkm_wo32(chan->inst, 0x4c, upper_32_bits(offset) | (limit2 << 16));
nvkm_wo32(chan->inst, 0x84, 0x20400000);
nvkm_wo32(chan->inst, 0x94, 0x30000000 | devm);
nvkm_wo32(chan->inst, 0x9c, 0x00000100);
nvkm_wo32(chan->inst, 0xac, 0x0000001f);
nvkm_wo32(chan->inst, 0xe4, priv ? 0x00000020 : 0x00000000);
nvkm_wo32(chan->inst, 0xe8, chan->id);
nvkm_wo32(chan->inst, 0xb8, 0xf8000000);
nvkm_wo32(chan->inst, 0xf8, 0x10003080); /* 0x002310 */
nvkm_wo32(chan->inst, 0xfc, 0x10000010); /* 0x002350 */
nvkm_done(chan->inst);
return 0;
}
const struct nvkm_chan_func_ramfc
gk104_chan_ramfc = {
.write = gk104_chan_ramfc_write,
.devm = 0xfff,
.priv = true,
};
const struct nvkm_chan_func_userd
gk104_chan_userd = {
.bar = 1,
......@@ -89,6 +122,7 @@ static const struct nvkm_chan_func
gk104_chan = {
.inst = &gf100_chan_inst,
.userd = &gk104_chan_userd,
.ramfc = &gk104_chan_ramfc,
.bind = gk104_chan_bind,
.unbind = gk104_chan_unbind,
.start = gk104_chan_start,
......
......@@ -51,6 +51,7 @@ const struct nvkm_chan_func
gk110_chan = {
.inst = &gf100_chan_inst,
.userd = &gk104_chan_userd,
.ramfc = &gk104_chan_ramfc,
.bind = gk104_chan_bind,
.unbind = gk104_chan_unbind,
.start = gk104_chan_start,
......
......@@ -36,6 +36,7 @@ const struct nvkm_chan_func
gm107_chan = {
.inst = &gf100_chan_inst,
.userd = &gk104_chan_userd,
.ramfc = &gk104_chan_ramfc,
.bind = gk104_chan_bind_inst,
.unbind = gk104_chan_unbind,
.start = gk104_chan_start,
......
......@@ -65,23 +65,9 @@ g84_fifo_gpfifo_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
args->v0.chid = chan->base.chid;
ioffset = args->v0.ioffset;
ilength = order_base_2(args->v0.ilength / 8);
ilength = args->v0.ilength;
nvkm_kmap(chan->ramfc);
nvkm_wo32(chan->ramfc, 0x3c, 0x403f6078);
nvkm_wo32(chan->ramfc, 0x44, 0x01003fff);
nvkm_wo32(chan->ramfc, 0x48, chan->base.push->node->offset >> 4);
nvkm_wo32(chan->ramfc, 0x50, lower_32_bits(ioffset));
nvkm_wo32(chan->ramfc, 0x54, upper_32_bits(ioffset) | (ilength << 16));
nvkm_wo32(chan->ramfc, 0x60, 0x7fffffff);
nvkm_wo32(chan->ramfc, 0x78, 0x00000000);
nvkm_wo32(chan->ramfc, 0x7c, 0x30000001);
nvkm_wo32(chan->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
(4 << 24) /* SEARCH_FULL */ |
(chan->ramht->gpuobj->node->offset >> 4));
nvkm_wo32(chan->ramfc, 0x88, chan->cache->addr >> 10);
nvkm_wo32(chan->ramfc, 0x98, chan->base.inst->addr >> 12);
nvkm_done(chan->ramfc);
chan->base.func->ramfc->write(&chan->base, ioffset, ilength, BIT(0), false);
return 0;
}
......
......@@ -190,28 +190,9 @@ gf100_fifo_gpfifo_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
usermem = nvkm_memory_addr(chan->base.userd.mem) + chan->base.userd.base;
ioffset = args->v0.ioffset;
ilength = order_base_2(args->v0.ilength / 8);
/* RAMFC */
nvkm_kmap(chan->base.inst);
nvkm_wo32(chan->base.inst, 0x08, lower_32_bits(usermem));
nvkm_wo32(chan->base.inst, 0x0c, upper_32_bits(usermem));
nvkm_wo32(chan->base.inst, 0x10, 0x0000face);
nvkm_wo32(chan->base.inst, 0x30, 0xfffff902);
nvkm_wo32(chan->base.inst, 0x48, lower_32_bits(ioffset));
nvkm_wo32(chan->base.inst, 0x4c, upper_32_bits(ioffset) |
(ilength << 16));
nvkm_wo32(chan->base.inst, 0x54, 0x00000002);
nvkm_wo32(chan->base.inst, 0x84, 0x20400000);
nvkm_wo32(chan->base.inst, 0x94, 0x30000001);
nvkm_wo32(chan->base.inst, 0x9c, 0x00000100);
nvkm_wo32(chan->base.inst, 0xa4, 0x1f1f1f1f);
nvkm_wo32(chan->base.inst, 0xa8, 0x1f1f1f1f);
nvkm_wo32(chan->base.inst, 0xac, 0x0000001f);
nvkm_wo32(chan->base.inst, 0xb8, 0xf8000000);
nvkm_wo32(chan->base.inst, 0xf8, 0x10003080); /* 0x002310 */
nvkm_wo32(chan->base.inst, 0xfc, 0x10000010); /* 0x002350 */
nvkm_done(chan->base.inst);
ilength = args->v0.ilength;
chan->base.func->ramfc->write(&chan->base, ioffset, ilength, BIT(0), false);
return 0;
}
......
......@@ -176,7 +176,6 @@ gk104_fifo_gpfifo_new_(struct gk104_fifo *fifo, u64 *runlists, u16 *chid,
{
struct gk104_fifo_chan *chan;
int runlist = ffs(*runlists) -1, ret;
u64 usermem;
if (!vmm || runlist < 0 || runlist >= fifo->runlist_nr)
return -EINVAL;
......@@ -199,28 +198,7 @@ gk104_fifo_gpfifo_new_(struct gk104_fifo *fifo, u64 *runlists, u16 *chid,
*chid = chan->base.chid;
*inst = chan->base.inst->addr;
usermem = nvkm_memory_addr(chan->base.userd.mem) + chan->base.userd.base;
ilength = order_base_2(ilength / 8);
/* RAMFC */
nvkm_kmap(chan->base.inst);
nvkm_wo32(chan->base.inst, 0x08, lower_32_bits(usermem));
nvkm_wo32(chan->base.inst, 0x0c, upper_32_bits(usermem));
nvkm_wo32(chan->base.inst, 0x10, 0x0000face);
nvkm_wo32(chan->base.inst, 0x30, 0xfffff902);
nvkm_wo32(chan->base.inst, 0x48, lower_32_bits(ioffset));
nvkm_wo32(chan->base.inst, 0x4c, upper_32_bits(ioffset) |
(ilength << 16));
nvkm_wo32(chan->base.inst, 0x84, 0x20400000);
nvkm_wo32(chan->base.inst, 0x94, 0x30000001);
nvkm_wo32(chan->base.inst, 0x9c, 0x00000100);
nvkm_wo32(chan->base.inst, 0xac, 0x0000001f);
nvkm_wo32(chan->base.inst, 0xe4, priv ? 0x00000020 : 0x00000000);
nvkm_wo32(chan->base.inst, 0xe8, chan->base.chid);
nvkm_wo32(chan->base.inst, 0xb8, 0xf8000000);
nvkm_wo32(chan->base.inst, 0xf8, 0x10003080); /* 0x002310 */
nvkm_wo32(chan->base.inst, 0xfc, 0x10000010); /* 0x002350 */
nvkm_done(chan->base.inst);
chan->base.func->ramfc->write(&chan->base, ioffset, ilength, BIT(0), priv);
return 0;
}
......
......@@ -120,7 +120,6 @@ gv100_fifo_gpfifo_new_(const struct nvkm_fifo_chan_func *func,
{
struct gk104_fifo_chan *chan;
int runlist = ffs(*runlists) -1, ret;
u64 usermem;
if (!vmm || runlist < 0 || runlist >= fifo->runlist_nr)
return -EINVAL;
......@@ -143,27 +142,7 @@ gv100_fifo_gpfifo_new_(const struct nvkm_fifo_chan_func *func,
*inst = chan->base.inst->addr;
*token = chan->base.func->doorbell_handle(&chan->base);
/* Clear channel control registers. */
usermem = nvkm_memory_addr(chan->base.userd.mem) + chan->base.userd.base;
ilength = order_base_2(ilength / 8);
/* RAMFC */
nvkm_kmap(chan->base.inst);
nvkm_wo32(chan->base.inst, 0x008, lower_32_bits(usermem));
nvkm_wo32(chan->base.inst, 0x00c, upper_32_bits(usermem));
nvkm_wo32(chan->base.inst, 0x010, 0x0000face);
nvkm_wo32(chan->base.inst, 0x030, 0x7ffff902);
nvkm_wo32(chan->base.inst, 0x048, lower_32_bits(ioffset));
nvkm_wo32(chan->base.inst, 0x04c, upper_32_bits(ioffset) |
(ilength << 16));
nvkm_wo32(chan->base.inst, 0x084, 0x20400000);
nvkm_wo32(chan->base.inst, 0x094, 0x30000001);
nvkm_wo32(chan->base.inst, 0x0e4, priv ? 0x00000020 : 0x00000000);
nvkm_wo32(chan->base.inst, 0x0e8, chan->base.chid);
nvkm_wo32(chan->base.inst, 0x0f4, 0x00001000);
nvkm_wo32(chan->base.inst, 0x0f8, 0x10003080);
nvkm_mo32(chan->base.inst, 0x218, 0x00000000, 0x00000000);
nvkm_done(chan->base.inst);
chan->base.func->ramfc->write(&chan->base, ioffset, ilength, BIT(0), priv);
return 0;
}
......
......@@ -65,21 +65,9 @@ nv50_fifo_gpfifo_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
args->v0.chid = chan->base.chid;
ioffset = args->v0.ioffset;
ilength = order_base_2(args->v0.ilength / 8);
ilength = args->v0.ilength;
nvkm_kmap(chan->ramfc);
nvkm_wo32(chan->ramfc, 0x3c, 0x403f6078);
nvkm_wo32(chan->ramfc, 0x44, 0x01003fff);
nvkm_wo32(chan->ramfc, 0x48, chan->base.push->node->offset >> 4);
nvkm_wo32(chan->ramfc, 0x50, lower_32_bits(ioffset));
nvkm_wo32(chan->ramfc, 0x54, upper_32_bits(ioffset) | (ilength << 16));
nvkm_wo32(chan->ramfc, 0x60, 0x7fffffff);
nvkm_wo32(chan->ramfc, 0x78, 0x00000000);
nvkm_wo32(chan->ramfc, 0x7c, 0x30000001);
nvkm_wo32(chan->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
(4 << 24) /* SEARCH_FULL */ |
(chan->ramht->gpuobj->node->offset >> 4));
nvkm_done(chan->ramfc);
chan->base.func->ramfc->write(&chan->base, ioffset, ilength, BIT(0), false);
return 0;
}
......
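Note that the converted dma/gpfifo paths above keep their existing behaviour by passing devm = BIT(0), which reproduces the 0x30000001 value previously hard-coded into the RAMFC, whereas the new ramfc tables declare a wider .devm default of 0xfff that these legacy paths do not yet use. A quick check of the two resulting words, plain arithmetic only:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t legacy = 0x30000000 | (1u << 0);	/* BIT(0), as the old paths wrote */
	uint32_t wide   = 0x30000000 | 0xfff;		/* the new .devm default */

	printf("%#010x %#010x\n", legacy, wide);	/* 0x30000001 0x30000fff */
	return 0;
}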
......@@ -38,6 +38,37 @@ gv100_chan_doorbell_handle(struct nvkm_chan *chan)
return chan->id;
}
static int
gv100_chan_ramfc_write(struct nvkm_chan *chan, u64 offset, u64 length, u32 devm, bool priv)
{
const u64 userd = nvkm_memory_addr(chan->userd.mem) + chan->userd.base;
const u32 limit2 = ilog2(length / 8);
nvkm_kmap(chan->inst);
nvkm_wo32(chan->inst, 0x008, lower_32_bits(userd));
nvkm_wo32(chan->inst, 0x00c, upper_32_bits(userd));
nvkm_wo32(chan->inst, 0x010, 0x0000face);
nvkm_wo32(chan->inst, 0x030, 0x7ffff902);
nvkm_wo32(chan->inst, 0x048, lower_32_bits(offset));
nvkm_wo32(chan->inst, 0x04c, upper_32_bits(offset) | (limit2 << 16));
nvkm_wo32(chan->inst, 0x084, 0x20400000);
nvkm_wo32(chan->inst, 0x094, 0x30000000 | devm);
nvkm_wo32(chan->inst, 0x0e4, priv ? 0x00000020 : 0x00000000);
nvkm_wo32(chan->inst, 0x0e8, chan->id);
nvkm_wo32(chan->inst, 0x0f4, 0x00001000 | (priv ? 0x00000100 : 0x00000000));
nvkm_wo32(chan->inst, 0x0f8, 0x10003080);
nvkm_mo32(chan->inst, 0x218, 0x00000000, 0x00000000);
nvkm_done(chan->inst);
return 0;
}
const struct nvkm_chan_func_ramfc
gv100_chan_ramfc = {
.write = gv100_chan_ramfc_write,
.devm = 0xfff,
.priv = true,
};
const struct nvkm_chan_func_userd
gv100_chan_userd = {
.bar = 1, /*FIXME: hw doesn't have poller, flip to user-allocated in uapi commit. */
......@@ -49,6 +80,7 @@ static const struct nvkm_chan_func
gv100_chan = {
.inst = &gf100_chan_inst,
.userd = &gv100_chan_userd,
.ramfc = &gv100_chan_ramfc,
.bind = gk104_chan_bind_inst,
.unbind = gk104_chan_unbind,
.start = gk104_chan_start,
......

......@@ -38,42 +38,29 @@
#include <nvif/class.h>
static const struct nv04_fifo_ramfc
nv04_fifo_ramfc[] = {
{ 32, 0, 0x00, 0, NV04_PFIFO_CACHE1_DMA_PUT },
{ 32, 0, 0x04, 0, NV04_PFIFO_CACHE1_DMA_GET },
{ 16, 0, 0x08, 0, NV04_PFIFO_CACHE1_DMA_INSTANCE },
{ 16, 16, 0x08, 0, NV04_PFIFO_CACHE1_DMA_DCOUNT },
{ 32, 0, 0x0c, 0, NV04_PFIFO_CACHE1_DMA_STATE },
{ 32, 0, 0x10, 0, NV04_PFIFO_CACHE1_DMA_FETCH },
{ 32, 0, 0x14, 0, NV04_PFIFO_CACHE1_ENGINE },
{ 32, 0, 0x18, 0, NV04_PFIFO_CACHE1_PULL1 },
{}
};
void
nv04_chan_stop(struct nvkm_chan *chan)
{
struct nv04_fifo *fifo = nv04_fifo(chan->cgrp->runl->fifo);
struct nvkm_device *device = fifo->base.engine.subdev.device;
struct nvkm_fifo *fifo = chan->cgrp->runl->fifo;
struct nvkm_device *device = fifo->engine.subdev.device;
struct nvkm_memory *fctx = device->imem->ramfc;
const struct nv04_fifo_ramfc *c;
const struct nvkm_ramfc_layout *c;
unsigned long flags;
u32 data = nv04_fifo_chan(chan)->ramfc;
u32 data = chan->ramfc_offset;
u32 chid;
/* prevent fifo context switches */
spin_lock_irqsave(&fifo->base.lock, flags);
spin_lock_irqsave(&fifo->lock, flags);
nvkm_wr32(device, NV03_PFIFO_CACHES, 0);
/* if this channel is active, replace it with a null context */
chid = nvkm_rd32(device, NV03_PFIFO_CACHE1_PUSH1) & fifo->base.chid->mask;
chid = nvkm_rd32(device, NV03_PFIFO_CACHE1_PUSH1) & fifo->chid->mask;
if (chid == chan->id) {
nvkm_mask(device, NV04_PFIFO_CACHE1_DMA_PUSH, 0x00000001, 0);
nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0, 0);
nvkm_mask(device, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0);
c = fifo->ramfc;
c = chan->func->ramfc->layout;
nvkm_kmap(fctx);
do {
u32 rm = ((1ULL << c->bits) - 1) << c->regs;
......@@ -84,14 +71,14 @@ nv04_chan_stop(struct nvkm_chan *chan)
} while ((++c)->bits);
nvkm_done(fctx);
c = fifo->ramfc;
c = chan->func->ramfc->layout;
do {
nvkm_wr32(device, c->regp, 0x00000000);
} while ((++c)->bits);
nvkm_wr32(device, NV03_PFIFO_CACHE1_GET, 0);
nvkm_wr32(device, NV03_PFIFO_CACHE1_PUT, 0);
nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH1, fifo->base.chid->mask);
nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH1, fifo->chid->mask);
nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0, 1);
nvkm_wr32(device, NV04_PFIFO_CACHE1_PULL0, 1);
}
......@@ -99,7 +86,7 @@ nv04_chan_stop(struct nvkm_chan *chan)
/* restore normal operation, after disabling dma mode */
nvkm_mask(device, NV04_PFIFO_MODE, BIT(chan->id), 0);
nvkm_wr32(device, NV03_PFIFO_CACHES, 1);
spin_unlock_irqrestore(&fifo->base.lock, flags);
spin_unlock_irqrestore(&fifo->lock, flags);
}
void
......@@ -113,6 +100,59 @@ nv04_chan_start(struct nvkm_chan *chan)
spin_unlock_irqrestore(&fifo->lock, flags);
}
void
nv04_chan_ramfc_clear(struct nvkm_chan *chan)
{
struct nvkm_memory *ramfc = chan->cgrp->runl->fifo->engine.subdev.device->imem->ramfc;
const struct nvkm_ramfc_layout *c = chan->func->ramfc->layout;
nvkm_kmap(ramfc);
do {
nvkm_wo32(ramfc, chan->ramfc_offset + c->ctxp, 0x00000000);
} while ((++c)->bits);
nvkm_done(ramfc);
}
static int
nv04_chan_ramfc_write(struct nvkm_chan *chan, u64 offset, u64 length, u32 devm, bool priv)
{
struct nvkm_memory *ramfc = chan->cgrp->runl->fifo->engine.subdev.device->imem->ramfc;
const u32 base = chan->id * 32;
chan->ramfc_offset = base;
nvkm_kmap(ramfc);
nvkm_wo32(ramfc, base + 0x00, offset);
nvkm_wo32(ramfc, base + 0x04, offset);
nvkm_wo32(ramfc, base + 0x08, chan->push->addr >> 4);
nvkm_wo32(ramfc, base + 0x10, NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
#ifdef __BIG_ENDIAN
NV_PFIFO_CACHE1_BIG_ENDIAN |
#endif
NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
nvkm_done(ramfc);
return 0;
}
static const struct nvkm_chan_func_ramfc
nv04_chan_ramfc = {
.layout = (const struct nvkm_ramfc_layout[]) {
{ 32, 0, 0x00, 0, NV04_PFIFO_CACHE1_DMA_PUT },
{ 32, 0, 0x04, 0, NV04_PFIFO_CACHE1_DMA_GET },
{ 16, 0, 0x08, 0, NV04_PFIFO_CACHE1_DMA_INSTANCE },
{ 16, 16, 0x08, 0, NV04_PFIFO_CACHE1_DMA_DCOUNT },
{ 32, 0, 0x0c, 0, NV04_PFIFO_CACHE1_DMA_STATE },
{ 32, 0, 0x10, 0, NV04_PFIFO_CACHE1_DMA_FETCH },
{ 32, 0, 0x14, 0, NV04_PFIFO_CACHE1_ENGINE },
{ 32, 0, 0x18, 0, NV04_PFIFO_CACHE1_PULL1 },
{}
},
.write = nv04_chan_ramfc_write,
.clear = nv04_chan_ramfc_clear,
.ctxdma = true,
};
const struct nvkm_chan_func_userd
nv04_chan_userd = {
.bar = 0,
......@@ -129,6 +169,7 @@ static const struct nvkm_chan_func
nv04_chan = {
.inst = &nv04_chan_inst,
.userd = &nv04_chan_userd,
.ramfc = &nv04_chan_ramfc,
.start = nv04_chan_start,
.stop = nv04_chan_stop,
};
......@@ -476,7 +517,6 @@ nv04_fifo_new_(const struct nvkm_fifo_func *func, struct nvkm_device *device,
if (!(fifo = kzalloc(sizeof(*fifo), GFP_KERNEL)))
return -ENOMEM;
fifo->ramfc = ramfc;
*pfifo = &fifo->base;
ret = nvkm_fifo_ctor(func, device, type, inst, &fifo->base);
......@@ -507,5 +547,5 @@ int
nv04_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_fifo **pfifo)
{
return nv04_fifo_new_(&nv04_fifo, device, type, inst, 16, nv04_fifo_ramfc, pfifo);
return nv04_fifo_new_(&nv04_fifo, device, type, inst, 0, NULL, pfifo);
}
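The nvkm_ramfc_layout tables added for nv04 (and nv10/nv17/nv40 below) drive a generic loop: each entry names a field that is bits wide, read from bit regs of PFIFO register regp, and stored at bit ctxs of RAMFC byte offset ctxp (and zeroed again by nv04_chan_ramfc_clear()). A standalone sketch of how such a table can be walked to save register fields into RAMFC, loosely following the loop in nv04_chan_stop(), with mock register and RAMFC arrays in place of nvkm_rd32()/nvkm_wo32():

#include <stdint.h>
#include <stdio.h>

struct ramfc_layout {		/* mirrors nvkm_ramfc_layout */
	unsigned bits:6;
	unsigned ctxs:5;
	unsigned ctxp:8;
	unsigned regs:5;
	unsigned regp;
};

static uint32_t mmio[0x4000];	/* mock register file, indexed by address >> 2 */
static uint32_t ramfc[0x40];	/* mock RAMFC instance memory, indexed by offset >> 2 */

static void save_channel(const struct ramfc_layout *c, uint32_t base)
{
	do {
		uint32_t rm = ((1ULL << c->bits) - 1) << c->regs;	/* register field mask */
		uint32_t cm = ((1ULL << c->bits) - 1) << c->ctxs;	/* context field mask */
		uint32_t rv = (mmio[c->regp >> 2] & rm) >> c->regs;
		uint32_t cv = ramfc[(base + c->ctxp) >> 2] & ~cm;

		ramfc[(base + c->ctxp) >> 2] = cv | (rv << c->ctxs);
	} while ((++c)->bits);
}

int main(void)
{
	/* A two-entry layout with made-up register addresses, for demonstration. */
	static const struct ramfc_layout layout[] = {
		{ 32,  0, 0x00,  0, 0x3220 },
		{ 16, 16, 0x04,  0, 0x3224 },
		{}
	};

	mmio[0x3220 >> 2] = 0xdeadbeef;
	mmio[0x3224 >> 2] = 0x00001234;
	save_channel(layout, 0x20);		/* e.g. chan->ramfc_offset = 0x20 */
	printf("%#010x %#010x\n", ramfc[0x20 >> 2], ramfc[0x24 >> 2]);
	return 0;
}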
......@@ -4,17 +4,10 @@
#define nv04_fifo(p) container_of((p), struct nv04_fifo, base)
#include "priv.h"
struct nv04_fifo_ramfc {
unsigned bits:6;
unsigned ctxs:5;
unsigned ctxp:8;
unsigned regs:5;
unsigned regp;
};
#define nv04_fifo_ramfc nvkm_ramfc_layout
struct nv04_fifo {
struct nvkm_fifo base;
const struct nv04_fifo_ramfc *ramfc;
};
int nv04_fifo_new_(const struct nvkm_fifo_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
......
......@@ -21,7 +21,12 @@
*
* Authors: Ben Skeggs
*/
#include "cgrp.h"
#include "chan.h"
#include "runl.h"
#include <core/gpuobj.h>
#include <subdev/instmem.h>
#include "nv04.h"
#include "channv04.h"
......@@ -29,24 +34,52 @@
#include <nvif/class.h>
static const struct nv04_fifo_ramfc
nv10_fifo_ramfc[] = {
{ 32, 0, 0x00, 0, NV04_PFIFO_CACHE1_DMA_PUT },
{ 32, 0, 0x04, 0, NV04_PFIFO_CACHE1_DMA_GET },
{ 32, 0, 0x08, 0, NV10_PFIFO_CACHE1_REF_CNT },
{ 16, 0, 0x0c, 0, NV04_PFIFO_CACHE1_DMA_INSTANCE },
{ 16, 16, 0x0c, 0, NV04_PFIFO_CACHE1_DMA_DCOUNT },
{ 32, 0, 0x10, 0, NV04_PFIFO_CACHE1_DMA_STATE },
{ 32, 0, 0x14, 0, NV04_PFIFO_CACHE1_DMA_FETCH },
{ 32, 0, 0x18, 0, NV04_PFIFO_CACHE1_ENGINE },
{ 32, 0, 0x1c, 0, NV04_PFIFO_CACHE1_PULL1 },
{}
};
static int
nv10_chan_ramfc_write(struct nvkm_chan *chan, u64 offset, u64 length, u32 devm, bool priv)
{
struct nvkm_memory *ramfc = chan->cgrp->runl->fifo->engine.subdev.device->imem->ramfc;
const u32 base = chan->id * 32;
chan->ramfc_offset = base;
nvkm_kmap(ramfc);
nvkm_wo32(ramfc, base + 0x00, offset);
nvkm_wo32(ramfc, base + 0x04, offset);
nvkm_wo32(ramfc, base + 0x0c, chan->push->addr >> 4);
nvkm_wo32(ramfc, base + 0x14, NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
#ifdef __BIG_ENDIAN
NV_PFIFO_CACHE1_BIG_ENDIAN |
#endif
NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
nvkm_done(ramfc);
return 0;
}
static const struct nvkm_chan_func_ramfc
nv10_chan_ramfc = {
.layout = (const struct nvkm_ramfc_layout[]) {
{ 32, 0, 0x00, 0, NV04_PFIFO_CACHE1_DMA_PUT },
{ 32, 0, 0x04, 0, NV04_PFIFO_CACHE1_DMA_GET },
{ 32, 0, 0x08, 0, NV10_PFIFO_CACHE1_REF_CNT },
{ 16, 0, 0x0c, 0, NV04_PFIFO_CACHE1_DMA_INSTANCE },
{ 16, 16, 0x0c, 0, NV04_PFIFO_CACHE1_DMA_DCOUNT },
{ 32, 0, 0x10, 0, NV04_PFIFO_CACHE1_DMA_STATE },
{ 32, 0, 0x14, 0, NV04_PFIFO_CACHE1_DMA_FETCH },
{ 32, 0, 0x18, 0, NV04_PFIFO_CACHE1_ENGINE },
{ 32, 0, 0x1c, 0, NV04_PFIFO_CACHE1_PULL1 },
{}
},
.write = nv10_chan_ramfc_write,
.clear = nv04_chan_ramfc_clear,
.ctxdma = true,
};
static const struct nvkm_chan_func
nv10_chan = {
.inst = &nv04_chan_inst,
.userd = &nv04_chan_userd,
.ramfc = &nv10_chan_ramfc,
.start = nv04_chan_start,
.stop = nv04_chan_stop,
};
......@@ -78,5 +111,5 @@ int
nv10_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_fifo **pfifo)
{
return nv04_fifo_new_(&nv10_fifo, device, type, inst, 32, nv10_fifo_ramfc, pfifo);
return nv04_fifo_new_(&nv10_fifo, device, type, inst, 0, NULL, pfifo);
}
......@@ -21,8 +21,10 @@
*
* Authors: Ben Skeggs
*/
#include "cgrp.h"
#include "chan.h"
#include "chid.h"
#include "runl.h"
#include "nv04.h"
#include "channv04.h"
......@@ -33,29 +35,57 @@
#include <nvif/class.h>
static const struct nv04_fifo_ramfc
nv17_fifo_ramfc[] = {
{ 32, 0, 0x00, 0, NV04_PFIFO_CACHE1_DMA_PUT },
{ 32, 0, 0x04, 0, NV04_PFIFO_CACHE1_DMA_GET },
{ 32, 0, 0x08, 0, NV10_PFIFO_CACHE1_REF_CNT },
{ 16, 0, 0x0c, 0, NV04_PFIFO_CACHE1_DMA_INSTANCE },
{ 16, 16, 0x0c, 0, NV04_PFIFO_CACHE1_DMA_DCOUNT },
{ 32, 0, 0x10, 0, NV04_PFIFO_CACHE1_DMA_STATE },
{ 32, 0, 0x14, 0, NV04_PFIFO_CACHE1_DMA_FETCH },
{ 32, 0, 0x18, 0, NV04_PFIFO_CACHE1_ENGINE },
{ 32, 0, 0x1c, 0, NV04_PFIFO_CACHE1_PULL1 },
{ 32, 0, 0x20, 0, NV10_PFIFO_CACHE1_ACQUIRE_VALUE },
{ 32, 0, 0x24, 0, NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP },
{ 32, 0, 0x28, 0, NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT },
{ 32, 0, 0x2c, 0, NV10_PFIFO_CACHE1_SEMAPHORE },
{ 32, 0, 0x30, 0, NV10_PFIFO_CACHE1_DMA_SUBROUTINE },
{}
};
static int
nv17_chan_ramfc_write(struct nvkm_chan *chan, u64 offset, u64 length, u32 devm, bool priv)
{
struct nvkm_memory *ramfc = chan->cgrp->runl->fifo->engine.subdev.device->imem->ramfc;
const u32 base = chan->id * 64;
chan->ramfc_offset = base;
nvkm_kmap(ramfc);
nvkm_wo32(ramfc, base + 0x00, offset);
nvkm_wo32(ramfc, base + 0x04, offset);
nvkm_wo32(ramfc, base + 0x0c, chan->push->addr >> 4);
nvkm_wo32(ramfc, base + 0x14, NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
#ifdef __BIG_ENDIAN
NV_PFIFO_CACHE1_BIG_ENDIAN |
#endif
NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
nvkm_done(ramfc);
return 0;
}
static const struct nvkm_chan_func_ramfc
nv17_chan_ramfc = {
.layout = (const struct nvkm_ramfc_layout[]) {
{ 32, 0, 0x00, 0, NV04_PFIFO_CACHE1_DMA_PUT },
{ 32, 0, 0x04, 0, NV04_PFIFO_CACHE1_DMA_GET },
{ 32, 0, 0x08, 0, NV10_PFIFO_CACHE1_REF_CNT },
{ 16, 0, 0x0c, 0, NV04_PFIFO_CACHE1_DMA_INSTANCE },
{ 16, 16, 0x0c, 0, NV04_PFIFO_CACHE1_DMA_DCOUNT },
{ 32, 0, 0x10, 0, NV04_PFIFO_CACHE1_DMA_STATE },
{ 32, 0, 0x14, 0, NV04_PFIFO_CACHE1_DMA_FETCH },
{ 32, 0, 0x18, 0, NV04_PFIFO_CACHE1_ENGINE },
{ 32, 0, 0x1c, 0, NV04_PFIFO_CACHE1_PULL1 },
{ 32, 0, 0x20, 0, NV10_PFIFO_CACHE1_ACQUIRE_VALUE },
{ 32, 0, 0x24, 0, NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP },
{ 32, 0, 0x28, 0, NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT },
{ 32, 0, 0x2c, 0, NV10_PFIFO_CACHE1_SEMAPHORE },
{ 32, 0, 0x30, 0, NV10_PFIFO_CACHE1_DMA_SUBROUTINE },
{}
},
.write = nv17_chan_ramfc_write,
.clear = nv04_chan_ramfc_clear,
.ctxdma = true,
};
static const struct nvkm_chan_func
nv17_chan = {
.inst = &nv04_chan_inst,
.userd = &nv04_chan_userd,
.ramfc = &nv17_chan_ramfc,
.start = nv04_chan_start,
.stop = nv04_chan_stop,
};
......@@ -110,5 +140,5 @@ int
nv17_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_fifo **pfifo)
{
return nv04_fifo_new_(&nv17_fifo, device, type, inst, 32, nv17_fifo_ramfc, pfifo);
return nv04_fifo_new_(&nv17_fifo, device, type, inst, 0, NULL, pfifo);
}
......@@ -21,6 +21,7 @@
*
* Authors: Ben Skeggs
*/
#include "cgrp.h"
#include "chan.h"
#include "chid.h"
#include "runl.h"
......@@ -35,31 +36,61 @@
#include <nvif/class.h>
static const struct nv04_fifo_ramfc
nv40_fifo_ramfc[] = {
{ 32, 0, 0x00, 0, NV04_PFIFO_CACHE1_DMA_PUT },
{ 32, 0, 0x04, 0, NV04_PFIFO_CACHE1_DMA_GET },
{ 32, 0, 0x08, 0, NV10_PFIFO_CACHE1_REF_CNT },
{ 32, 0, 0x0c, 0, NV04_PFIFO_CACHE1_DMA_INSTANCE },
{ 32, 0, 0x10, 0, NV04_PFIFO_CACHE1_DMA_DCOUNT },
{ 32, 0, 0x14, 0, NV04_PFIFO_CACHE1_DMA_STATE },
{ 28, 0, 0x18, 0, NV04_PFIFO_CACHE1_DMA_FETCH },
{ 2, 28, 0x18, 28, 0x002058 },
{ 32, 0, 0x1c, 0, NV04_PFIFO_CACHE1_ENGINE },
{ 32, 0, 0x20, 0, NV04_PFIFO_CACHE1_PULL1 },
{ 32, 0, 0x24, 0, NV10_PFIFO_CACHE1_ACQUIRE_VALUE },
{ 32, 0, 0x28, 0, NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP },
{ 32, 0, 0x2c, 0, NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT },
{ 32, 0, 0x30, 0, NV10_PFIFO_CACHE1_SEMAPHORE },
{ 32, 0, 0x34, 0, NV10_PFIFO_CACHE1_DMA_SUBROUTINE },
{ 32, 0, 0x38, 0, NV40_PFIFO_GRCTX_INSTANCE },
{ 17, 0, 0x3c, 0, NV04_PFIFO_DMA_TIMESLICE },
{ 32, 0, 0x40, 0, 0x0032e4 },
{ 32, 0, 0x44, 0, 0x0032e8 },
{ 32, 0, 0x4c, 0, 0x002088 },
{ 32, 0, 0x50, 0, 0x003300 },
{ 32, 0, 0x54, 0, 0x00330c },
{}
};
static int
nv40_chan_ramfc_write(struct nvkm_chan *chan, u64 offset, u64 length, u32 devm, bool priv)
{
struct nvkm_memory *ramfc = chan->cgrp->runl->fifo->engine.subdev.device->imem->ramfc;
const u32 base = chan->id * 128;
chan->ramfc_offset = base;
nv04_fifo_chan(chan)->ramfc = base;
nvkm_kmap(ramfc);
nvkm_wo32(ramfc, base + 0x00, offset);
nvkm_wo32(ramfc, base + 0x04, offset);
nvkm_wo32(ramfc, base + 0x0c, chan->push->addr >> 4);
nvkm_wo32(ramfc, base + 0x18, 0x30000000 |
NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
#ifdef __BIG_ENDIAN
NV_PFIFO_CACHE1_BIG_ENDIAN |
#endif
NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
nvkm_wo32(ramfc, base + 0x3c, 0x0001ffff);
nvkm_done(ramfc);
return 0;
}
static const struct nvkm_chan_func_ramfc
nv40_chan_ramfc = {
.layout = (const struct nvkm_ramfc_layout[]) {
{ 32, 0, 0x00, 0, NV04_PFIFO_CACHE1_DMA_PUT },
{ 32, 0, 0x04, 0, NV04_PFIFO_CACHE1_DMA_GET },
{ 32, 0, 0x08, 0, NV10_PFIFO_CACHE1_REF_CNT },
{ 32, 0, 0x0c, 0, NV04_PFIFO_CACHE1_DMA_INSTANCE },
{ 32, 0, 0x10, 0, NV04_PFIFO_CACHE1_DMA_DCOUNT },
{ 32, 0, 0x14, 0, NV04_PFIFO_CACHE1_DMA_STATE },
{ 28, 0, 0x18, 0, NV04_PFIFO_CACHE1_DMA_FETCH },
{ 2, 28, 0x18, 28, 0x002058 },
{ 32, 0, 0x1c, 0, NV04_PFIFO_CACHE1_ENGINE },
{ 32, 0, 0x20, 0, NV04_PFIFO_CACHE1_PULL1 },
{ 32, 0, 0x24, 0, NV10_PFIFO_CACHE1_ACQUIRE_VALUE },
{ 32, 0, 0x28, 0, NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP },
{ 32, 0, 0x2c, 0, NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT },
{ 32, 0, 0x30, 0, NV10_PFIFO_CACHE1_SEMAPHORE },
{ 32, 0, 0x34, 0, NV10_PFIFO_CACHE1_DMA_SUBROUTINE },
{ 32, 0, 0x38, 0, NV40_PFIFO_GRCTX_INSTANCE },
{ 17, 0, 0x3c, 0, NV04_PFIFO_DMA_TIMESLICE },
{ 32, 0, 0x40, 0, 0x0032e4 },
{ 32, 0, 0x44, 0, 0x0032e8 },
{ 32, 0, 0x4c, 0, 0x002088 },
{ 32, 0, 0x50, 0, 0x003300 },
{ 32, 0, 0x54, 0, 0x00330c },
{}
},
.write = nv40_chan_ramfc_write,
.clear = nv04_chan_ramfc_clear,
.ctxdma = true,
};
static const struct nvkm_chan_func_userd
......@@ -73,6 +104,7 @@ static const struct nvkm_chan_func
nv40_chan = {
.inst = &nv04_chan_inst,
.userd = &nv40_chan_userd,
.ramfc = &nv40_chan_ramfc,
.start = nv04_chan_start,
.stop = nv04_chan_stop,
};
......@@ -157,5 +189,5 @@ int
nv40_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_fifo **pfifo)
{
return nv04_fifo_new_(&nv40_fifo, device, type, inst, 32, nv40_fifo_ramfc, pfifo);
return nv04_fifo_new_(&nv40_fifo, device, type, inst, 0, NULL, pfifo);
}
......@@ -26,7 +26,7 @@
#include "chid.h"
#include "runl.h"
#include <core/gpuobj.h>
#include <core/ramht.h>
#include <subdev/timer.h>
#include "nv50.h"
......@@ -63,9 +63,58 @@ nv50_chan_bind(struct nvkm_chan *chan)
{
struct nvkm_device *device = chan->cgrp->runl->fifo->engine.subdev.device;
nvkm_wr32(device, 0x002600 + (chan->id * 4), nv50_fifo_chan(chan)->ramfc->addr >> 12);
nvkm_wr32(device, 0x002600 + (chan->id * 4), chan->ramfc->addr >> 12);
}
static int
nv50_chan_ramfc_write(struct nvkm_chan *chan, u64 offset, u64 length, u32 devm, bool priv)
{
struct nvkm_device *device = chan->cgrp->runl->fifo->engine.subdev.device;
const u32 limit2 = ilog2(length / 8);
int ret;
ret = nvkm_gpuobj_new(device, 0x0200, 0x1000, true, chan->inst, &chan->ramfc);
if (ret)
return ret;
ret = nvkm_gpuobj_new(device, 0x1200, 0, true, chan->inst, &chan->eng);
if (ret)
return ret;
ret = nvkm_gpuobj_new(device, 0x4000, 0, false, chan->inst, &chan->pgd);
if (ret)
return ret;
ret = nvkm_ramht_new(device, 0x8000, 16, chan->inst, &chan->ramht);
if (ret)
return ret;
nv50_fifo_chan(chan)->eng = chan->eng;
nv50_fifo_chan(chan)->ramht = chan->ramht;
nvkm_kmap(chan->ramfc);
nvkm_wo32(chan->ramfc, 0x3c, 0x403f6078);
nvkm_wo32(chan->ramfc, 0x44, 0x01003fff);
nvkm_wo32(chan->ramfc, 0x48, chan->push->node->offset >> 4);
nvkm_wo32(chan->ramfc, 0x50, lower_32_bits(offset));
nvkm_wo32(chan->ramfc, 0x54, upper_32_bits(offset) | (limit2 << 16));
nvkm_wo32(chan->ramfc, 0x60, 0x7fffffff);
nvkm_wo32(chan->ramfc, 0x78, 0x00000000);
nvkm_wo32(chan->ramfc, 0x7c, 0x30000000 | devm);
nvkm_wo32(chan->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
(4 << 24) /* SEARCH_FULL */ |
(chan->ramht->gpuobj->node->offset >> 4));
nvkm_done(chan->ramfc);
return 0;
}
static const struct nvkm_chan_func_ramfc
nv50_chan_ramfc = {
.write = nv50_chan_ramfc_write,
.ctxdma = true,
.devm = 0xfff,
};
const struct nvkm_chan_func_userd
nv50_chan_userd = {
.bar = 0,
......@@ -83,6 +132,7 @@ static const struct nvkm_chan_func
nv50_chan = {
.inst = &nv50_chan_inst,
.userd = &nv50_chan_userd,
.ramfc = &nv50_chan_ramfc,
.bind = nv50_chan_bind,
.unbind = nv50_chan_unbind,
.start = nv50_chan_start,
......
......@@ -82,6 +82,7 @@ extern const struct nvkm_engn_func nv04_engn;
extern const struct nvkm_cgrp_func nv04_cgrp;
extern const struct nvkm_chan_func_inst nv04_chan_inst;
extern const struct nvkm_chan_func_userd nv04_chan_userd;
void nv04_chan_ramfc_clear(struct nvkm_chan *);
void nv04_chan_start(struct nvkm_chan *);
void nv04_chan_stop(struct nvkm_chan *);
......@@ -154,6 +155,7 @@ bool gk104_engn_chsw(struct nvkm_engn *);
int gk104_engn_cxid(struct nvkm_engn *, bool *cgid);
extern const struct nvkm_engn_func gk104_engn_ce;
extern const struct nvkm_chan_func_userd gk104_chan_userd;
extern const struct nvkm_chan_func_ramfc gk104_chan_ramfc;
void gk104_chan_bind(struct nvkm_chan *);
void gk104_chan_bind_inst(struct nvkm_chan *);
void gk104_chan_unbind(struct nvkm_chan *);
......@@ -189,6 +191,7 @@ extern const struct nvkm_runq_func gv100_runq;
extern const struct nvkm_engn_func gv100_engn;
extern const struct nvkm_engn_func gv100_engn_ce;
extern const struct nvkm_chan_func_userd gv100_chan_userd;
extern const struct nvkm_chan_func_ramfc gv100_chan_ramfc;
void tu102_fifo_intr_ctxsw_timeout_info(struct nvkm_engn *, u32 info);
extern const struct nvkm_fifo_func_mmu_fault tu102_fifo_mmu_fault;
......
......@@ -51,6 +51,7 @@ static const struct nvkm_chan_func
tu102_chan = {
.inst = &gf100_chan_inst,
.userd = &gv100_chan_userd,
.ramfc = &gv100_chan_ramfc,
.bind = gk104_chan_bind_inst,
.unbind = gk104_chan_unbind,
.start = tu102_chan_start,
......