Commit fd67738a authored by Ben Skeggs

drm/nouveau/fifo: pre-move some blocks of code around

- will make subsequent patches more obvious
- no code changes
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
Reviewed-by: Lyude Paul <lyude@redhat.com>
parent f5e45689
@@ -211,6 +211,14 @@ nvkm_fifo_fini(struct nvkm_engine *engine, bool suspend)
return 0;
}
static int
nvkm_fifo_init(struct nvkm_engine *engine)
{
struct nvkm_fifo *fifo = nvkm_fifo(engine);
fifo->func->init(fifo);
return 0;
}
static int
nvkm_fifo_info(struct nvkm_engine *engine, u64 mthd, u64 *data)
{
@@ -240,14 +248,6 @@ nvkm_fifo_preinit(struct nvkm_engine *engine)
nvkm_mc_reset(engine->subdev.device, NVKM_ENGINE_FIFO, 0);
}
static int
nvkm_fifo_init(struct nvkm_engine *engine)
{
struct nvkm_fifo *fifo = nvkm_fifo(engine);
fifo->func->init(fifo);
return 0;
}
static void *
nvkm_fifo_dtor(struct nvkm_engine *engine)
{
......
@@ -69,72 +69,6 @@ nv04_fifo_dma_object_ctor(struct nvkm_fifo_chan *base,
return hash;
}
void
nv04_fifo_dma_fini(struct nvkm_fifo_chan *base)
{
struct nv04_fifo_chan *chan = nv04_fifo_chan(base);
struct nv04_fifo *fifo = chan->fifo;
struct nvkm_device *device = fifo->base.engine.subdev.device;
struct nvkm_memory *fctx = device->imem->ramfc;
const struct nv04_fifo_ramfc *c;
unsigned long flags;
u32 mask = fifo->base.nr - 1;
u32 data = chan->ramfc;
u32 chid;
/* prevent fifo context switches */
spin_lock_irqsave(&fifo->base.lock, flags);
nvkm_wr32(device, NV03_PFIFO_CACHES, 0);
/* if this channel is active, replace it with a null context */
chid = nvkm_rd32(device, NV03_PFIFO_CACHE1_PUSH1) & mask;
if (chid == chan->base.chid) {
nvkm_mask(device, NV04_PFIFO_CACHE1_DMA_PUSH, 0x00000001, 0);
nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0, 0);
nvkm_mask(device, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0);
c = fifo->ramfc;
nvkm_kmap(fctx);
do {
u32 rm = ((1ULL << c->bits) - 1) << c->regs;
u32 cm = ((1ULL << c->bits) - 1) << c->ctxs;
u32 rv = (nvkm_rd32(device, c->regp) & rm) >> c->regs;
u32 cv = (nvkm_ro32(fctx, c->ctxp + data) & ~cm);
nvkm_wo32(fctx, c->ctxp + data, cv | (rv << c->ctxs));
} while ((++c)->bits);
nvkm_done(fctx);
c = fifo->ramfc;
do {
nvkm_wr32(device, c->regp, 0x00000000);
} while ((++c)->bits);
nvkm_wr32(device, NV03_PFIFO_CACHE1_GET, 0);
nvkm_wr32(device, NV03_PFIFO_CACHE1_PUT, 0);
nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH1, mask);
nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0, 1);
nvkm_wr32(device, NV04_PFIFO_CACHE1_PULL0, 1);
}
/* restore normal operation, after disabling dma mode */
nvkm_mask(device, NV04_PFIFO_MODE, 1 << chan->base.chid, 0);
nvkm_wr32(device, NV03_PFIFO_CACHES, 1);
spin_unlock_irqrestore(&fifo->base.lock, flags);
}
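Note on the save loop in nv04_fifo_dma_fini() above: each fifo->ramfc entry describes one context field, giving its width (bits), its shift and register offset on the live PFIFO side (regs/regp), and its shift and byte offset in the channel's RAMFC image (ctxs/ctxp). Below is a minimal standalone sketch of the per-entry move, with plain arrays standing in for MMIO and instance memory; the struct mirrors nv04_fifo_ramfc, but save_field() and main() are illustrative only, not driver code.

#include <stdint.h>

struct ramfc_entry {
	unsigned bits;	/* field width in bits */
	unsigned ctxs;	/* shift within the RAMFC context word */
	unsigned ctxp;	/* byte offset of the context word */
	unsigned regs;	/* shift within the live register */
	unsigned regp;	/* byte offset of the live register */
};

static void save_field(const struct ramfc_entry *c,
		       const uint32_t *reg32, uint32_t *ctx32)
{
	uint32_t rm = ((1ULL << c->bits) - 1) << c->regs;   /* register mask */
	uint32_t cm = ((1ULL << c->bits) - 1) << c->ctxs;   /* context mask */
	uint32_t rv = (reg32[c->regp / 4] & rm) >> c->regs; /* live value */
	uint32_t cv = ctx32[c->ctxp / 4] & ~cm;             /* clear old field */

	ctx32[c->ctxp / 4] = cv | (rv << c->ctxs);          /* merge it in */
}

int main(void)
{
	/* Hypothetical entry: a 16-bit field at register 0x40, bit 0. */
	const struct ramfc_entry e = { .bits = 16, .ctxs = 0, .ctxp = 0,
				       .regs = 0, .regp = 0x40 };
	uint32_t regs[32] = { [0x40 / 4] = 0x1234 };
	uint32_t ctx[8] = { 0 };

	save_field(&e, regs, ctx); /* ctx[0] low 16 bits become 0x1234 */
	return 0;
}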
void
nv04_fifo_dma_init(struct nvkm_fifo_chan *base)
{
struct nv04_fifo_chan *chan = nv04_fifo_chan(base);
struct nv04_fifo *fifo = chan->fifo;
struct nvkm_device *device = fifo->base.engine.subdev.device;
u32 mask = 1 << chan->base.chid;
unsigned long flags;
spin_lock_irqsave(&fifo->base.lock, flags);
nvkm_mask(device, NV04_PFIFO_MODE, mask, mask);
spin_unlock_irqrestore(&fifo->base.lock, flags);
}
void *
nv04_fifo_dma_dtor(struct nvkm_fifo_chan *base)
{
......
@@ -53,6 +53,52 @@ static const struct nvkm_chan_func
gf100_chan = {
};
static const struct nvkm_bitfield
gf100_fifo_pbdma_intr[] = {
/* { 0x00008000, "" } seen with null ib push */
{ 0x00200000, "ILLEGAL_MTHD" },
{ 0x00800000, "EMPTY_SUBC" },
{}
};
static void
gf100_fifo_intr_pbdma(struct gf100_fifo *fifo, int unit)
{
struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
struct nvkm_device *device = subdev->device;
u32 stat = nvkm_rd32(device, 0x040108 + (unit * 0x2000));
u32 addr = nvkm_rd32(device, 0x0400c0 + (unit * 0x2000));
u32 data = nvkm_rd32(device, 0x0400c4 + (unit * 0x2000));
u32 chid = nvkm_rd32(device, 0x040120 + (unit * 0x2000)) & 0x7f;
u32 subc = (addr & 0x00070000) >> 16;
u32 mthd = (addr & 0x00003ffc);
struct nvkm_fifo_chan *chan;
unsigned long flags;
u32 show = stat;
char msg[128];
if (stat & 0x00800000) {
if (device->sw) {
if (nvkm_sw_mthd(device->sw, chid, subc, mthd, data))
show &= ~0x00800000;
}
}
if (show) {
nvkm_snprintbf(msg, sizeof(msg), gf100_fifo_pbdma_intr, show);
chan = nvkm_fifo_chan_chid(&fifo->base, chid, &flags);
nvkm_error(subdev, "PBDMA%d: %08x [%s] ch %d [%010llx %s] "
"subc %d mthd %04x data %08x\n",
unit, show, msg, chid, chan ? chan->inst->addr : 0,
chan ? chan->object.client->name : "unknown",
subc, mthd, data);
nvkm_fifo_chan_put(&fifo->base, flags, &chan);
}
nvkm_wr32(device, 0x0400c0 + (unit * 0x2000), 0x80600008);
nvkm_wr32(device, 0x040108 + (unit * 0x2000), stat);
}
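The "[%s]" in the PBDMA error message above is produced by nvkm_snprintbf(), which walks an nvkm_bitfield table and appends the name of every bit set in `show`. A simplified, self-contained sketch of that decode follows, assuming nothing beyond the table layout shown; snprintbf() here is a stand-in for illustration, not the NVKM helper.

#include <stdio.h>
#include <string.h>
#include <stdint.h>

struct bitfield { uint32_t mask; const char *name; };

static const struct bitfield pbdma_intr[] = {
	{ 0x00200000, "ILLEGAL_MTHD" },
	{ 0x00800000, "EMPTY_SUBC" },
	{}
};

/* Append the name of every set bit, space-separated, into buf. */
static void snprintbf(char *buf, size_t len,
		      const struct bitfield *bf, uint32_t value)
{
	buf[0] = '\0';
	for (; bf->mask; bf++) {
		if (!(value & bf->mask))
			continue;
		if (buf[0])
			strncat(buf, " ", len - strlen(buf) - 1);
		strncat(buf, bf->name, len - strlen(buf) - 1);
	}
}

int main(void)
{
	char msg[128];

	snprintbf(msg, sizeof(msg), pbdma_intr, 0x00a00000);
	printf("[%s]\n", msg); /* -> [ILLEGAL_MTHD EMPTY_SUBC] */
	return 0;
}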
void
gf100_fifo_runlist_commit(struct gf100_fifo *fifo)
{
@@ -399,52 +445,6 @@ gf100_fifo_intr_fault(struct nvkm_fifo *fifo, int unit)
nvkm_fifo_fault(fifo, &info);
}
static const struct nvkm_bitfield
gf100_fifo_pbdma_intr[] = {
/* { 0x00008000, "" } seen with null ib push */
{ 0x00200000, "ILLEGAL_MTHD" },
{ 0x00800000, "EMPTY_SUBC" },
{}
};
static void
gf100_fifo_intr_pbdma(struct gf100_fifo *fifo, int unit)
{
struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
struct nvkm_device *device = subdev->device;
u32 stat = nvkm_rd32(device, 0x040108 + (unit * 0x2000));
u32 addr = nvkm_rd32(device, 0x0400c0 + (unit * 0x2000));
u32 data = nvkm_rd32(device, 0x0400c4 + (unit * 0x2000));
u32 chid = nvkm_rd32(device, 0x040120 + (unit * 0x2000)) & 0x7f;
u32 subc = (addr & 0x00070000) >> 16;
u32 mthd = (addr & 0x00003ffc);
struct nvkm_fifo_chan *chan;
unsigned long flags;
u32 show = stat;
char msg[128];
if (stat & 0x00800000) {
if (device->sw) {
if (nvkm_sw_mthd(device->sw, chid, subc, mthd, data))
show &= ~0x00800000;
}
}
if (show) {
nvkm_snprintbf(msg, sizeof(msg), gf100_fifo_pbdma_intr, show);
chan = nvkm_fifo_chan_chid(&fifo->base, chid, &flags);
nvkm_error(subdev, "PBDMA%d: %08x [%s] ch %d [%010llx %s] "
"subc %d mthd %04x data %08x\n",
unit, show, msg, chid, chan ? chan->inst->addr : 0,
chan ? chan->object.client->name : "unknown",
subc, mthd, data);
nvkm_fifo_chan_put(&fifo->base, flags, &chan);
}
nvkm_wr32(device, 0x0400c0 + (unit * 0x2000), 0x80600008);
nvkm_wr32(device, 0x040108 + (unit * 0x2000), stat);
}
static void
gf100_fifo_intr_runlist(struct gf100_fifo *fifo)
{
@@ -576,46 +576,6 @@ gf100_fifo_intr(struct nvkm_fifo *base)
}
}
static int
gf100_fifo_oneinit(struct nvkm_fifo *base)
{
struct gf100_fifo *fifo = gf100_fifo(base);
struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
struct nvkm_device *device = subdev->device;
struct nvkm_vmm *bar = nvkm_bar_bar1_vmm(device);
int ret;
/* Determine number of PBDMAs by checking valid enable bits. */
nvkm_wr32(device, 0x002204, 0xffffffff);
fifo->pbdma_nr = hweight32(nvkm_rd32(device, 0x002204));
nvkm_debug(subdev, "%d PBDMA(s)\n", fifo->pbdma_nr);
ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 0x1000,
false, &fifo->runlist.mem[0]);
if (ret)
return ret;
ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 0x1000,
false, &fifo->runlist.mem[1]);
if (ret)
return ret;
init_waitqueue_head(&fifo->runlist.wait);
ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 128 * 0x1000,
0x1000, false, &fifo->user.mem);
if (ret)
return ret;
ret = nvkm_vmm_get(bar, 12, nvkm_memory_size(fifo->user.mem),
&fifo->user.bar);
if (ret)
return ret;
return nvkm_memory_map(fifo->user.mem, 0, bar, fifo->user.bar, NULL, 0);
}
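The PBDMA probe at the top of gf100_fifo_oneinit() relies on the hardware only latching enable bits for units that actually exist (per the in-code comment): write all-ones to 0x002204, read it back, and population-count the result, which is what hweight32() does in the kernel. A toy sketch of the idea with a simulated read-back; rd32_002204() stands in for the MMIO read after the all-ones write.

#include <stdint.h>
#include <stdio.h>

/* Simulated read-back: pretend four PBDMA enable bits stuck. */
static uint32_t rd32_002204(void)
{
	return 0x0000000f;
}

int main(void)
{
	/* __builtin_popcount stands in for the kernel's hweight32(). */
	int pbdma_nr = __builtin_popcount(rd32_002204());

	printf("%d PBDMA(s)\n", pbdma_nr);
	return 0;
}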
static void
gf100_fifo_fini(struct nvkm_fifo *base)
{
@@ -659,6 +619,46 @@ gf100_fifo_init(struct nvkm_fifo *base)
nvkm_wr32(device, 0x002628, 0x00000001); /* ENGINE_INTR_EN */
}
static int
gf100_fifo_oneinit(struct nvkm_fifo *base)
{
struct gf100_fifo *fifo = gf100_fifo(base);
struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
struct nvkm_device *device = subdev->device;
struct nvkm_vmm *bar = nvkm_bar_bar1_vmm(device);
int ret;
/* Determine number of PBDMAs by checking valid enable bits. */
nvkm_wr32(device, 0x002204, 0xffffffff);
fifo->pbdma_nr = hweight32(nvkm_rd32(device, 0x002204));
nvkm_debug(subdev, "%d PBDMA(s)\n", fifo->pbdma_nr);
ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 0x1000,
false, &fifo->runlist.mem[0]);
if (ret)
return ret;
ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 0x1000,
false, &fifo->runlist.mem[1]);
if (ret)
return ret;
init_waitqueue_head(&fifo->runlist.wait);
ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 128 * 0x1000,
0x1000, false, &fifo->user.mem);
if (ret)
return ret;
ret = nvkm_vmm_get(bar, 12, nvkm_memory_size(fifo->user.mem),
&fifo->user.bar);
if (ret)
return ret;
return nvkm_memory_map(fifo->user.mem, 0, bar, fifo->user.bar, NULL, 0);
}
static void *
gf100_fifo_dtor(struct nvkm_fifo *base)
{
......
@@ -49,6 +49,72 @@ nv04_fifo_ramfc[] = {
{}
};
void
nv04_fifo_dma_fini(struct nvkm_fifo_chan *base)
{
struct nv04_fifo_chan *chan = nv04_fifo_chan(base);
struct nv04_fifo *fifo = chan->fifo;
struct nvkm_device *device = fifo->base.engine.subdev.device;
struct nvkm_memory *fctx = device->imem->ramfc;
const struct nv04_fifo_ramfc *c;
unsigned long flags;
u32 mask = fifo->base.nr - 1;
u32 data = chan->ramfc;
u32 chid;
/* prevent fifo context switches */
spin_lock_irqsave(&fifo->base.lock, flags);
nvkm_wr32(device, NV03_PFIFO_CACHES, 0);
/* if this channel is active, replace it with a null context */
chid = nvkm_rd32(device, NV03_PFIFO_CACHE1_PUSH1) & mask;
if (chid == chan->base.chid) {
nvkm_mask(device, NV04_PFIFO_CACHE1_DMA_PUSH, 0x00000001, 0);
nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0, 0);
nvkm_mask(device, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0);
c = fifo->ramfc;
nvkm_kmap(fctx);
do {
u32 rm = ((1ULL << c->bits) - 1) << c->regs;
u32 cm = ((1ULL << c->bits) - 1) << c->ctxs;
u32 rv = (nvkm_rd32(device, c->regp) & rm) >> c->regs;
u32 cv = (nvkm_ro32(fctx, c->ctxp + data) & ~cm);
nvkm_wo32(fctx, c->ctxp + data, cv | (rv << c->ctxs));
} while ((++c)->bits);
nvkm_done(fctx);
c = fifo->ramfc;
do {
nvkm_wr32(device, c->regp, 0x00000000);
} while ((++c)->bits);
nvkm_wr32(device, NV03_PFIFO_CACHE1_GET, 0);
nvkm_wr32(device, NV03_PFIFO_CACHE1_PUT, 0);
nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH1, mask);
nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0, 1);
nvkm_wr32(device, NV04_PFIFO_CACHE1_PULL0, 1);
}
/* restore normal operation, after disabling dma mode */
nvkm_mask(device, NV04_PFIFO_MODE, 1 << chan->base.chid, 0);
nvkm_wr32(device, NV03_PFIFO_CACHES, 1);
spin_unlock_irqrestore(&fifo->base.lock, flags);
}
void
nv04_fifo_dma_init(struct nvkm_fifo_chan *base)
{
struct nv04_fifo_chan *chan = nv04_fifo_chan(base);
struct nv04_fifo *fifo = chan->fifo;
struct nvkm_device *device = fifo->base.engine.subdev.device;
u32 mask = 1 << chan->base.chid;
unsigned long flags;
spin_lock_irqsave(&fifo->base.lock, flags);
nvkm_mask(device, NV04_PFIFO_MODE, mask, mask);
spin_unlock_irqrestore(&fifo->base.lock, flags);
}
static const struct nvkm_chan_func
nv04_chan = {
};
......
@@ -64,22 +64,6 @@ nv50_fifo_runlist_update(struct nv50_fifo *fifo)
mutex_unlock(&fifo->base.mutex);
}
int
nv50_fifo_oneinit(struct nvkm_fifo *base)
{
struct nv50_fifo *fifo = nv50_fifo(base);
struct nvkm_device *device = fifo->base.engine.subdev.device;
int ret;
ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 128 * 4, 0x1000,
false, &fifo->runlist[0]);
if (ret)
return ret;
return nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 128 * 4, 0x1000,
false, &fifo->runlist[1]);
}
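nv50_fifo_oneinit() above allocates two runlist buffers (128 entries of 4 bytes each) rather than one. Runlist updates elsewhere in this file (see nv50_fifo_runlist_update() above) are double-buffered: the next list is built in the idle buffer before the hardware is flipped over to it, so it never fetches a half-written list. A rough sketch of that pattern, with plain arrays standing in for the nvkm_memory objects; runlist_commit() is illustrative, not the driver function.

#include <stdint.h>

#define NR_SLOTS 128

struct runlist {
	uint32_t mem[2][NR_SLOTS]; /* mirrors fifo->runlist[0]/[1] */
	int next;                  /* index of the buffer to fill next */
};

static void runlist_commit(struct runlist *rl, const uint32_t *chids, int nr)
{
	uint32_t *buf = rl->mem[rl->next];
	int i;

	for (i = 0; i < nr; i++)
		buf[i] = chids[i]; /* build the new list off to the side */

	/* ...here the hardware would be pointed at 'buf'... */
	rl->next = !rl->next;      /* the other buffer is now idle */
}

int main(void)
{
	struct runlist rl = { .next = 0 };
	uint32_t chids[3] = { 0, 2, 5 };

	runlist_commit(&rl, chids, 3); /* fills mem[0], flips to mem[1] */
	return 0;
}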
void
nv50_fifo_init(struct nvkm_fifo *base)
{
@@ -110,6 +94,22 @@ nv50_fifo_chid_nr(struct nvkm_fifo *fifo)
return 128;
}
int
nv50_fifo_oneinit(struct nvkm_fifo *base)
{
struct nv50_fifo *fifo = nv50_fifo(base);
struct nvkm_device *device = fifo->base.engine.subdev.device;
int ret;
ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 128 * 4, 0x1000,
false, &fifo->runlist[0]);
if (ret)
return ret;
return nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 128 * 4, 0x1000,
false, &fifo->runlist[1]);
}
void *
nv50_fifo_dtor(struct nvkm_fifo *base)
{
......