Commit 3a6bc9c2 authored by Ben Skeggs

drm/nouveau/fifo: add runlist block()/allow()

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
Reviewed-by: Lyude Paul <lyude@redhat.com>
parent 4a492fd5
@@ -211,11 +211,24 @@ nvkm_fifo_chan_child_new(const struct nvkm_oclass *oclass, void *data, u32 size,
 void
 nvkm_chan_cctx_bind(struct nvkm_chan *chan, struct nvkm_oproxy *oproxy, struct nvkm_cctx *cctx)
 {
+	struct nvkm_cgrp *cgrp = chan->cgrp;
+	struct nvkm_runl *runl = cgrp->runl;
+
+	/* Prevent any channel in channel group from being rescheduled, kick them
+	 * off host and any engine(s) they're loaded on.
+	 */
+	if (cgrp->hw)
+		nvkm_runl_block(runl);
+
 	/* Update context pointer. */
 	if (cctx)
 		nvkm_fifo_chan_child_init(nvkm_oproxy(oproxy->object));
 	else
 		nvkm_fifo_chan_child_fini(nvkm_oproxy(oproxy->object), false);
+
+	/* Resume normal operation. */
+	if (cgrp->hw)
+		nvkm_runl_allow(runl);
 }
 
 void
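Taken together, the hunk above is the pattern the rest of the patch wires up per GPU generation: block the runlist, update channel state, allow it again. A minimal standalone sketch of that bracket, with toy names standing in for the nouveau API:

#include <stdio.h>

/* Toy stand-ins for nvkm_runl_block()/nvkm_runl_allow(); only the shape
 * of the bracket matches the kernel code above.
 */
struct toy_runl {
	int blocked;
};

static void toy_block(struct toy_runl *rl)
{
	if (!rl->blocked++)
		printf("hw: runlist scheduling off\n");
}

static void toy_allow(struct toy_runl *rl)
{
	if (!--rl->blocked)
		printf("hw: runlist scheduling on\n");
}

static void toy_cctx_bind(struct toy_runl *rl, int valid)
{
	toy_block(rl);	/* nothing in the channel group can be rescheduled */
	printf("ctx %s\n", valid ? "init" : "fini");
	toy_allow(rl);	/* normal operation resumes */
}

int main(void)
{
	struct toy_runl rl = { 0 };

	toy_cctx_bind(&rl, 1);
	toy_cctx_bind(&rl, 0);
	return 0;
}

The counting inside toy_block()/toy_allow() mirrors the runl.c hunk further down.
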
@@ -115,6 +115,18 @@ gf100_runq = {
 	.intr_0_names = gf100_runq_intr_0_names,
 };
 
+static void
+gf100_runl_allow(struct nvkm_runl *runl, u32 engm)
+{
+	nvkm_mask(runl->fifo->engine.subdev.device, 0x002630, engm, 0x00000000);
+}
+
+static void
+gf100_runl_block(struct nvkm_runl *runl, u32 engm)
+{
+	nvkm_mask(runl->fifo->engine.subdev.device, 0x002630, engm, engm);
+}
+
 static bool
 gf100_runl_pending(struct nvkm_runl *runl)
 {
@@ -181,6 +193,8 @@ static const struct nvkm_runl_func
 gf100_runl = {
 	.wait = nv50_runl_wait,
 	.pending = gf100_runl_pending,
+	.block = gf100_runl_block,
+	.allow = gf100_runl_allow,
 };
 
 static void
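For reference, nvkm_mask() is a read-modify-write of a register: on gf100 the block hook sets the requested engine bits in 0x002630 to stop scheduling, and the allow hook clears them again. A standalone sketch of that arithmetic (the register offset is from the patch; everything else is a stand-in, not the nouveau implementation):

#include <stdio.h>
#include <stdint.h>

static uint32_t reg_002630;

/* Like nvkm_mask(): read, replace the masked bits, write back. */
static uint32_t mask_reg(uint32_t *reg, uint32_t mask, uint32_t value)
{
	uint32_t old = *reg;

	*reg = (old & ~mask) | (value & mask);
	return old;
}

int main(void)
{
	uint32_t engm = 0x00000005;	/* hypothetical engine mask: engines 0 and 2 */

	mask_reg(&reg_002630, engm, engm);		/* gf100_runl_block(): set the bits */
	printf("blocked:   0x%08x\n", reg_002630);	/* 0x00000005 */

	mask_reg(&reg_002630, engm, 0x00000000);	/* gf100_runl_allow(): clear them */
	printf("unblocked: 0x%08x\n", reg_002630);	/* 0x00000000 */
	return 0;
}
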
@@ -197,6 +197,18 @@ gk104_runq = {
 	.intr_0_names = gk104_runq_intr_0_names,
 };
 
+void
+gk104_runl_allow(struct nvkm_runl *runl, u32 engm)
+{
+	nvkm_mask(runl->fifo->engine.subdev.device, 0x002630, BIT(runl->id), 0x00000000);
+}
+
+void
+gk104_runl_block(struct nvkm_runl *runl, u32 engm)
+{
+	nvkm_mask(runl->fifo->engine.subdev.device, 0x002630, BIT(runl->id), BIT(runl->id));
+}
+
 bool
 gk104_runl_pending(struct nvkm_runl *runl)
 {
@@ -306,6 +318,8 @@ static const struct nvkm_runl_func
 gk104_runl = {
 	.wait = nv50_runl_wait,
 	.pending = gk104_runl_pending,
+	.block = gk104_runl_block,
+	.allow = gk104_runl_allow,
 };
 
 int
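From gk104 onwards the register is indexed by runlist rather than by engine, so the implementation ignores engm and toggles BIT(runl->id); each runlist can be blocked independently. A small sketch of the bit arithmetic (hypothetical runlist ids, stand-in code):

#include <stdio.h>
#include <stdint.h>

#define BIT(n) (1U << (n))

int main(void)
{
	uint32_t reg = 0;
	int runl_a = 1, runl_b = 3;	/* hypothetical runlist ids */

	reg |= BIT(runl_a);		/* block runlist 1 */
	reg |= BIT(runl_b);		/* block runlist 3 */
	printf("both blocked: 0x%08x\n", reg);	/* 0x0000000a */

	reg &= ~BIT(runl_a);		/* allow runlist 1; runlist 3 stays blocked */
	printf("a allowed:    0x%08x\n", reg);	/* 0x00000008 */
	return 0;
}
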
@@ -62,6 +62,8 @@ const struct nvkm_runl_func
 gk110_runl = {
 	.wait = nv50_runl_wait,
 	.pending = gk104_runl_pending,
+	.block = gk104_runl_block,
+	.allow = gk104_runl_allow,
 };
 
 int
@@ -56,6 +56,8 @@ const struct nvkm_runl_func
 gm107_runl = {
 	.wait = nv50_runl_wait,
 	.pending = gk104_runl_pending,
+	.block = gk104_runl_block,
+	.allow = gk104_runl_allow,
 };
 
 static const struct nvkm_enum
@@ -33,6 +33,8 @@ static const struct nvkm_runl_func
 gp100_runl = {
 	.wait = nv50_runl_wait,
 	.pending = gk104_runl_pending,
+	.block = gk104_runl_block,
+	.allow = gk104_runl_allow,
 };
 
 static const struct nvkm_enum
@@ -37,15 +37,12 @@ gv100_fifo_gpfifo_submit_token(struct nvkm_fifo_chan *chan)
 static int
 gv100_fifo_gpfifo_engine_valid(struct gk104_fifo_chan *chan, bool ce, bool valid)
 {
-	struct nvkm_subdev *subdev = &chan->base.fifo->engine.subdev;
-	struct nvkm_device *device = subdev->device;
 	const u32 mask = ce ? 0x00020000 : 0x00010000;
 	const u32 data = valid ? mask : 0x00000000;
 	int ret;
 
 	/* Block runlist to prevent the channel from being rescheduled. */
-	mutex_lock(&chan->fifo->base.mutex);
-	nvkm_mask(device, 0x002630, BIT(chan->runl), BIT(chan->runl));
+	nvkm_runl_block(chan->base.cgrp->runl);
 
 	/* Preempt the channel. */
 	ret = gk104_fifo_gpfifo_kick_locked(chan);
@@ -57,7 +54,6 @@ gv100_fifo_gpfifo_engine_valid(struct gk104_fifo_chan *chan, bool ce, bool valid
 	}
 
 	/* Resume runlist. */
-	nvkm_mask(device, 0x002630, BIT(chan->runl), 0);
-	mutex_unlock(&chan->fifo->base.mutex);
+	nvkm_runl_allow(chan->base.cgrp->runl);
 	return ret;
 }
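This caller used to toggle 0x002630 directly under fifo->base.mutex; it now leans on the common helpers. Note the two '+' calls in the hunk above are reconstructed from the hunk line counts and the pattern in nvkm_chan_cctx_bind(), so treat their exact spelling as approximate. Besides the cleanup, a refcount can be nested where a raw on/off toggle cannot, as this toy comparison shows (stand-in code, not the driver):

#include <stdio.h>

static int hw_bit;		/* raw toggle (old gv100-style code) */
static int blocked;		/* refcount (new nvkm_runl_block/allow) */

static void raw_block(void)	{ hw_bit = 1; }
static void raw_allow(void)	{ hw_bit = 0; }
static void ref_block(void)	{ if (!blocked++) hw_bit = 1; }
static void ref_allow(void)	{ if (!--blocked) hw_bit = 0; }

int main(void)
{
	/* Raw style: the inner pair clobbers the outer one. */
	raw_block();
	raw_block();	/* nested user */
	raw_allow();	/* inner user done... */
	printf("raw: hw_bit=%d (scheduling back on too early)\n", hw_bit);
	raw_allow();

	/* Refcounted style: hardware flips only on the 0<->1 transitions. */
	ref_block();
	ref_block();
	ref_allow();
	printf("ref: hw_bit=%d (still blocked for the outer user)\n", hw_bit);
	ref_allow();
	printf("ref: hw_bit=%d\n", hw_bit);
	return 0;
}
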
@@ -86,6 +86,8 @@ static const struct nvkm_runl_func
 gv100_runl = {
 	.wait = nv50_runl_wait,
 	.pending = gk104_runl_pending,
+	.block = gk104_runl_block,
+	.allow = gk104_runl_allow,
 };
 
 const struct nvkm_enum
@@ -129,14 +129,13 @@ nv04_engn = {
 };
 
 void
-nv04_fifo_pause(struct nvkm_fifo *base, unsigned long *pflags)
-__acquires(fifo->base.lock)
+nv04_fifo_pause(struct nvkm_fifo *fifo, unsigned long *pflags)
+__acquires(fifo->lock)
 {
-	struct nv04_fifo *fifo = nv04_fifo(base);
-	struct nvkm_device *device = fifo->base.engine.subdev.device;
+	struct nvkm_device *device = fifo->engine.subdev.device;
 	unsigned long flags;
 
-	spin_lock_irqsave(&fifo->base.lock, flags);
+	spin_lock_irqsave(&fifo->lock, flags);
 	*pflags = flags;
 
 	nvkm_wr32(device, NV03_PFIFO_CACHES, 0x00000000);
@@ -165,17 +164,16 @@ __acquires(fifo->base.lock)
 }
 
 void
-nv04_fifo_start(struct nvkm_fifo *base, unsigned long *pflags)
-__releases(fifo->base.lock)
+nv04_fifo_start(struct nvkm_fifo *fifo, unsigned long *pflags)
+__releases(fifo->lock)
 {
-	struct nv04_fifo *fifo = nv04_fifo(base);
-	struct nvkm_device *device = fifo->base.engine.subdev.device;
+	struct nvkm_device *device = fifo->engine.subdev.device;
 	unsigned long flags = *pflags;
 
 	nvkm_mask(device, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0x00000001);
 	nvkm_wr32(device, NV03_PFIFO_CACHES, 0x00000001);
 
-	spin_unlock_irqrestore(&fifo->base.lock, flags);
+	spin_unlock_irqrestore(&fifo->lock, flags);
 }
 
 const struct nvkm_runl_func
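The nv04 changes are mechanical fallout from the lock moving into the nvkm_fifo base object: the nv04_fifo() upcast disappears and fifo->base.lock becomes fifo->lock. The pause()/start() pair itself is unusual in that the spinlock is acquired in one function and released in another, with the saved IRQ state carried through the caller's pflags; hence the __acquires()/__releases() sparse annotations. A toy model of that shape (no real spinlocks or IRQs, stand-in code only):

#include <stdio.h>

static int fake_irq_state = 1;		/* 1 = "interrupts enabled" */

static void fifo_pause(unsigned long *pflags)
{
	*pflags = (unsigned long)fake_irq_state;	/* like spin_lock_irqsave(&fifo->lock, flags) */
	fake_irq_state = 0;
	printf("PFIFO caches halted, lock held across return\n");
}

static void fifo_start(unsigned long *pflags)
{
	printf("PFIFO caches restarted\n");
	fake_irq_state = (int)*pflags;	/* like spin_unlock_irqrestore(&fifo->lock, flags) */
}

int main(void)
{
	unsigned long flags;

	fifo_pause(&flags);
	/* ... PFIFO registers may be poked safely here ... */
	fifo_start(&flags);
	return 0;
}
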
@@ -134,6 +134,8 @@ extern const struct nvkm_enum gk104_fifo_mmu_fault_gpcclient[];
 void gk104_fifo_recover_chan(struct nvkm_fifo *, int);
 int gk104_fifo_engine_id(struct nvkm_fifo *, struct nvkm_engine *);
 bool gk104_runl_pending(struct nvkm_runl *);
+void gk104_runl_block(struct nvkm_runl *, u32);
+void gk104_runl_allow(struct nvkm_runl *, u32);
 extern const struct nvkm_runq_func gk104_runq;
 void gk104_runq_init(struct nvkm_runq *);
 bool gk104_runq_intr(struct nvkm_runq *, struct nvkm_runl *);
@@ -82,6 +82,34 @@ nvkm_runl_update_pending(struct nvkm_runl *runl)
 	return true;
 }
 
+void
+nvkm_runl_allow(struct nvkm_runl *runl)
+{
+	struct nvkm_fifo *fifo = runl->fifo;
+	unsigned long flags;
+
+	spin_lock_irqsave(&fifo->lock, flags);
+	if (!--runl->blocked) {
+		RUNL_TRACE(runl, "running");
+		runl->func->allow(runl, ~0);
+	}
+	spin_unlock_irqrestore(&fifo->lock, flags);
+}
+
+void
+nvkm_runl_block(struct nvkm_runl *runl)
+{
+	struct nvkm_fifo *fifo = runl->fifo;
+	unsigned long flags;
+
+	spin_lock_irqsave(&fifo->lock, flags);
+	if (!runl->blocked++) {
+		RUNL_TRACE(runl, "stopped");
+		runl->func->block(runl, ~0);
+	}
+	spin_unlock_irqrestore(&fifo->lock, flags);
+}
+
 void
 nvkm_runl_del(struct nvkm_runl *runl)
 {
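This is the heart of the patch: 'blocked' is a nesting count protected by fifo->lock, and runl->func->block()/allow() are called only on the 0->1 and 1->0 transitions, always with a full engine mask (~0). A rough pthread model of the same bookkeeping (stand-in code; the kernel uses an IRQ-safe spinlock, not a mutex):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;	/* stands in for fifo->lock */
static int blocked;
static int hw_writes;

static void runl_block(void)
{
	pthread_mutex_lock(&lock);
	if (!blocked++)
		hw_writes++;	/* stands in for runl->func->block(runl, ~0) */
	pthread_mutex_unlock(&lock);
}

static void runl_allow(void)
{
	pthread_mutex_lock(&lock);
	if (!--blocked)
		hw_writes++;	/* stands in for runl->func->allow(runl, ~0) */
	pthread_mutex_unlock(&lock);
}

static void *user(void *arg)
{
	(void)arg;
	for (int i = 0; i < 10000; i++) {
		runl_block();
		runl_allow();
	}
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, user, NULL);
	pthread_create(&b, NULL, user, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	/* The count always returns to zero, however the two users interleave. */
	printf("blocked=%d hw_writes=%d\n", blocked, hw_writes);
	return 0;
}
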
@@ -26,6 +26,8 @@ struct nvkm_runl {
 	const struct nvkm_runl_func {
 		int (*wait)(struct nvkm_runl *);
 		bool (*pending)(struct nvkm_runl *);
+		void (*block)(struct nvkm_runl *, u32 engm);
+		void (*allow)(struct nvkm_runl *, u32 engm);
 	} *func;
 	struct nvkm_fifo *fifo;
 	int id;
@@ -44,6 +46,8 @@ struct nvkm_runl {
 	int chan_nr;
 
 	struct mutex mutex;
+
+	int blocked;
 
 	struct list_head head;
 };
@@ -52,6 +56,8 @@ struct nvkm_runl *nvkm_runl_get(struct nvkm_fifo *, int runi, u32 addr);
 struct nvkm_engn *nvkm_runl_add(struct nvkm_runl *, int engi, const struct nvkm_engn_func *,
 				enum nvkm_subdev_type, int inst);
 void nvkm_runl_del(struct nvkm_runl *);
+void nvkm_runl_block(struct nvkm_runl *);
+void nvkm_runl_allow(struct nvkm_runl *);
 bool nvkm_runl_update_pending(struct nvkm_runl *);
 struct nvkm_chan *nvkm_runl_chan_get_chid(struct nvkm_runl *, int chid, unsigned long *irqflags);
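The hooks take an engine mask so an implementation can in principle block individual engines, though nvkm_runl_block()/allow() currently always pass ~0; gf100 honours the mask while gk104's implementation keys off the runlist id instead. A toy sketch of how per-generation functions plug into the table (shapes only, not the nouveau types):

#include <stdio.h>

#define BIT(n) (1U << (n))

struct toy_runl;

struct toy_runl_func {
	void (*block)(struct toy_runl *, unsigned engm);
	void (*allow)(struct toy_runl *, unsigned engm);
};

struct toy_runl {
	const struct toy_runl_func *func;
	int id;
	unsigned reg;	/* stands in for the 0x002630 register */
};

/* gf100-style: honours the engine mask. */
static void engm_block(struct toy_runl *rl, unsigned engm) { rl->reg |= engm; }
static void engm_allow(struct toy_runl *rl, unsigned engm) { rl->reg &= ~engm; }

/* gk104-style: one bit per runlist, engm ignored. */
static void id_block(struct toy_runl *rl, unsigned engm) { (void)engm; rl->reg |= BIT(rl->id); }
static void id_allow(struct toy_runl *rl, unsigned engm) { (void)engm; rl->reg &= ~BIT(rl->id); }

static const struct toy_runl_func engm_func = { engm_block, engm_allow };
static const struct toy_runl_func id_func = { id_block, id_allow };

int main(void)
{
	struct toy_runl a = { &engm_func, 0, 0 };
	struct toy_runl b = { &id_func, 3, 0 };

	a.func->block(&a, ~0U);	/* what nvkm_runl_block() passes today */
	b.func->block(&b, ~0U);
	printf("a.reg=0x%08x b.reg=0x%08x\n", a.reg, b.reg);
	return 0;
}
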
@@ -68,6 +68,8 @@ static const struct nvkm_runl_func
 tu102_runl = {
 	.wait = nv50_runl_wait,
 	.pending = tu102_runl_pending,
+	.block = gk104_runl_block,
+	.allow = gk104_runl_allow,
 };
 
 static const struct nvkm_enum