Commit e1ef6b42 authored by Ben Skeggs

drm/nouveau/kms/nv50: remove code to support non-atomic page flips

Made completely unreachable (and broken) by atomic commits.
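With atomic modesetting, the legacy page-flip ioctl is serviced by the DRM core's atomic helpers, so nouveau_crtc_page_flip() and the nv50_display_flip_next()/nv50_display_flip_stop() machinery removed below can no longer be reached. Rough sketch only (standard DRM helper names; hypothetical example wiring, not code from this commit):

	#include <drm/drm_atomic_helper.h>
	#include <drm/drm_crtc.h>

	/* Example only: a CRTC whose .page_flip is routed through the atomic path. */
	static const struct drm_crtc_funcs example_atomic_crtc_funcs = {
		.reset			= drm_atomic_helper_crtc_reset,
		.destroy		= drm_crtc_cleanup,
		.set_config		= drm_atomic_helper_set_config,
		.page_flip		= drm_atomic_helper_page_flip,	/* flip becomes an atomic commit */
		.atomic_duplicate_state	= drm_atomic_helper_crtc_duplicate_state,
		.atomic_destroy_state	= drm_atomic_helper_crtc_destroy_state,
	};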
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
parent c2d926aa
@@ -835,10 +835,7 @@ nouveau_page_flip_emit(struct nouveau_channel *chan,
if (ret)
goto fail;
if (drm->device.info.family < NV_DEVICE_INFO_V0_FERMI)
BEGIN_NV04(chan, NvSubSw, NV_SW_PAGE_FLIP, 1);
else
BEGIN_NVC0(chan, FermiSw, NV_SW_PAGE_FLIP, 1);
OUT_RING (chan, 0x00000000);
FIRE_RING (chan);
@@ -867,6 +864,8 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
struct nouveau_channel *chan;
struct nouveau_cli *cli;
struct nouveau_fence *fence;
struct nv04_display *dispnv04 = nv04_display(dev);
int head = nouveau_crtc(crtc)->index;
int ret;
chan = drm->channel;
@@ -913,14 +912,6 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
drm_crtc_vblank_get(crtc);
/* Emit a page flip */
if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
ret = nv50_display_flip_next(crtc, fb, chan, swap_interval);
if (ret)
goto fail_unreserve;
} else {
struct nv04_display *dispnv04 = nv04_display(dev);
int head = nouveau_crtc(crtc)->index;
if (swap_interval) {
ret = RING_SPACE(chan, 8);
if (ret)
@@ -937,7 +928,6 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
}
nouveau_bo_ref(new_bo, &dispnv04->image[head]);
}
ret = nouveau_page_flip_emit(chan, old_bo, new_bo, s, &fence);
if (ret)
@@ -986,16 +976,8 @@ nouveau_finish_page_flip(struct nouveau_channel *chan,
s = list_first_entry(&fctx->flip, struct nouveau_page_flip_state, head);
if (s->event) {
if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) {
drm_crtc_arm_vblank_event(s->crtc, s->event);
} else {
drm_crtc_send_vblank_event(s->crtc, s->event);
/* Give up ownership of vblank for page-flipped crtc */
drm_crtc_vblank_put(s->crtc);
}
}
else {
/* Give up ownership of vblank for page-flipped crtc */
drm_crtc_vblank_put(s->crtc);
}
@@ -1017,13 +999,11 @@ nouveau_flip_complete(struct nvif_notify *notify)
struct nouveau_page_flip_state state;
if (!nouveau_finish_page_flip(chan, &state)) {
if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) {
nv_set_crtc_base(drm->dev, drm_crtc_index(state.crtc),
state.offset + state.crtc->y *
state.pitch + state.crtc->x *
state.bpp / 8);
}
}
return NVIF_NOTIFY_KEEP;
}
@@ -92,7 +92,6 @@ struct nv84_fence_chan {
struct nouveau_fence_chan base;
struct nvkm_vma vma;
struct nvkm_vma vma_gart;
struct nvkm_vma dispc_vma[4];
};
struct nv84_fence_priv {
@@ -102,7 +101,6 @@ struct nv84_fence_priv {
u32 *suspend;
};
u64 nv84_fence_crtc(struct nouveau_channel *, int);
int nv84_fence_context_new(struct nouveau_channel *);
#endif
@@ -57,10 +57,7 @@ void
nv10_fence_context_del(struct nouveau_channel *chan)
{
struct nv10_fence_chan *fctx = chan->fence;
int i;
nouveau_fence_context_del(&fctx->base);
for (i = 0; i < ARRAY_SIZE(fctx->head); i++)
nvif_object_fini(&fctx->head[i]);
nvif_object_fini(&fctx->sema);
chan->fence = NULL;
nouveau_fence_context_free(&fctx->base);
@@ -7,7 +7,6 @@
struct nv10_fence_chan {
struct nouveau_fence_chan base;
struct nvif_object sema;
struct nvif_object head[4];
};
struct nv10_fence_priv {
@@ -658,11 +658,8 @@ nv50_ovly_create(struct nvif_device *device, struct nvif_object *disp,
struct nv50_head {
struct nouveau_crtc base;
struct nouveau_bo *image;
struct nv50_ovly ovly;
struct nv50_oimm oimm;
struct nv50_base *_base;
};
#define nv50_head(c) ((struct nv50_head *)nouveau_crtc(c))
@@ -740,40 +737,6 @@ evo_kick(u32 *push, void *evoc)
*((p)++) = _d; \
} while(0)
static bool
evo_sync_wait(void *data)
{
if (nouveau_bo_rd32(data, EVO_MAST_NTFY) != 0x00000000)
return true;
usleep_range(1, 2);
return false;
}
static int
evo_sync(struct drm_device *dev)
{
struct nvif_device *device = &nouveau_drm(dev)->device;
struct nv50_disp *disp = nv50_disp(dev);
struct nv50_mast *mast = nv50_mast(dev);
u32 *push = evo_wait(mast, 8);
if (push) {
nouveau_bo_wr32(disp->sync, EVO_MAST_NTFY, 0x00000000);
evo_mthd(push, 0x0084, 1);
evo_data(push, 0x80000000 | EVO_MAST_NTFY);
evo_mthd(push, 0x0080, 2);
evo_data(push, 0x00000000);
evo_data(push, 0x00000000);
evo_kick(push, mast);
if (nvif_msec(device, 2000,
if (evo_sync_wait(disp->sync))
break;
) >= 0)
return 0;
}
return -EBUSY;
}
/******************************************************************************
* Plane
*****************************************************************************/
@@ -789,8 +752,6 @@ struct nv50_wndw {
u16 ntfy;
u16 sema;
u32 data;
struct nv50_wndw_atom asy;
};
struct nv50_wndw_func {
@@ -1581,151 +1542,6 @@ nv50_base_new(struct nouveau_drm *drm, struct nv50_head *head,
&base->wndw.notify);
}
/******************************************************************************
* Page flipping channel
*****************************************************************************/
struct nouveau_bo *
nv50_display_crtc_sema(struct drm_device *dev, int crtc)
{
return nv50_disp(dev)->sync;
}
struct nv50_display_flip {
struct nv50_disp *disp;
struct nv50_base *base;
};
static bool
nv50_display_flip_wait(void *data)
{
struct nv50_display_flip *flip = data;
if (nouveau_bo_rd32(flip->disp->sync, flip->base->wndw.sema / 4) ==
flip->base->wndw.data)
return true;
usleep_range(1, 2);
return false;
}
void
nv50_display_flip_stop(struct drm_crtc *crtc)
{
struct nvif_device *device = &nouveau_drm(crtc->dev)->device;
struct nv50_base *base = nv50_head(crtc)->_base;
struct nv50_wndw *wndw = &base->wndw;
struct nv50_wndw_atom *asyw = &wndw->asy;
struct nv50_display_flip flip = {
.disp = nv50_disp(crtc->dev),
.base = base,
};
asyw->state.crtc = NULL;
asyw->state.fb = NULL;
nv50_wndw_atomic_check(&wndw->plane, &asyw->state);
nv50_wndw_flush_clr(wndw, 0, true, asyw);
nvif_msec(device, 2000,
if (nv50_display_flip_wait(&flip))
break;
);
}
int
nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
struct nouveau_channel *chan, u32 swap_interval)
{
struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb);
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
struct nv50_head *head = nv50_head(crtc);
struct nv50_base *base = nv50_head(crtc)->_base;
struct nv50_wndw *wndw = &base->wndw;
struct nv50_wndw_atom *asyw = &wndw->asy;
int ret;
if (crtc->primary->fb->width != fb->width ||
crtc->primary->fb->height != fb->height)
return -EINVAL;
if (chan == NULL)
evo_sync(crtc->dev);
if (chan && chan->user.oclass < G82_CHANNEL_GPFIFO) {
ret = RING_SPACE(chan, 8);
if (ret)
return ret;
BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 2);
OUT_RING (chan, NvEvoSema0 + nv_crtc->index);
OUT_RING (chan, base->wndw.sema ^ 0x10);
BEGIN_NV04(chan, 0, NV11_SUBCHAN_SEMAPHORE_RELEASE, 1);
OUT_RING (chan, base->wndw.data + 1);
BEGIN_NV04(chan, 0, NV11_SUBCHAN_SEMAPHORE_OFFSET, 2);
OUT_RING (chan, base->wndw.sema);
OUT_RING (chan, base->wndw.data);
} else
if (chan && chan->user.oclass < FERMI_CHANNEL_GPFIFO) {
u64 addr = nv84_fence_crtc(chan, nv_crtc->index) + base->wndw.sema;
ret = RING_SPACE(chan, 12);
if (ret)
return ret;
BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1);
OUT_RING (chan, chan->vram.handle);
BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
OUT_RING (chan, upper_32_bits(addr ^ 0x10));
OUT_RING (chan, lower_32_bits(addr ^ 0x10));
OUT_RING (chan, base->wndw.data + 1);
OUT_RING (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_WRITE_LONG);
BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
OUT_RING (chan, upper_32_bits(addr));
OUT_RING (chan, lower_32_bits(addr));
OUT_RING (chan, base->wndw.data);
OUT_RING (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_EQUAL);
} else
if (chan) {
u64 addr = nv84_fence_crtc(chan, nv_crtc->index) + base->wndw.sema;
ret = RING_SPACE(chan, 10);
if (ret)
return ret;
BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
OUT_RING (chan, upper_32_bits(addr ^ 0x10));
OUT_RING (chan, lower_32_bits(addr ^ 0x10));
OUT_RING (chan, base->wndw.data + 1);
OUT_RING (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_WRITE_LONG |
NVC0_SUBCHAN_SEMAPHORE_TRIGGER_YIELD);
BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
OUT_RING (chan, upper_32_bits(addr));
OUT_RING (chan, lower_32_bits(addr));
OUT_RING (chan, base->wndw.data);
OUT_RING (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_EQUAL |
NVC0_SUBCHAN_SEMAPHORE_TRIGGER_YIELD);
}
if (chan) {
base->wndw.sema ^= 0x10;
base->wndw.data++;
FIRE_RING (chan);
}
/* queue the flip */
asyw->state.crtc = &head->base.base;
asyw->state.fb = fb;
asyw->interval = swap_interval;
asyw->image.handle = nv_fb->r_handle;
asyw->image.offset = nv_fb->nvbo->bo.offset;
asyw->sema.handle = base->chan.base.sync.handle;
asyw->sema.offset = base->wndw.sema;
asyw->sema.acquire = base->wndw.data++;
asyw->sema.release = base->wndw.data;
nv50_wndw_atomic_check(&wndw->plane, &asyw->state);
asyw->set.sema = true;
nv50_wndw_flush_set(wndw, 0, asyw);
nv50_wndw_wait_armed(wndw, asyw);
nouveau_bo_ref(nv_fb->nvbo, &head->image);
return 0;
}
/******************************************************************************
* Head
*****************************************************************************/
@@ -2610,8 +2426,6 @@ nv50_crtc_create(struct drm_device *dev, int index)
}
crtc = &head->base.base;
head->_base = base;
drm_crtc_init_with_planes(dev, crtc, &base->wndw.plane,
&curs->wndw.plane, &nv50_crtc_func,
"head-%d", head->base.index);
@@ -4061,7 +3875,6 @@ nv50_display_fini(struct drm_device *dev)
int
nv50_display_init(struct drm_device *dev)
{
struct nv50_disp *disp = nv50_disp(dev);
struct drm_encoder *encoder;
struct drm_plane *plane;
struct drm_crtc *crtc;
@@ -4071,13 +3884,6 @@ nv50_display_init(struct drm_device *dev)
if (!push)
return -EBUSY;
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
struct nv50_wndw *wndw = &nv50_head(crtc)->_base->wndw;
nv50_crtc_lut_load(crtc);
nouveau_bo_wr32(disp->sync, wndw->sema / 4, wndw->data);
}
evo_mthd(push, 0x0088, 1);
evo_data(push, nv50_mast(dev)->base.sync.handle);
evo_kick(push, nv50_mast(dev));
@@ -4094,6 +3900,10 @@ nv50_display_init(struct drm_device *dev)
}
}
drm_for_each_crtc(crtc, dev) {
nv50_crtc_lut_load(crtc);
}
drm_for_each_plane(plane, dev) {
struct nv50_wndw *wndw = nv50_wndw(plane);
if (plane->funcs != &nv50_wndw)
@@ -35,11 +35,4 @@ int nv50_display_create(struct drm_device *);
void nv50_display_destroy(struct drm_device *);
int nv50_display_init(struct drm_device *);
void nv50_display_fini(struct drm_device *);
void nv50_display_flip_stop(struct drm_crtc *);
int nv50_display_flip_next(struct drm_crtc *, struct drm_framebuffer *,
struct nouveau_channel *, u32 swap_interval);
struct nouveau_bo *nv50_display_crtc_sema(struct drm_device *, int head);
#endif /* __NV50_DISPLAY_H__ */
@@ -35,13 +35,12 @@
static int
nv50_fence_context_new(struct nouveau_channel *chan)
{
struct drm_device *dev = chan->drm->dev;
struct nv10_fence_priv *priv = chan->drm->fence;
struct nv10_fence_chan *fctx;
struct ttm_mem_reg *mem = &priv->bo->bo.mem;
u32 start = mem->start * PAGE_SIZE;
u32 limit = start + mem->size - 1;
int ret, i;
int ret;
fctx = chan->fence = kzalloc(sizeof(*fctx), GFP_KERNEL);
if (!fctx)
@@ -60,23 +59,6 @@ nv50_fence_context_new(struct nouveau_channel *chan)
.limit = limit,
}, sizeof(struct nv_dma_v0),
&fctx->sema);
/* dma objects for display sync channel semaphore blocks */
for (i = 0; !ret && i < dev->mode_config.num_crtc; i++) {
struct nouveau_bo *bo = nv50_display_crtc_sema(dev, i);
u32 start = bo->bo.mem.start * PAGE_SIZE;
u32 limit = start + bo->bo.mem.size - 1;
ret = nvif_object_init(&chan->user, NvEvoSema0 + i,
NV_DMA_IN_MEMORY, &(struct nv_dma_v0) {
.target = NV_DMA_V0_TARGET_VRAM,
.access = NV_DMA_V0_ACCESS_RDWR,
.start = start,
.limit = limit,
}, sizeof(struct nv_dma_v0),
&fctx->head[i]);
}
if (ret)
nv10_fence_context_del(chan);
return ret;
@@ -28,13 +28,6 @@
#include "nv50_display.h"
u64
nv84_fence_crtc(struct nouveau_channel *chan, int crtc)
{
struct nv84_fence_chan *fctx = chan->fence;
return fctx->dispc_vma[crtc].offset;
}
static int
nv84_fence_emit32(struct nouveau_channel *chan, u64 virtual, u32 sequence)
{
@@ -110,15 +103,8 @@ nv84_fence_read(struct nouveau_channel *chan)
static void
nv84_fence_context_del(struct nouveau_channel *chan)
{
struct drm_device *dev = chan->drm->dev;
struct nv84_fence_priv *priv = chan->drm->fence;
struct nv84_fence_chan *fctx = chan->fence;
int i;
for (i = 0; i < dev->mode_config.num_crtc; i++) {
struct nouveau_bo *bo = nv50_display_crtc_sema(dev, i);
nouveau_bo_vma_del(bo, &fctx->dispc_vma[i]);
}
nouveau_bo_wr32(priv->bo, chan->chid * 16 / 4, fctx->base.sequence);
nouveau_bo_vma_del(priv->bo, &fctx->vma_gart);
@@ -134,7 +120,7 @@ nv84_fence_context_new(struct nouveau_channel *chan)
struct nouveau_cli *cli = (void *)chan->user.client;
struct nv84_fence_priv *priv = chan->drm->fence;
struct nv84_fence_chan *fctx;
int ret, i;
int ret;
fctx = chan->fence = kzalloc(sizeof(*fctx), GFP_KERNEL);
if (!fctx)
@@ -154,12 +140,6 @@ nv84_fence_context_new(struct nouveau_channel *chan)
&fctx->vma_gart);
}
/* map display semaphore buffers into channel's vm */
for (i = 0; !ret && i < chan->drm->dev->mode_config.num_crtc; i++) {
struct nouveau_bo *bo = nv50_display_crtc_sema(chan->drm->dev, i);
ret = nouveau_bo_vma_add(bo, cli->vm, &fctx->dispc_vma[i]);
}
if (ret)
nv84_fence_context_del(chan);
return ret;