Commit 27f3d6cf authored by Ben Skeggs

drm/nouveau/gr: convert user classes to new-style nvkm_object

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
parent b3c98150
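
The conversion replaces the old per-chipset nvkm_oclass arrays (each class paired with an nvkm_ofuncs) with nvkm_sclass tables hung off gf100_gr_func, which the new nvkm_gr_oclass_get()/gf100_gr_object_get() helpers enumerate by index. A minimal standalone sketch of that enumeration scheme follows; the types, names and demo values are simplified illustrations rather than the real nouveau API.

/* Simplified model of the class-table lookup this commit introduces
 * (cf. nvkm_gr_oclass_get() and gf100_gr_object_get() in the diff).
 * Types, names and the demo values are illustrative only.
 */
#include <stdio.h>

struct sclass {
	int oclass;          /* class id; a zero entry terminates the table */
	const char *name;
};

/* walk the zero-terminated table until the requested index is reached;
 * returns the index on success, or the number of entries if out of range
 */
static int
sclass_get(const struct sclass *tbl, int index, const struct sclass **out)
{
	int c = 0;
	while (tbl[c].oclass) {
		if (c++ == index) {
			*out = &tbl[index];
			return index;
		}
	}
	return c;
}

int
main(void)
{
	static const struct sclass fermi[] = {
		{ 0x902d, "FERMI_TWOD_A" },
		{ 0x9039, "FERMI_MEMORY_TO_MEMORY_FORMAT_A" },
		{ 0x9097, "FERMI_A" },
		{ 0x90c0, "FERMI_COMPUTE_A" },
		{}
	};
	const struct sclass *cls = NULL;
	int ret;

	ret = sclass_get(fermi, 2, &cls);       /* valid index */
	if (ret == 2 && cls)
		printf("index 2 -> 0x%04x (%s)\n", cls->oclass, cls->name);

	ret = sclass_get(fermi, 10, &cls);      /* out of range: count returned */
	printf("table holds %d classes\n", ret);
	return 0;
}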
......@@ -65,5 +65,5 @@ u64 nvif_device_time(struct nvif_device *);
#include <engine/sw.h>
#define nvxx_fifo(a) nvxx_device(a)->fifo
#define nvxx_gr(a) nvkm_gr(nvxx_device(a))
#define nvxx_gr(a) nvxx_device(a)->gr
#endif
......@@ -30,7 +30,6 @@ int nvkm_client_new(const char *name, u64 device, const char *cfg,
void nvkm_client_del(struct nvkm_client **);
int nvkm_client_init(struct nvkm_client *);
int nvkm_client_fini(struct nvkm_client *, bool suspend);
const char *nvkm_client_name(void *obj);
static inline struct nvkm_client *
nvkm_client(struct nvkm_object *object)
......
#ifndef __NVKM_GR_H__
#define __NVKM_GR_H__
#include <core/engctx.h>
struct nvkm_gr_chan {
struct nvkm_engctx base;
};
#define nvkm_gr_context_create(p,e,c,g,s,a,f,d) \
nvkm_engctx_create((p), (e), (c), (g), (s), (a), (f), (d))
#define nvkm_gr_context_destroy(d) \
nvkm_engctx_destroy(&(d)->base)
#define nvkm_gr_context_init(d) \
nvkm_engctx_init(&(d)->base)
#define nvkm_gr_context_fini(d,s) \
nvkm_engctx_fini(&(d)->base, (s))
#define _nvkm_gr_context_dtor _nvkm_engctx_dtor
#define _nvkm_gr_context_init _nvkm_engctx_init
#define _nvkm_gr_context_fini _nvkm_engctx_fini
#define _nvkm_gr_context_rd32 _nvkm_engctx_rd32
#define _nvkm_gr_context_wr32 _nvkm_engctx_wr32
#include <core/engine.h>
struct nvkm_gr {
struct nvkm_engine engine;
const struct nvkm_gr_func *func;
/* Returns chipset-specific counts of units packed into a u64. */
u64 (*units)(struct nvkm_gr *);
};
static inline struct nvkm_gr *
nvkm_gr(void *obj)
{
return (void *)nvkm_engine(obj, NVDEV_ENGINE_GR);
}
#define nvkm_gr_create(p,e,c,y,d) \
nvkm_engine_create((p), (e), (c), (y), "PGRAPH", "graphics", (d))
nvkm_gr_create_((p), (e), (c), (y), sizeof(**d), (void **)(d))
int
nvkm_gr_create_(struct nvkm_object *parent, struct nvkm_object *engine,
struct nvkm_oclass *oclass, bool enable,
int length, void **pobject);
#define nvkm_gr_destroy(d) \
nvkm_engine_destroy(&(d)->engine)
#define nvkm_gr_init(d) \
......@@ -79,8 +57,7 @@ extern struct nvkm_oclass *gm20b_gr_oclass;
#include <core/enum.h>
extern const struct nvkm_bitfield nv04_gr_nsource[];
extern struct nvkm_ofuncs nv04_gr_ofuncs;
bool nv04_gr_idle(void *obj);
bool nv04_gr_idle(struct nvkm_gr *);
extern const struct nvkm_bitfield nv10_gr_intr_name[];
extern const struct nvkm_bitfield nv10_gr_nstatus[];
......
......@@ -321,13 +321,3 @@ nvkm_client_new(const char *name, u64 device, const char *cfg,
nvkm_client_del(pclient);
return ret;
}
const char *
nvkm_client_name(void *obj)
{
const char *client_name = "unknown";
struct nvkm_client *client = nvkm_client(obj);
if (client)
client_name = client->name;
return client_name;
}
nvkm-y += nvkm/engine/gr/base.o
nvkm-y += nvkm/engine/gr/nv04.o
nvkm-y += nvkm/engine/gr/nv10.o
nvkm-y += nvkm/engine/gr/nv20.o
......
/*
* Copyright 2015 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs <bskeggs@redhat.com>
*/
#include "priv.h"
#include <engine/fifo.h>
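/* Enumerate the user classes exposed by the bound implementation: prefer the
 * chipset's object_get() hook when it is provided, otherwise walk the static,
 * zero-terminated sclass[] table until the requested index is reached.
 */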
static int
nvkm_gr_oclass_get(struct nvkm_oclass *oclass, int index)
{
struct nvkm_gr *gr = nvkm_gr(oclass->engine);
int c = 0;
if (gr->func->object_get) {
int ret = gr->func->object_get(gr, index, &oclass->base);
if (oclass->base.oclass)
return index;
return ret;
}
while (gr->func->sclass[c].oclass) {
if (c++ == index) {
oclass->base = gr->func->sclass[index];
return index;
}
}
return c;
}
static int
nvkm_gr_cclass_new(struct nvkm_fifo_chan *chan,
const struct nvkm_oclass *oclass,
struct nvkm_object **pobject)
{
struct nvkm_gr *gr = nvkm_gr(oclass->engine);
if (gr->func->chan_new)
return gr->func->chan_new(gr, chan, oclass, pobject);
return 0;
}
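/* engine-level hooks used by the fifo layer: per-channel context creation
 * and user-class enumeration
 */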
struct nvkm_engine_func
nvkm_gr = {
.fifo.cclass = nvkm_gr_cclass_new,
.fifo.sclass = nvkm_gr_oclass_get,
};
int
nvkm_gr_create_(struct nvkm_object *parent, struct nvkm_object *engine,
struct nvkm_oclass *oclass, bool enable,
int length, void **pobject)
{
struct nvkm_gr *gr;
int ret;
ret = nvkm_engine_create_(parent, engine, oclass, enable,
"gr", "gr", length, pobject);
gr = *pobject;
if (ret)
return ret;
gr->engine.func = &nvkm_gr;
return 0;
}
......@@ -1027,23 +1027,23 @@ gf100_grctx_mmio_item(struct gf100_grctx *info, u32 addr, u32 data,
void
gf100_grctx_generate_bundle(struct gf100_grctx *info)
{
const struct gf100_grctx_oclass *impl = gf100_grctx_impl(info->gr);
const struct gf100_grctx_func *grctx = info->gr->func->grctx;
const u32 access = NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS;
const int s = 8;
const int b = mmio_vram(info, impl->bundle_size, (1 << s), access);
const int b = mmio_vram(info, grctx->bundle_size, (1 << s), access);
mmio_refn(info, 0x408004, 0x00000000, s, b);
mmio_wr32(info, 0x408008, 0x80000000 | (impl->bundle_size >> s));
mmio_wr32(info, 0x408008, 0x80000000 | (grctx->bundle_size >> s));
mmio_refn(info, 0x418808, 0x00000000, s, b);
mmio_wr32(info, 0x41880c, 0x80000000 | (impl->bundle_size >> s));
mmio_wr32(info, 0x41880c, 0x80000000 | (grctx->bundle_size >> s));
}
void
gf100_grctx_generate_pagepool(struct gf100_grctx *info)
{
const struct gf100_grctx_oclass *impl = gf100_grctx_impl(info->gr);
const struct gf100_grctx_func *grctx = info->gr->func->grctx;
const u32 access = NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS;
const int s = 8;
const int b = mmio_vram(info, impl->pagepool_size, (1 << s), access);
const int b = mmio_vram(info, grctx->pagepool_size, (1 << s), access);
mmio_refn(info, 0x40800c, 0x00000000, s, b);
mmio_wr32(info, 0x408010, 0x80000000);
mmio_refn(info, 0x419004, 0x00000000, s, b);
......@@ -1054,9 +1054,9 @@ void
gf100_grctx_generate_attrib(struct gf100_grctx *info)
{
struct gf100_gr *gr = info->gr;
const struct gf100_grctx_oclass *impl = gf100_grctx_impl(gr);
const u32 attrib = impl->attrib_nr;
const u32 size = 0x20 * (impl->attrib_nr_max + impl->alpha_nr_max);
const struct gf100_grctx_func *grctx = gr->func->grctx;
const u32 attrib = grctx->attrib_nr;
const u32 size = 0x20 * (grctx->attrib_nr_max + grctx->alpha_nr_max);
const u32 access = NV_MEM_ACCESS_RW;
const int s = 12;
const int b = mmio_vram(info, size * gr->tpc_total, (1 << s), access);
......@@ -1072,7 +1072,7 @@ gf100_grctx_generate_attrib(struct gf100_grctx *info)
const u32 o = TPC_UNIT(gpc, tpc, 0x0520);
mmio_skip(info, o, (attrib << 16) | ++bo);
mmio_wr32(info, o, (attrib << 16) | --bo);
bo += impl->attrib_nr_max;
bo += grctx->attrib_nr_max;
}
}
}
......@@ -1237,22 +1237,22 @@ void
gf100_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
{
struct nvkm_device *device = gr->base.engine.subdev.device;
struct gf100_grctx_oclass *oclass = (void *)nv_engine(gr)->cclass;
const struct gf100_grctx_func *grctx = gr->func->grctx;
nvkm_mc(gr)->unk260(nvkm_mc(gr), 0);
gf100_gr_mmio(gr, oclass->hub);
gf100_gr_mmio(gr, oclass->gpc);
gf100_gr_mmio(gr, oclass->zcull);
gf100_gr_mmio(gr, oclass->tpc);
gf100_gr_mmio(gr, oclass->ppc);
gf100_gr_mmio(gr, grctx->hub);
gf100_gr_mmio(gr, grctx->gpc);
gf100_gr_mmio(gr, grctx->zcull);
gf100_gr_mmio(gr, grctx->tpc);
gf100_gr_mmio(gr, grctx->ppc);
nvkm_wr32(device, 0x404154, 0x00000000);
oclass->bundle(info);
oclass->pagepool(info);
oclass->attrib(info);
oclass->unkn(gr);
grctx->bundle(info);
grctx->pagepool(info);
grctx->attrib(info);
grctx->unkn(gr);
gf100_grctx_generate_tpcid(gr);
gf100_grctx_generate_r406028(gr);
......@@ -1260,16 +1260,16 @@ gf100_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
gf100_grctx_generate_r418bb8(gr);
gf100_grctx_generate_r406800(gr);
gf100_gr_icmd(gr, oclass->icmd);
gf100_gr_icmd(gr, grctx->icmd);
nvkm_wr32(device, 0x404154, 0x00000400);
gf100_gr_mthd(gr, oclass->mthd);
gf100_gr_mthd(gr, grctx->mthd);
nvkm_mc(gr)->unk260(nvkm_mc(gr), 1);
}
int
gf100_grctx_generate(struct gf100_gr *gr)
{
struct gf100_grctx_oclass *oclass = (void *)nv_engine(gr)->cclass;
const struct gf100_grctx_func *grctx = gr->func->grctx;
struct nvkm_subdev *subdev = &gr->base.engine.subdev;
struct nvkm_device *device = subdev->device;
struct nvkm_memory *chan;
......@@ -1352,7 +1352,7 @@ gf100_grctx_generate(struct gf100_gr *gr)
);
}
oclass->main(gr, &info);
grctx->main(gr, &info);
/* trigger a context unload by unsetting the "next channel valid" bit
* and faking a context switch interrupt
......@@ -1383,17 +1383,8 @@ gf100_grctx_generate(struct gf100_gr *gr)
return ret;
}
struct nvkm_oclass *
gf100_grctx_oclass = &(struct gf100_grctx_oclass) {
.base.handle = NV_ENGCTX(GR, 0xc0),
.base.ofuncs = &(struct nvkm_ofuncs) {
.ctor = gf100_gr_context_ctor,
.dtor = gf100_gr_context_dtor,
.init = _nvkm_gr_context_init,
.fini = _nvkm_gr_context_fini,
.rd32 = _nvkm_gr_context_rd32,
.wr32 = _nvkm_gr_context_wr32,
},
const struct gf100_grctx_func
gf100_grctx = {
.main = gf100_grctx_generate_main,
.unkn = gf100_grctx_generate_unkn,
.hub = gf100_grctx_pack_hub,
......@@ -1409,4 +1400,4 @@ gf100_grctx_oclass = &(struct gf100_grctx_oclass) {
.attrib = gf100_grctx_generate_attrib,
.attrib_nr_max = 0x324,
.attrib_nr = 0x218,
}.base;
};
......@@ -19,8 +19,7 @@ void gf100_grctx_mmio_item(struct gf100_grctx *, u32 addr, u32 data, int s, int)
#define mmio_skip(a,b,c) mmio_refn((a), (b), (c), -1, -1)
#define mmio_wr32(a,b,c) mmio_refn((a), (b), (c), 0, -1)
struct gf100_grctx_oclass {
struct nvkm_oclass base;
struct gf100_grctx_func {
/* main context generation function */
void (*main)(struct gf100_gr *, struct gf100_grctx *);
/* context-specific modify-on-first-load list generation function */
......@@ -50,13 +49,7 @@ struct gf100_grctx_oclass {
u32 alpha_nr;
};
static inline const struct gf100_grctx_oclass *
gf100_grctx_impl(struct gf100_gr *gr)
{
return (void *)nv_engine(gr)->cclass;
}
extern struct nvkm_oclass *gf100_grctx_oclass;
extern const struct gf100_grctx_func gf100_grctx;
int gf100_grctx_generate(struct gf100_gr *);
void gf100_grctx_generate_main(struct gf100_gr *, struct gf100_grctx *);
void gf100_grctx_generate_bundle(struct gf100_grctx *);
......@@ -69,20 +62,20 @@ void gf100_grctx_generate_r4060a8(struct gf100_gr *);
void gf100_grctx_generate_r418bb8(struct gf100_gr *);
void gf100_grctx_generate_r406800(struct gf100_gr *);
extern struct nvkm_oclass *gf108_grctx_oclass;
extern const struct gf100_grctx_func gf108_grctx;
void gf108_grctx_generate_attrib(struct gf100_grctx *);
void gf108_grctx_generate_unkn(struct gf100_gr *);
extern struct nvkm_oclass *gf104_grctx_oclass;
extern struct nvkm_oclass *gf110_grctx_oclass;
extern const struct gf100_grctx_func gf104_grctx;
extern const struct gf100_grctx_func gf110_grctx;
extern struct nvkm_oclass *gf117_grctx_oclass;
extern const struct gf100_grctx_func gf117_grctx;
void gf117_grctx_generate_attrib(struct gf100_grctx *);
extern struct nvkm_oclass *gf119_grctx_oclass;
extern const struct gf100_grctx_func gf119_grctx;
extern struct nvkm_oclass *gk104_grctx_oclass;
extern struct nvkm_oclass *gk20a_grctx_oclass;
extern const struct gf100_grctx_func gk104_grctx;
extern const struct gf100_grctx_func gk20a_grctx;
void gk104_grctx_generate_main(struct gf100_gr *, struct gf100_grctx *);
void gk104_grctx_generate_bundle(struct gf100_grctx *);
void gk104_grctx_generate_pagepool(struct gf100_grctx *);
......@@ -95,22 +88,22 @@ void gm107_grctx_generate_bundle(struct gf100_grctx *);
void gm107_grctx_generate_pagepool(struct gf100_grctx *);
void gm107_grctx_generate_attrib(struct gf100_grctx *);
extern struct nvkm_oclass *gk110_grctx_oclass;
extern struct nvkm_oclass *gk110b_grctx_oclass;
extern struct nvkm_oclass *gk208_grctx_oclass;
extern const struct gf100_grctx_func gk110_grctx;
extern const struct gf100_grctx_func gk110b_grctx;
extern const struct gf100_grctx_func gk208_grctx;
extern struct nvkm_oclass *gm107_grctx_oclass;
extern const struct gf100_grctx_func gm107_grctx;
void gm107_grctx_generate_bundle(struct gf100_grctx *);
void gm107_grctx_generate_pagepool(struct gf100_grctx *);
void gm107_grctx_generate_attrib(struct gf100_grctx *);
extern struct nvkm_oclass *gm204_grctx_oclass;
extern const struct gf100_grctx_func gm204_grctx;
void gm204_grctx_generate_main(struct gf100_gr *, struct gf100_grctx *);
void gm204_grctx_generate_tpcid(struct gf100_gr *);
void gm204_grctx_generate_405b60(struct gf100_gr *);
extern struct nvkm_oclass *gm206_grctx_oclass;
extern struct nvkm_oclass *gm20b_grctx_oclass;
extern const struct gf100_grctx_func gm206_grctx;
extern const struct gf100_grctx_func gm20b_grctx;
/* context init value lists */
......
......@@ -79,17 +79,8 @@ gf104_grctx_pack_tpc[] = {
* PGRAPH context implementation
******************************************************************************/
struct nvkm_oclass *
gf104_grctx_oclass = &(struct gf100_grctx_oclass) {
.base.handle = NV_ENGCTX(GR, 0xc3),
.base.ofuncs = &(struct nvkm_ofuncs) {
.ctor = gf100_gr_context_ctor,
.dtor = gf100_gr_context_dtor,
.init = _nvkm_gr_context_init,
.fini = _nvkm_gr_context_fini,
.rd32 = _nvkm_gr_context_rd32,
.wr32 = _nvkm_gr_context_wr32,
},
const struct gf100_grctx_func
gf104_grctx = {
.main = gf100_grctx_generate_main,
.unkn = gf100_grctx_generate_unkn,
.hub = gf100_grctx_pack_hub,
......@@ -105,4 +96,4 @@ gf104_grctx_oclass = &(struct gf100_grctx_oclass) {
.attrib = gf100_grctx_generate_attrib,
.attrib_nr_max = 0x324,
.attrib_nr = 0x218,
}.base;
};
......@@ -731,17 +731,17 @@ void
gf108_grctx_generate_attrib(struct gf100_grctx *info)
{
struct gf100_gr *gr = info->gr;
const struct gf100_grctx_oclass *impl = gf100_grctx_impl(gr);
const u32 alpha = impl->alpha_nr;
const u32 beta = impl->attrib_nr;
const u32 size = 0x20 * (impl->attrib_nr_max + impl->alpha_nr_max);
const struct gf100_grctx_func *grctx = gr->func->grctx;
const u32 alpha = grctx->alpha_nr;
const u32 beta = grctx->attrib_nr;
const u32 size = 0x20 * (grctx->attrib_nr_max + grctx->alpha_nr_max);
const u32 access = NV_MEM_ACCESS_RW;
const int s = 12;
const int b = mmio_vram(info, size * gr->tpc_total, (1 << s), access);
const int timeslice_mode = 1;
const int max_batches = 0xffff;
u32 bo = 0;
u32 ao = bo + impl->attrib_nr_max * gr->tpc_total;
u32 ao = bo + grctx->attrib_nr_max * gr->tpc_total;
int gpc, tpc;
mmio_refn(info, 0x418810, 0x80000000, s, b);
......@@ -757,9 +757,9 @@ gf108_grctx_generate_attrib(struct gf100_grctx *info)
const u32 o = TPC_UNIT(gpc, tpc, 0x500);
mmio_skip(info, o + 0x20, (t << 28) | (b << 16) | ++bo);
mmio_wr32(info, o + 0x20, (t << 28) | (b << 16) | --bo);
bo += impl->attrib_nr_max;
bo += grctx->attrib_nr_max;
mmio_wr32(info, o + 0x44, (a << 16) | ao);
ao += impl->alpha_nr_max;
ao += grctx->alpha_nr_max;
}
}
}
......@@ -776,17 +776,8 @@ gf108_grctx_generate_unkn(struct gf100_gr *gr)
nvkm_mask(device, 0x419c00, 0x00000008, 0x00000008);
}
struct nvkm_oclass *
gf108_grctx_oclass = &(struct gf100_grctx_oclass) {
.base.handle = NV_ENGCTX(GR, 0xc1),
.base.ofuncs = &(struct nvkm_ofuncs) {
.ctor = gf100_gr_context_ctor,
.dtor = gf100_gr_context_dtor,
.init = _nvkm_gr_context_init,
.fini = _nvkm_gr_context_fini,
.rd32 = _nvkm_gr_context_rd32,
.wr32 = _nvkm_gr_context_wr32,
},
const struct gf100_grctx_func
gf108_grctx = {
.main = gf100_grctx_generate_main,
.unkn = gf108_grctx_generate_unkn,
.hub = gf108_grctx_pack_hub,
......@@ -804,4 +795,4 @@ gf108_grctx_oclass = &(struct gf100_grctx_oclass) {
.attrib_nr = 0x218,
.alpha_nr_max = 0x324,
.alpha_nr = 0x218,
}.base;
};
......@@ -330,17 +330,8 @@ gf110_grctx_pack_gpc[] = {
* PGRAPH context implementation
******************************************************************************/
struct nvkm_oclass *
gf110_grctx_oclass = &(struct gf100_grctx_oclass) {
.base.handle = NV_ENGCTX(GR, 0xc8),
.base.ofuncs = &(struct nvkm_ofuncs) {
.ctor = gf100_gr_context_ctor,
.dtor = gf100_gr_context_dtor,
.init = _nvkm_gr_context_init,
.fini = _nvkm_gr_context_fini,
.rd32 = _nvkm_gr_context_rd32,
.wr32 = _nvkm_gr_context_wr32,
},
const struct gf100_grctx_func
gf110_grctx = {
.main = gf100_grctx_generate_main,
.unkn = gf100_grctx_generate_unkn,
.hub = gf100_grctx_pack_hub,
......@@ -356,4 +347,4 @@ gf110_grctx_oclass = &(struct gf100_grctx_oclass) {
.attrib = gf100_grctx_generate_attrib,
.attrib_nr_max = 0x324,
.attrib_nr = 0x218,
}.base;
};
......@@ -183,17 +183,17 @@ void
gf117_grctx_generate_attrib(struct gf100_grctx *info)
{
struct gf100_gr *gr = info->gr;
const struct gf100_grctx_oclass *impl = gf100_grctx_impl(gr);
const u32 alpha = impl->alpha_nr;
const u32 beta = impl->attrib_nr;
const u32 size = 0x20 * (impl->attrib_nr_max + impl->alpha_nr_max);
const struct gf100_grctx_func *grctx = gr->func->grctx;
const u32 alpha = grctx->alpha_nr;
const u32 beta = grctx->attrib_nr;
const u32 size = 0x20 * (grctx->attrib_nr_max + grctx->alpha_nr_max);
const u32 access = NV_MEM_ACCESS_RW;
const int s = 12;
const int b = mmio_vram(info, size * gr->tpc_total, (1 << s), access);
const int timeslice_mode = 1;
const int max_batches = 0xffff;
u32 bo = 0;
u32 ao = bo + impl->attrib_nr_max * gr->tpc_total;
u32 ao = bo + grctx->attrib_nr_max * gr->tpc_total;
int gpc, ppc;
mmio_refn(info, 0x418810, 0x80000000, s, b);
......@@ -209,9 +209,9 @@ gf117_grctx_generate_attrib(struct gf100_grctx *info)
const u32 o = PPC_UNIT(gpc, ppc, 0);
mmio_skip(info, o + 0xc0, (t << 28) | (b << 16) | ++bo);
mmio_wr32(info, o + 0xc0, (t << 28) | (b << 16) | --bo);
bo += impl->attrib_nr_max * gr->ppc_tpc_nr[gpc][ppc];
bo += grctx->attrib_nr_max * gr->ppc_tpc_nr[gpc][ppc];
mmio_wr32(info, o + 0xe4, (a << 16) | ao);
ao += impl->alpha_nr_max * gr->ppc_tpc_nr[gpc][ppc];
ao += grctx->alpha_nr_max * gr->ppc_tpc_nr[gpc][ppc];
}
}
}
......@@ -220,23 +220,23 @@ void
gf117_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
{
struct nvkm_device *device = gr->base.engine.subdev.device;
struct gf100_grctx_oclass *oclass = (void *)nv_engine(gr)->cclass;
const struct gf100_grctx_func *grctx = gr->func->grctx;
int i;
nvkm_mc(gr)->unk260(nvkm_mc(gr), 0);
gf100_gr_mmio(gr, oclass->hub);
gf100_gr_mmio(gr, oclass->gpc);
gf100_gr_mmio(gr, oclass->zcull);
gf100_gr_mmio(gr, oclass->tpc);
gf100_gr_mmio(gr, oclass->ppc);
gf100_gr_mmio(gr, grctx->hub);
gf100_gr_mmio(gr, grctx->gpc);
gf100_gr_mmio(gr, grctx->zcull);
gf100_gr_mmio(gr, grctx->tpc);
gf100_gr_mmio(gr, grctx->ppc);
nvkm_wr32(device, 0x404154, 0x00000000);
oclass->bundle(info);
oclass->pagepool(info);
oclass->attrib(info);
oclass->unkn(gr);
grctx->bundle(info);
grctx->pagepool(info);
grctx->attrib(info);
grctx->unkn(gr);
gf100_grctx_generate_tpcid(gr);
gf100_grctx_generate_r406028(gr);
......@@ -247,23 +247,14 @@ gf117_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
for (i = 0; i < 8; i++)
nvkm_wr32(device, 0x4064d0 + (i * 0x04), 0x00000000);
gf100_gr_icmd(gr, oclass->icmd);
gf100_gr_icmd(gr, grctx->icmd);
nvkm_wr32(device, 0x404154, 0x00000400);
gf100_gr_mthd(gr, oclass->mthd);
gf100_gr_mthd(gr, grctx->mthd);
nvkm_mc(gr)->unk260(nvkm_mc(gr), 1);
}
struct nvkm_oclass *
gf117_grctx_oclass = &(struct gf100_grctx_oclass) {
.base.handle = NV_ENGCTX(GR, 0xd7),
.base.ofuncs = &(struct nvkm_ofuncs) {
.ctor = gf100_gr_context_ctor,
.dtor = gf100_gr_context_dtor,
.init = _nvkm_gr_context_init,
.fini = _nvkm_gr_context_fini,
.rd32 = _nvkm_gr_context_rd32,
.wr32 = _nvkm_gr_context_wr32,
},
const struct gf100_grctx_func
gf117_grctx = {
.main = gf117_grctx_generate_main,
.unkn = gk104_grctx_generate_unkn,
.hub = gf117_grctx_pack_hub,
......@@ -282,4 +273,4 @@ gf117_grctx_oclass = &(struct gf100_grctx_oclass) {
.attrib_nr = 0x218,
.alpha_nr_max = 0x7ff,
.alpha_nr = 0x324,
}.base;
};
......@@ -498,17 +498,8 @@ gf119_grctx_pack_tpc[] = {
* PGRAPH context implementation
******************************************************************************/
struct nvkm_oclass *
gf119_grctx_oclass = &(struct gf100_grctx_oclass) {
.base.handle = NV_ENGCTX(GR, 0xd9),
.base.ofuncs = &(struct nvkm_ofuncs) {
.ctor = gf100_gr_context_ctor,
.dtor = gf100_gr_context_dtor,
.init = _nvkm_gr_context_init,
.fini = _nvkm_gr_context_fini,
.rd32 = _nvkm_gr_context_rd32,
.wr32 = _nvkm_gr_context_wr32,
},
const struct gf100_grctx_func
gf119_grctx = {
.main = gf100_grctx_generate_main,
.unkn = gf108_grctx_generate_unkn,
.hub = gf119_grctx_pack_hub,
......@@ -526,4 +517,4 @@ gf119_grctx_oclass = &(struct gf100_grctx_oclass) {
.attrib_nr = 0x218,
.alpha_nr_max = 0x324,
.alpha_nr = 0x218,
}.base;
};
......@@ -843,27 +843,27 @@ gk104_grctx_pack_ppc[] = {
void
gk104_grctx_generate_bundle(struct gf100_grctx *info)
{
const struct gf100_grctx_oclass *impl = gf100_grctx_impl(info->gr);
const u32 state_limit = min(impl->bundle_min_gpm_fifo_depth,
impl->bundle_size / 0x20);
const u32 token_limit = impl->bundle_token_limit;
const struct gf100_grctx_func *grctx = info->gr->func->grctx;
const u32 state_limit = min(grctx->bundle_min_gpm_fifo_depth,
grctx->bundle_size / 0x20);
const u32 token_limit = grctx->bundle_token_limit;
const u32 access = NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS;
const int s = 8;
const int b = mmio_vram(info, impl->bundle_size, (1 << s), access);
const int b = mmio_vram(info, grctx->bundle_size, (1 << s), access);
mmio_refn(info, 0x408004, 0x00000000, s, b);
mmio_wr32(info, 0x408008, 0x80000000 | (impl->bundle_size >> s));
mmio_wr32(info, 0x408008, 0x80000000 | (grctx->bundle_size >> s));
mmio_refn(info, 0x418808, 0x00000000, s, b);
mmio_wr32(info, 0x41880c, 0x80000000 | (impl->bundle_size >> s));
mmio_wr32(info, 0x41880c, 0x80000000 | (grctx->bundle_size >> s));
mmio_wr32(info, 0x4064c8, (state_limit << 16) | token_limit);
}
void
gk104_grctx_generate_pagepool(struct gf100_grctx *info)
{
const struct gf100_grctx_oclass *impl = gf100_grctx_impl(info->gr);
const struct gf100_grctx_func *grctx = info->gr->func->grctx;
const u32 access = NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS;
const int s = 8;
const int b = mmio_vram(info, impl->pagepool_size, (1 << s), access);
const int b = mmio_vram(info, grctx->pagepool_size, (1 << s), access);
mmio_refn(info, 0x40800c, 0x00000000, s, b);
mmio_wr32(info, 0x408010, 0x80000000);
mmio_refn(info, 0x419004, 0x00000000, s, b);
......@@ -955,23 +955,23 @@ void
gk104_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
{
struct nvkm_device *device = gr->base.engine.subdev.device;
struct gf100_grctx_oclass *oclass = (void *)nv_engine(gr)->cclass;
const struct gf100_grctx_func *grctx = gr->func->grctx;
int i;
nvkm_mc(gr)->unk260(nvkm_mc(gr), 0);
gf100_gr_mmio(gr, oclass->hub);
gf100_gr_mmio(gr, oclass->gpc);
gf100_gr_mmio(gr, oclass->zcull);
gf100_gr_mmio(gr, oclass->tpc);
gf100_gr_mmio(gr, oclass->ppc);
gf100_gr_mmio(gr, grctx->hub);
gf100_gr_mmio(gr, grctx->gpc);
gf100_gr_mmio(gr, grctx->zcull);
gf100_gr_mmio(gr, grctx->tpc);
gf100_gr_mmio(gr, grctx->ppc);
nvkm_wr32(device, 0x404154, 0x00000000);
oclass->bundle(info);
oclass->pagepool(info);
oclass->attrib(info);
oclass->unkn(gr);
grctx->bundle(info);
grctx->pagepool(info);
grctx->attrib(info);
grctx->unkn(gr);
gf100_grctx_generate_tpcid(gr);
gf100_grctx_generate_r406028(gr);
......@@ -985,26 +985,17 @@ gk104_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
gk104_grctx_generate_rop_active_fbps(gr);
nvkm_mask(device, 0x419f78, 0x00000001, 0x00000000);
gf100_gr_icmd(gr, oclass->icmd);
gf100_gr_icmd(gr, grctx->icmd);
nvkm_wr32(device, 0x404154, 0x00000400);
gf100_gr_mthd(gr, oclass->mthd);
gf100_gr_mthd(gr, grctx->mthd);
nvkm_mc(gr)->unk260(nvkm_mc(gr), 1);
nvkm_mask(device, 0x418800, 0x00200000, 0x00200000);
nvkm_mask(device, 0x41be10, 0x00800000, 0x00800000);
}
struct nvkm_oclass *
gk104_grctx_oclass = &(struct gf100_grctx_oclass) {
.base.handle = NV_ENGCTX(GR, 0xe4),
.base.ofuncs = &(struct nvkm_ofuncs) {
.ctor = gf100_gr_context_ctor,
.dtor = gf100_gr_context_dtor,
.init = _nvkm_gr_context_init,
.fini = _nvkm_gr_context_fini,
.rd32 = _nvkm_gr_context_rd32,
.wr32 = _nvkm_gr_context_wr32,
},
const struct gf100_grctx_func
gk104_grctx = {
.main = gk104_grctx_generate_main,
.unkn = gk104_grctx_generate_unkn,
.hub = gk104_grctx_pack_hub,
......@@ -1025,4 +1016,4 @@ gk104_grctx_oclass = &(struct gf100_grctx_oclass) {
.attrib_nr = 0x218,
.alpha_nr_max = 0x7ff,
.alpha_nr = 0x648,
}.base;
};
......@@ -808,17 +808,8 @@ gk110_grctx_pack_ppc[] = {
* PGRAPH context implementation
******************************************************************************/
struct nvkm_oclass *
gk110_grctx_oclass = &(struct gf100_grctx_oclass) {
.base.handle = NV_ENGCTX(GR, 0xf0),
.base.ofuncs = &(struct nvkm_ofuncs) {
.ctor = gf100_gr_context_ctor,
.dtor = gf100_gr_context_dtor,
.init = _nvkm_gr_context_init,
.fini = _nvkm_gr_context_fini,
.rd32 = _nvkm_gr_context_rd32,
.wr32 = _nvkm_gr_context_wr32,
},
const struct gf100_grctx_func
gk110_grctx = {
.main = gk104_grctx_generate_main,
.unkn = gk104_grctx_generate_unkn,
.hub = gk110_grctx_pack_hub,
......@@ -839,4 +830,4 @@ gk110_grctx_oclass = &(struct gf100_grctx_oclass) {
.attrib_nr = 0x218,
.alpha_nr_max = 0x7ff,
.alpha_nr = 0x648,
}.base;
};
......@@ -69,17 +69,8 @@ gk110b_grctx_pack_tpc[] = {
* PGRAPH context implementation
******************************************************************************/
struct nvkm_oclass *
gk110b_grctx_oclass = &(struct gf100_grctx_oclass) {
.base.handle = NV_ENGCTX(GR, 0xf1),
.base.ofuncs = &(struct nvkm_ofuncs) {
.ctor = gf100_gr_context_ctor,
.dtor = gf100_gr_context_dtor,
.init = _nvkm_gr_context_init,
.fini = _nvkm_gr_context_fini,
.rd32 = _nvkm_gr_context_rd32,
.wr32 = _nvkm_gr_context_wr32,
},
const struct gf100_grctx_func
gk110b_grctx = {
.main = gk104_grctx_generate_main,
.unkn = gk104_grctx_generate_unkn,
.hub = gk110_grctx_pack_hub,
......@@ -100,4 +91,4 @@ gk110b_grctx_oclass = &(struct gf100_grctx_oclass) {
.attrib_nr = 0x218,
.alpha_nr_max = 0x7ff,
.alpha_nr = 0x648,
}.base;
};
......@@ -530,17 +530,8 @@ gk208_grctx_pack_ppc[] = {
* PGRAPH context implementation
******************************************************************************/
struct nvkm_oclass *
gk208_grctx_oclass = &(struct gf100_grctx_oclass) {
.base.handle = NV_ENGCTX(GR, 0x08),
.base.ofuncs = &(struct nvkm_ofuncs) {
.ctor = gf100_gr_context_ctor,
.dtor = gf100_gr_context_dtor,
.init = _nvkm_gr_context_init,
.fini = _nvkm_gr_context_fini,
.rd32 = _nvkm_gr_context_rd32,
.wr32 = _nvkm_gr_context_wr32,
},
const struct gf100_grctx_func
gk208_grctx = {
.main = gk104_grctx_generate_main,
.unkn = gk104_grctx_generate_unkn,
.hub = gk208_grctx_pack_hub,
......@@ -561,4 +552,4 @@ gk208_grctx_oclass = &(struct gf100_grctx_oclass) {
.attrib_nr = 0x218,
.alpha_nr_max = 0x7ff,
.alpha_nr = 0x648,
}.base;
};
......@@ -29,7 +29,7 @@ static void
gk20a_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
{
struct nvkm_device *device = gr->base.engine.subdev.device;
struct gf100_grctx_oclass *oclass = (void *)nv_engine(gr)->cclass;
const struct gf100_grctx_func *grctx = gr->func->grctx;
int idle_timeout_save;
int i;
......@@ -40,9 +40,9 @@ gk20a_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
idle_timeout_save = nvkm_rd32(device, 0x404154);
nvkm_wr32(device, 0x404154, 0x00000000);
oclass->attrib(info);
grctx->attrib(info);
oclass->unkn(gr);
grctx->unkn(gr);
gf100_grctx_generate_tpcid(gr);
gf100_grctx_generate_r406028(gr);
......@@ -67,21 +67,12 @@ gk20a_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
gf100_gr_wait_idle(gr);
gf100_gr_icmd(gr, gr->fuc_bundle);
oclass->pagepool(info);
oclass->bundle(info);
grctx->pagepool(info);
grctx->bundle(info);
}
struct nvkm_oclass *
gk20a_grctx_oclass = &(struct gf100_grctx_oclass) {
.base.handle = NV_ENGCTX(GR, 0xea),
.base.ofuncs = &(struct nvkm_ofuncs) {
.ctor = gf100_gr_context_ctor,
.dtor = gf100_gr_context_dtor,
.init = _nvkm_gr_context_init,
.fini = _nvkm_gr_context_fini,
.rd32 = _nvkm_gr_context_rd32,
.wr32 = _nvkm_gr_context_wr32,
},
const struct gf100_grctx_func
gk20a_grctx = {
.main = gk20a_grctx_generate_main,
.unkn = gk104_grctx_generate_unkn,
.bundle = gk104_grctx_generate_bundle,
......@@ -95,4 +86,4 @@ gk20a_grctx_oclass = &(struct gf100_grctx_oclass) {
.attrib_nr = 0x240,
.alpha_nr_max = 0x648 + (0x648 / 2),
.alpha_nr = 0x648,
}.base;
};
......@@ -863,27 +863,27 @@ gm107_grctx_pack_ppc[] = {
void
gm107_grctx_generate_bundle(struct gf100_grctx *info)
{
const struct gf100_grctx_oclass *impl = gf100_grctx_impl(info->gr);
const u32 state_limit = min(impl->bundle_min_gpm_fifo_depth,
impl->bundle_size / 0x20);
const u32 token_limit = impl->bundle_token_limit;
const struct gf100_grctx_func *grctx = info->gr->func->grctx;
const u32 state_limit = min(grctx->bundle_min_gpm_fifo_depth,
grctx->bundle_size / 0x20);
const u32 token_limit = grctx->bundle_token_limit;
const u32 access = NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS;
const int s = 8;
const int b = mmio_vram(info, impl->bundle_size, (1 << s), access);
const int b = mmio_vram(info, grctx->bundle_size, (1 << s), access);
mmio_refn(info, 0x408004, 0x00000000, s, b);
mmio_wr32(info, 0x408008, 0x80000000 | (impl->bundle_size >> s));
mmio_wr32(info, 0x408008, 0x80000000 | (grctx->bundle_size >> s));
mmio_refn(info, 0x418e24, 0x00000000, s, b);
mmio_wr32(info, 0x418e28, 0x80000000 | (impl->bundle_size >> s));
mmio_wr32(info, 0x418e28, 0x80000000 | (grctx->bundle_size >> s));
mmio_wr32(info, 0x4064c8, (state_limit << 16) | token_limit);
}
void
gm107_grctx_generate_pagepool(struct gf100_grctx *info)
{
const struct gf100_grctx_oclass *impl = gf100_grctx_impl(info->gr);
const struct gf100_grctx_func *grctx = info->gr->func->grctx;
const u32 access = NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS;
const int s = 8;
const int b = mmio_vram(info, impl->pagepool_size, (1 << s), access);
const int b = mmio_vram(info, grctx->pagepool_size, (1 << s), access);
mmio_refn(info, 0x40800c, 0x00000000, s, b);
mmio_wr32(info, 0x408010, 0x80000000);
mmio_refn(info, 0x419004, 0x00000000, s, b);
......@@ -896,16 +896,16 @@ void
gm107_grctx_generate_attrib(struct gf100_grctx *info)
{
struct gf100_gr *gr = info->gr;
const struct gf100_grctx_oclass *impl = (void *)gf100_grctx_impl(gr);
const u32 alpha = impl->alpha_nr;
const u32 attrib = impl->attrib_nr;
const u32 size = 0x20 * (impl->attrib_nr_max + impl->alpha_nr_max);
const struct gf100_grctx_func *grctx = gr->func->grctx;
const u32 alpha = grctx->alpha_nr;
const u32 attrib = grctx->attrib_nr;
const u32 size = 0x20 * (grctx->attrib_nr_max + grctx->alpha_nr_max);
const u32 access = NV_MEM_ACCESS_RW;
const int s = 12;
const int b = mmio_vram(info, size * gr->tpc_total, (1 << s), access);
const int max_batches = 0xffff;
u32 bo = 0;
u32 ao = bo + impl->attrib_nr_max * gr->tpc_total;
u32 ao = bo + grctx->attrib_nr_max * gr->tpc_total;
int gpc, ppc, n = 0;
mmio_refn(info, 0x418810, 0x80000000, s, b);
......@@ -922,10 +922,10 @@ gm107_grctx_generate_attrib(struct gf100_grctx *info)
const u32 o = PPC_UNIT(gpc, ppc, 0);
mmio_wr32(info, o + 0xc0, bs);
mmio_wr32(info, o + 0xf4, bo);
bo += impl->attrib_nr_max * gr->ppc_tpc_nr[gpc][ppc];
bo += grctx->attrib_nr_max * gr->ppc_tpc_nr[gpc][ppc];
mmio_wr32(info, o + 0xe4, as);
mmio_wr32(info, o + 0xf8, ao);
ao += impl->alpha_nr_max * gr->ppc_tpc_nr[gpc][ppc];
ao += grctx->alpha_nr_max * gr->ppc_tpc_nr[gpc][ppc];
mmio_wr32(info, u, ((bs / 3 /*XXX*/) << 16) | bs);
}
}
......@@ -956,21 +956,21 @@ static void
gm107_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
{
struct nvkm_device *device = gr->base.engine.subdev.device;
struct gf100_grctx_oclass *oclass = (void *)nv_engine(gr)->cclass;
const struct gf100_grctx_func *grctx = gr->func->grctx;
int i;
gf100_gr_mmio(gr, oclass->hub);
gf100_gr_mmio(gr, oclass->gpc);
gf100_gr_mmio(gr, oclass->zcull);
gf100_gr_mmio(gr, oclass->tpc);
gf100_gr_mmio(gr, oclass->ppc);
gf100_gr_mmio(gr, grctx->hub);
gf100_gr_mmio(gr, grctx->gpc);
gf100_gr_mmio(gr, grctx->zcull);
gf100_gr_mmio(gr, grctx->tpc);
gf100_gr_mmio(gr, grctx->ppc);
nvkm_wr32(device, 0x404154, 0x00000000);
oclass->bundle(info);
oclass->pagepool(info);
oclass->attrib(info);
oclass->unkn(gr);
grctx->bundle(info);
grctx->pagepool(info);
grctx->attrib(info);
grctx->unkn(gr);
gm107_grctx_generate_tpcid(gr);
gf100_grctx_generate_r406028(gr);
......@@ -986,9 +986,9 @@ gm107_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
gk104_grctx_generate_rop_active_fbps(gr);
gf100_gr_icmd(gr, oclass->icmd);
gf100_gr_icmd(gr, grctx->icmd);
nvkm_wr32(device, 0x404154, 0x00000400);
gf100_gr_mthd(gr, oclass->mthd);
gf100_gr_mthd(gr, grctx->mthd);
nvkm_mask(device, 0x419e00, 0x00808080, 0x00808080);
nvkm_mask(device, 0x419ccc, 0x80000000, 0x80000000);
......@@ -996,17 +996,8 @@ gm107_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
nvkm_mask(device, 0x419f88, 0x80000000, 0x80000000);
}
struct nvkm_oclass *
gm107_grctx_oclass = &(struct gf100_grctx_oclass) {
.base.handle = NV_ENGCTX(GR, 0x08),
.base.ofuncs = &(struct nvkm_ofuncs) {
.ctor = gf100_gr_context_ctor,
.dtor = gf100_gr_context_dtor,
.init = _nvkm_gr_context_init,
.fini = _nvkm_gr_context_fini,
.rd32 = _nvkm_gr_context_rd32,
.wr32 = _nvkm_gr_context_wr32,
},
const struct gf100_grctx_func
gm107_grctx = {
.main = gm107_grctx_generate_main,
.unkn = gk104_grctx_generate_unkn,
.hub = gm107_grctx_pack_hub,
......@@ -1027,4 +1018,4 @@ gm107_grctx_oclass = &(struct gf100_grctx_oclass) {
.attrib_nr = 0xaa0,
.alpha_nr_max = 0x1800,
.alpha_nr = 0x1000,
}.base;
};
......@@ -981,22 +981,22 @@ void
gm204_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
{
struct nvkm_device *device = gr->base.engine.subdev.device;
struct gf100_grctx_oclass *oclass = (void *)nv_engine(gr)->cclass;
const struct gf100_grctx_func *grctx = gr->func->grctx;
u32 tmp;
int i;
gf100_gr_mmio(gr, oclass->hub);
gf100_gr_mmio(gr, oclass->gpc);
gf100_gr_mmio(gr, oclass->zcull);
gf100_gr_mmio(gr, oclass->tpc);
gf100_gr_mmio(gr, oclass->ppc);
gf100_gr_mmio(gr, grctx->hub);
gf100_gr_mmio(gr, grctx->gpc);
gf100_gr_mmio(gr, grctx->zcull);
gf100_gr_mmio(gr, grctx->tpc);
gf100_gr_mmio(gr, grctx->ppc);
nvkm_wr32(device, 0x404154, 0x00000000);
oclass->bundle(info);
oclass->pagepool(info);
oclass->attrib(info);
oclass->unkn(gr);
grctx->bundle(info);
grctx->pagepool(info);
grctx->attrib(info);
grctx->unkn(gr);
gm204_grctx_generate_tpcid(gr);
gf100_grctx_generate_r406028(gr);
......@@ -1016,25 +1016,16 @@ gm204_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
gm204_grctx_generate_405b60(gr);
gf100_gr_icmd(gr, oclass->icmd);
gf100_gr_icmd(gr, grctx->icmd);
nvkm_wr32(device, 0x404154, 0x00000800);
gf100_gr_mthd(gr, oclass->mthd);
gf100_gr_mthd(gr, grctx->mthd);
nvkm_mask(device, 0x418e94, 0xffffffff, 0xc4230000);
nvkm_mask(device, 0x418e4c, 0xffffffff, 0x70000000);
}
struct nvkm_oclass *
gm204_grctx_oclass = &(struct gf100_grctx_oclass) {
.base.handle = NV_ENGCTX(GR, 0x24),
.base.ofuncs = &(struct nvkm_ofuncs) {
.ctor = gf100_gr_context_ctor,
.dtor = gf100_gr_context_dtor,
.init = _nvkm_gr_context_init,
.fini = _nvkm_gr_context_fini,
.rd32 = _nvkm_gr_context_rd32,
.wr32 = _nvkm_gr_context_wr32,
},
const struct gf100_grctx_func
gm204_grctx = {
.main = gm204_grctx_generate_main,
.unkn = gk104_grctx_generate_unkn,
.hub = gm204_grctx_pack_hub,
......@@ -1055,4 +1046,4 @@ gm204_grctx_oclass = &(struct gf100_grctx_oclass) {
.attrib_nr = 0x400,
.alpha_nr_max = 0x1800,
.alpha_nr = 0x1000,
}.base;
};
......@@ -49,17 +49,8 @@ gm206_grctx_pack_gpc[] = {
{}
};
struct nvkm_oclass *
gm206_grctx_oclass = &(struct gf100_grctx_oclass) {
.base.handle = NV_ENGCTX(GR, 0x26),
.base.ofuncs = &(struct nvkm_ofuncs) {
.ctor = gf100_gr_context_ctor,
.dtor = gf100_gr_context_dtor,
.init = _nvkm_gr_context_init,
.fini = _nvkm_gr_context_fini,
.rd32 = _nvkm_gr_context_rd32,
.wr32 = _nvkm_gr_context_wr32,
},
const struct gf100_grctx_func
gm206_grctx = {
.main = gm204_grctx_generate_main,
.unkn = gk104_grctx_generate_unkn,
.hub = gm204_grctx_pack_hub,
......@@ -80,4 +71,4 @@ gm206_grctx_oclass = &(struct gf100_grctx_oclass) {
.attrib_nr = 0x400,
.alpha_nr_max = 0x1800,
.alpha_nr = 0x1000,
}.base;
};
......@@ -39,7 +39,7 @@ static void
gm20b_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
{
struct nvkm_device *device = gr->base.engine.subdev.device;
struct gf100_grctx_oclass *oclass = (void *)nv_engine(gr)->cclass;
const struct gf100_grctx_func *grctx = gr->func->grctx;
int idle_timeout_save;
int i, tmp;
......@@ -50,9 +50,9 @@ gm20b_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
idle_timeout_save = nvkm_rd32(device, 0x404154);
nvkm_wr32(device, 0x404154, 0x00000000);
oclass->attrib(info);
grctx->attrib(info);
oclass->unkn(gr);
grctx->unkn(gr);
gm204_grctx_generate_tpcid(gr);
gm20b_grctx_generate_r406028(gr);
......@@ -81,21 +81,12 @@ gm20b_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
gf100_gr_wait_idle(gr);
gf100_gr_icmd(gr, gr->fuc_bundle);
oclass->pagepool(info);
oclass->bundle(info);
grctx->pagepool(info);
grctx->bundle(info);
}
struct nvkm_oclass *
gm20b_grctx_oclass = &(struct gf100_grctx_oclass) {
.base.handle = NV_ENGCTX(GR, 0x2b),
.base.ofuncs = &(struct nvkm_ofuncs) {
.ctor = gf100_gr_context_ctor,
.dtor = gf100_gr_context_dtor,
.init = _nvkm_gr_context_init,
.fini = _nvkm_gr_context_fini,
.rd32 = _nvkm_gr_context_rd32,
.wr32 = _nvkm_gr_context_wr32,
},
const struct gf100_grctx_func
gm20b_grctx = {
.main = gm20b_grctx_generate_main,
.unkn = gk104_grctx_generate_unkn,
.bundle = gm107_grctx_generate_bundle,
......@@ -109,4 +100,4 @@ gm20b_grctx_oclass = &(struct gf100_grctx_oclass) {
.attrib_nr = 0x400,
.alpha_nr_max = 0xc00,
.alpha_nr = 0x800,
}.base;
};
......@@ -580,7 +580,6 @@ nv40_gr_construct_shader(struct nvkm_grctx *ctx)
if (ctx->mode != NVKM_GRCTX_VALS)
return;
nvkm_kmap(obj);
offset += 0x0280/4;
for (i = 0; i < 16; i++, offset += 2)
nvkm_wo32(obj, offset * 4, 0x3f800000);
......@@ -591,7 +590,6 @@ nv40_gr_construct_shader(struct nvkm_grctx *ctx)
for (i = 0; i < vs_nr_b1 * 4; i += 4)
nvkm_wo32(obj, (offset + b1_offset + i) * 4, 0x3f800000);
}
nvkm_done(obj);
}
static void
......
......@@ -125,8 +125,6 @@ gr_def(struct nvkm_grctx *ctx, u32 reg, u32 val)
reg = (reg - 0x00400000) / 4;
reg = (reg - ctx->ctxprog_reg) + ctx->ctxvals_base;
nvkm_kmap(ctx->data);
nvkm_wo32(ctx->data, reg * 4, val);
nvkm_done(ctx->data);
}
#endif
......@@ -784,10 +784,8 @@ static void
dd_emit(struct nvkm_grctx *ctx, int num, u32 val) {
int i;
if (val && ctx->mode == NVKM_GRCTX_VALS) {
nvkm_kmap(ctx->data);
for (i = 0; i < num; i++)
nvkm_wo32(ctx->data, 4 * (ctx->ctxvals_pos + i), val);
nvkm_done(ctx->data);
}
ctx->ctxvals_pos += num;
}
......@@ -1159,10 +1157,8 @@ static void
xf_emit(struct nvkm_grctx *ctx, int num, u32 val) {
int i;
if (val && ctx->mode == NVKM_GRCTX_VALS) {
nvkm_kmap(ctx->data);
for (i = 0; i < num; i++)
nvkm_wo32(ctx->data, 4 * (ctx->ctxvals_pos + (i << 3)), val);
nvkm_done(ctx->data);
}
ctx->ctxvals_pos += num << 3;
}
......
......@@ -223,12 +223,8 @@ gf100_fermi_mthd(struct nvkm_object *object, u32 mthd, void *data, u32 size)
return -EINVAL;
}
struct nvkm_ofuncs
gf100_fermi_ofuncs = {
.ctor = _nvkm_object_ctor,
.dtor = nvkm_object_destroy,
.init = _nvkm_object_init,
.fini = _nvkm_object_fini,
const struct nvkm_object_func
gf100_fermi = {
.mthd = gf100_fermi_mthd,
};
......@@ -259,40 +255,106 @@ gf100_gr_mthd_sw(struct nvkm_device *device, u16 class, u32 mthd, u32 data)
return false;
}
struct nvkm_oclass
gf100_gr_sclass[] = {
{ FERMI_TWOD_A, &nvkm_object_ofuncs },
{ FERMI_MEMORY_TO_MEMORY_FORMAT_A, &nvkm_object_ofuncs },
{ FERMI_A, &gf100_fermi_ofuncs },
{ FERMI_COMPUTE_A, &nvkm_object_ofuncs },
{}
};
static int
gf100_gr_object_get(struct nvkm_gr *base, int index, struct nvkm_sclass *sclass)
{
struct gf100_gr *gr = gf100_gr(base);
int c = 0;
while (gr->func->sclass[c].oclass) {
if (c++ == index) {
*sclass = gr->func->sclass[index];
return index;
}
}
return c;
}
/*******************************************************************************
* PGRAPH context
******************************************************************************/
int
gf100_gr_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
struct nvkm_oclass *oclass, void *args, u32 size,
struct nvkm_object **pobject)
static int
gf100_gr_chan_bind(struct nvkm_object *object, struct nvkm_gpuobj *parent,
int align, struct nvkm_gpuobj **pgpuobj)
{
struct nvkm_vm *vm = nvkm_client(parent)->vm;
struct gf100_gr *gr = (void *)engine;
struct gf100_gr_chan *chan = gf100_gr_chan(object);
struct gf100_gr *gr = chan->gr;
int ret, i;
ret = nvkm_gpuobj_new(gr->base.engine.subdev.device, gr->size,
align, false, parent, pgpuobj);
if (ret)
return ret;
nvkm_kmap(*pgpuobj);
for (i = 0; i < gr->size; i += 4)
nvkm_wo32(*pgpuobj, i, gr->data[i / 4]);
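/* point the context image at the per-channel mmio list; the pointer layout
 * differs between the built-in ctxsw ucode and external firmware
 */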
if (!gr->firmware) {
nvkm_wo32(*pgpuobj, 0x00, chan->mmio_nr / 2);
nvkm_wo32(*pgpuobj, 0x04, chan->mmio_vma.offset >> 8);
} else {
nvkm_wo32(*pgpuobj, 0xf4, 0);
nvkm_wo32(*pgpuobj, 0xf8, 0);
nvkm_wo32(*pgpuobj, 0x10, chan->mmio_nr / 2);
nvkm_wo32(*pgpuobj, 0x14, lower_32_bits(chan->mmio_vma.offset));
nvkm_wo32(*pgpuobj, 0x18, upper_32_bits(chan->mmio_vma.offset));
nvkm_wo32(*pgpuobj, 0x1c, 1);
nvkm_wo32(*pgpuobj, 0x20, 0);
nvkm_wo32(*pgpuobj, 0x28, 0);
nvkm_wo32(*pgpuobj, 0x2c, 0);
}
nvkm_done(*pgpuobj);
return 0;
}
static void *
gf100_gr_chan_dtor(struct nvkm_object *object)
{
struct gf100_gr_chan *chan = gf100_gr_chan(object);
int i;
for (i = 0; i < ARRAY_SIZE(chan->data); i++) {
if (chan->data[i].vma.node) {
nvkm_vm_unmap(&chan->data[i].vma);
nvkm_vm_put(&chan->data[i].vma);
}
nvkm_memory_del(&chan->data[i].mem);
}
if (chan->mmio_vma.node) {
nvkm_vm_unmap(&chan->mmio_vma);
nvkm_vm_put(&chan->mmio_vma);
}
nvkm_memory_del(&chan->mmio);
return chan;
}
static const struct nvkm_object_func
gf100_gr_chan = {
.dtor = gf100_gr_chan_dtor,
.bind = gf100_gr_chan_bind,
};
static int
gf100_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch,
const struct nvkm_oclass *oclass,
struct nvkm_object **pobject)
{
struct gf100_gr *gr = gf100_gr(base);
struct gf100_gr_data *data = gr->mmio_data;
struct gf100_gr_mmio *mmio = gr->mmio_list;
struct gf100_gr_chan *chan;
struct nvkm_device *device = gr->base.engine.subdev.device;
struct nvkm_gpuobj *image;
int ret, i;
/* allocate memory for context, and fill with default values */
ret = nvkm_gr_context_create(parent, engine, oclass, NULL,
gr->size, 0x100,
NVOBJ_FLAG_ZERO_ALLOC, &chan);
*pobject = nv_object(chan);
if (ret)
return ret;
if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
return -ENOMEM;
nvkm_object_ctor(&gf100_gr_chan, oclass, &chan->object);
chan->gr = gr;
*pobject = &chan->object;
/* allocate memory for a "mmio list" buffer that's used by the HUB
* fuc to modify some per-context register settings on first load
......@@ -303,7 +365,7 @@ gf100_gr_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
if (ret)
return ret;
ret = nvkm_vm_get(vm, 0x1000, 12, NV_MEM_ACCESS_RW |
ret = nvkm_vm_get(fifoch->vm, 0x1000, 12, NV_MEM_ACCESS_RW |
NV_MEM_ACCESS_SYS, &chan->mmio_vma);
if (ret)
return ret;
......@@ -318,8 +380,9 @@ gf100_gr_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
if (ret)
return ret;
ret = nvkm_vm_get(vm, nvkm_memory_size(chan->data[i].mem),
12, data->access, &chan->data[i].vma);
ret = nvkm_vm_get(fifoch->vm,
nvkm_memory_size(chan->data[i].mem), 12,
data->access, &chan->data[i].vma);
if (ret)
return ret;
......@@ -343,53 +406,9 @@ gf100_gr_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
mmio++;
}
nvkm_done(chan->mmio);
image = &chan->base.base.gpuobj;
nvkm_kmap(image);
for (i = 0; i < gr->size; i += 4)
nvkm_wo32(image, i, gr->data[i / 4]);
if (!gr->firmware) {
nvkm_wo32(image, 0x00, chan->mmio_nr / 2);
nvkm_wo32(image, 0x04, chan->mmio_vma.offset >> 8);
} else {
nvkm_wo32(image, 0xf4, 0);
nvkm_wo32(image, 0xf8, 0);
nvkm_wo32(image, 0x10, chan->mmio_nr / 2);
nvkm_wo32(image, 0x14, lower_32_bits(chan->mmio_vma.offset));
nvkm_wo32(image, 0x18, upper_32_bits(chan->mmio_vma.offset));
nvkm_wo32(image, 0x1c, 1);
nvkm_wo32(image, 0x20, 0);
nvkm_wo32(image, 0x28, 0);
nvkm_wo32(image, 0x2c, 0);
}
nvkm_done(image);
return 0;
}
void
gf100_gr_context_dtor(struct nvkm_object *object)
{
struct gf100_gr_chan *chan = (void *)object;
int i;
for (i = 0; i < ARRAY_SIZE(chan->data); i++) {
if (chan->data[i].vma.node) {
nvkm_vm_unmap(&chan->data[i].vma);
nvkm_vm_put(&chan->data[i].vma);
}
nvkm_memory_del(&chan->data[i].mem);
}
if (chan->mmio_vma.node) {
nvkm_vm_unmap(&chan->mmio_vma);
nvkm_vm_put(&chan->mmio_vma);
}
nvkm_memory_del(&chan->mmio);
nvkm_gr_context_destroy(&chan->base);
}
/*******************************************************************************
* PGRAPH register lists
******************************************************************************/
......@@ -1312,10 +1331,10 @@ gf100_gr_init_csdata(struct gf100_gr *gr,
int
gf100_gr_init_ctxctl(struct gf100_gr *gr)
{
const struct gf100_grctx_func *grctx = gr->func->grctx;
struct nvkm_subdev *subdev = &gr->base.engine.subdev;
struct nvkm_device *device = subdev->device;
struct gf100_gr_oclass *oclass = (void *)nv_object(gr)->oclass;
struct gf100_grctx_oclass *cclass = (void *)nv_engine(gr)->cclass;
int i;
if (gr->firmware) {
......@@ -1446,10 +1465,10 @@ gf100_gr_init_ctxctl(struct gf100_gr *gr)
nvkm_mc(gr)->unk260(nvkm_mc(gr), 1);
/* load register lists */
gf100_gr_init_csdata(gr, cclass->hub, 0x409000, 0x000, 0x000000);
gf100_gr_init_csdata(gr, cclass->gpc, 0x41a000, 0x000, 0x418000);
gf100_gr_init_csdata(gr, cclass->tpc, 0x41a000, 0x004, 0x419800);
gf100_gr_init_csdata(gr, cclass->ppc, 0x41a000, 0x008, 0x41be00);
gf100_gr_init_csdata(gr, grctx->hub, 0x409000, 0x000, 0x000000);
gf100_gr_init_csdata(gr, grctx->gpc, 0x41a000, 0x000, 0x418000);
gf100_gr_init_csdata(gr, grctx->tpc, 0x41a000, 0x004, 0x419800);
gf100_gr_init_csdata(gr, grctx->ppc, 0x41a000, 0x008, 0x41be00);
/* start HUB ucode running, it'll init the GPCs */
nvkm_wr32(device, 0x40910c, 0x00000000);
......@@ -1646,6 +1665,12 @@ gf100_gr_dtor(struct nvkm_object *object)
nvkm_gr_destroy(&gr->base);
}
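/* base-class hooks shared by every gf100-derived chipset */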
static const struct nvkm_gr_func
gf100_gr_ = {
.chan_new = gf100_gr_chan_new,
.object_get = gf100_gr_object_get,
};
int
gf100_gr_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
struct nvkm_oclass *bclass, void *data, u32 size,
......@@ -1666,6 +1691,8 @@ gf100_gr_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
if (ret)
return ret;
gr->func = oclass->func;
gr->base.func = &gf100_gr_;
nv_subdev(gr)->unit = 0x08001000;
nv_subdev(gr)->intr = gf100_gr_intr;
......@@ -1752,8 +1779,6 @@ gf100_gr_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
break;
}
nv_engine(gr)->cclass = *oclass->cclass;
nv_engine(gr)->sclass = oclass->sclass;
return 0;
}
......@@ -1777,6 +1802,18 @@ gf100_gr_gpccs_ucode = {
.data.size = sizeof(gf100_grgpc_data),
};
static const struct gf100_gr_func
gf100_gr = {
.grctx = &gf100_grctx,
.sclass = {
{ -1, -1, FERMI_TWOD_A },
{ -1, -1, FERMI_MEMORY_TO_MEMORY_FORMAT_A },
{ -1, -1, FERMI_A, &gf100_fermi },
{ -1, -1, FERMI_COMPUTE_A },
{}
}
};
struct nvkm_oclass *
gf100_gr_oclass = &(struct gf100_gr_oclass) {
.base.handle = NV_ENGINE(GR, 0xc0),
......@@ -1786,8 +1823,7 @@ gf100_gr_oclass = &(struct gf100_gr_oclass) {
.init = gf100_gr_init,
.fini = _nvkm_gr_fini,
},
.cclass = &gf100_grctx_oclass,
.sclass = gf100_gr_sclass,
.func = &gf100_gr,
.mmio = gf100_gr_pack_mmio,
.fecs.ucode = &gf100_gr_fecs_ucode,
.gpccs.ucode = &gf100_gr_gpccs_ucode,
......
......@@ -23,9 +23,12 @@
*/
#ifndef __NVC0_GR_H__
#define __NVC0_GR_H__
#include <engine/gr.h>
#define gf100_gr(p) container_of((p), struct gf100_gr, base)
#include "priv.h"
#include <core/gpuobj.h>
#include <subdev/ltc.h>
#include <subdev/mmu.h>
#define GPC_MAX 32
#define TPC_MAX (GPC_MAX * 8)
......@@ -69,6 +72,7 @@ struct gf100_gr_zbc_depth {
struct gf100_gr {
struct nvkm_gr base;
const struct gf100_gr_func *func;
struct gf100_gr_fuc fuc409c;
struct gf100_gr_fuc fuc409d;
......@@ -106,23 +110,27 @@ struct gf100_gr {
u8 magic_not_rop_nr;
};
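/* per-chipset implementation: context generator plus the zero-terminated
 * table of user object classes it exposes
 */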
struct gf100_gr_func {
const struct gf100_grctx_func *grctx;
struct nvkm_sclass sclass[];
};
#define gf100_gr_chan(p) container_of((p), struct gf100_gr_chan, object)
struct gf100_gr_chan {
struct nvkm_gr_chan base;
struct nvkm_object object;
struct gf100_gr *gr;
struct nvkm_memory *mmio;
struct nvkm_vma mmio_vma;
int mmio_nr;
struct {
struct nvkm_memory *mem;
struct nvkm_vma vma;
} data[4];
};
int gf100_gr_context_ctor(struct nvkm_object *, struct nvkm_object *,
struct nvkm_oclass *, void *, u32,
struct nvkm_object **);
void gf100_gr_context_dtor(struct nvkm_object *);
void gf100_gr_ctxctl_debug(struct gf100_gr *);
void gf100_gr_dtor_fw(struct gf100_gr_fuc *);
......@@ -149,7 +157,7 @@ int gk20a_gr_init(struct nvkm_object *);
int gm204_gr_init(struct nvkm_object *);
extern struct nvkm_ofuncs gf100_fermi_ofuncs;
extern const struct nvkm_object_func gf100_fermi;
extern struct nvkm_oclass gf100_gr_sclass[];
extern struct nvkm_oclass gf110_gr_sclass[];
......@@ -185,8 +193,7 @@ extern struct gf100_gr_ucode gk110_gr_gpccs_ucode;
struct gf100_gr_oclass {
struct nvkm_oclass base;
struct nvkm_oclass **cclass;
struct nvkm_oclass *sclass;
const struct gf100_gr_func *func;
const struct gf100_gr_pack *mmio;
struct {
struct gf100_gr_ucode *ucode;
......
......@@ -24,6 +24,8 @@
#include "gf100.h"
#include "ctxgf100.h"
#include <nvif/class.h>
/*******************************************************************************
* PGRAPH register lists
******************************************************************************/
......@@ -110,6 +112,18 @@ gf104_gr_pack_mmio[] = {
* PGRAPH engine/subdev functions
******************************************************************************/
static const struct gf100_gr_func
gf104_gr = {
.grctx = &gf104_grctx,
.sclass = {
{ -1, -1, FERMI_TWOD_A },
{ -1, -1, FERMI_MEMORY_TO_MEMORY_FORMAT_A },
{ -1, -1, FERMI_A, &gf100_fermi },
{ -1, -1, FERMI_COMPUTE_A },
{}
}
};
struct nvkm_oclass *
gf104_gr_oclass = &(struct gf100_gr_oclass) {
.base.handle = NV_ENGINE(GR, 0xc3),
......@@ -119,8 +133,7 @@ gf104_gr_oclass = &(struct gf100_gr_oclass) {
.init = gf100_gr_init,
.fini = _nvkm_gr_fini,
},
.cclass = &gf104_grctx_oclass,
.sclass = gf100_gr_sclass,
.func = &gf104_gr,
.mmio = gf104_gr_pack_mmio,
.fecs.ucode = &gf100_gr_fecs_ucode,
.gpccs.ucode = &gf100_gr_gpccs_ucode,
......
......@@ -26,20 +26,6 @@
#include <nvif/class.h>
/*******************************************************************************
* Graphics object classes
******************************************************************************/
static struct nvkm_oclass
gf108_gr_sclass[] = {
{ FERMI_TWOD_A, &nvkm_object_ofuncs },
{ FERMI_MEMORY_TO_MEMORY_FORMAT_A, &nvkm_object_ofuncs },
{ FERMI_A, &gf100_fermi_ofuncs },
{ FERMI_B, &gf100_fermi_ofuncs },
{ FERMI_COMPUTE_A, &nvkm_object_ofuncs },
{}
};
/*******************************************************************************
* PGRAPH register lists
******************************************************************************/
......@@ -117,6 +103,19 @@ gf108_gr_pack_mmio[] = {
* PGRAPH engine/subdev functions
******************************************************************************/
static const struct gf100_gr_func
gf108_gr = {
.grctx = &gf108_grctx,
.sclass = {
{ -1, -1, FERMI_TWOD_A },
{ -1, -1, FERMI_MEMORY_TO_MEMORY_FORMAT_A },
{ -1, -1, FERMI_A, &gf100_fermi },
{ -1, -1, FERMI_B, &gf100_fermi },
{ -1, -1, FERMI_COMPUTE_A },
{}
}
};
struct nvkm_oclass *
gf108_gr_oclass = &(struct gf100_gr_oclass) {
.base.handle = NV_ENGINE(GR, 0xc1),
......@@ -126,8 +125,7 @@ gf108_gr_oclass = &(struct gf100_gr_oclass) {
.init = gf100_gr_init,
.fini = _nvkm_gr_fini,
},
.cclass = &gf108_grctx_oclass,
.sclass = gf108_gr_sclass,
.func = &gf108_gr,
.mmio = gf108_gr_pack_mmio,
.fecs.ucode = &gf100_gr_fecs_ucode,
.gpccs.ucode = &gf100_gr_gpccs_ucode,
......
......@@ -26,21 +26,6 @@
#include <nvif/class.h>
/*******************************************************************************
* Graphics object classes
******************************************************************************/
struct nvkm_oclass
gf110_gr_sclass[] = {
{ FERMI_TWOD_A, &nvkm_object_ofuncs },
{ FERMI_MEMORY_TO_MEMORY_FORMAT_A, &nvkm_object_ofuncs },
{ FERMI_A, &gf100_fermi_ofuncs },
{ FERMI_B, &gf100_fermi_ofuncs },
{ FERMI_C, &gf100_fermi_ofuncs },
{ FERMI_COMPUTE_A, &nvkm_object_ofuncs },
{}
};
/*******************************************************************************
* PGRAPH register lists
******************************************************************************/
......@@ -99,6 +84,20 @@ gf110_gr_pack_mmio[] = {
* PGRAPH engine/subdev functions
******************************************************************************/
static const struct gf100_gr_func
gf110_gr = {
.grctx = &gf110_grctx,
.sclass = {
{ -1, -1, FERMI_TWOD_A },
{ -1, -1, FERMI_MEMORY_TO_MEMORY_FORMAT_A },
{ -1, -1, FERMI_A, &gf100_fermi },
{ -1, -1, FERMI_B, &gf100_fermi },
{ -1, -1, FERMI_C, &gf100_fermi },
{ -1, -1, FERMI_COMPUTE_A },
{}
}
};
struct nvkm_oclass *
gf110_gr_oclass = &(struct gf100_gr_oclass) {
.base.handle = NV_ENGINE(GR, 0xc8),
......@@ -108,8 +107,7 @@ gf110_gr_oclass = &(struct gf100_gr_oclass) {
.init = gf100_gr_init,
.fini = _nvkm_gr_fini,
},
.cclass = &gf110_grctx_oclass,
.sclass = gf110_gr_sclass,
.func = &gf110_gr,
.mmio = gf110_gr_pack_mmio,
.fecs.ucode = &gf100_gr_fecs_ucode,
.gpccs.ucode = &gf100_gr_gpccs_ucode,
......
......@@ -24,6 +24,8 @@
#include "gf100.h"
#include "ctxgf100.h"
#include <nvif/class.h>
/*******************************************************************************
* PGRAPH register lists
******************************************************************************/
......@@ -118,6 +120,20 @@ gf117_gr_gpccs_ucode = {
.data.size = sizeof(gf117_grgpc_data),
};
static const struct gf100_gr_func
gf117_gr = {
.grctx = &gf117_grctx,
.sclass = {
{ -1, -1, FERMI_TWOD_A },
{ -1, -1, FERMI_MEMORY_TO_MEMORY_FORMAT_A },
{ -1, -1, FERMI_A, &gf100_fermi },
{ -1, -1, FERMI_B, &gf100_fermi },
{ -1, -1, FERMI_C, &gf100_fermi },
{ -1, -1, FERMI_COMPUTE_A },
{}
}
};
struct nvkm_oclass *
gf117_gr_oclass = &(struct gf100_gr_oclass) {
.base.handle = NV_ENGINE(GR, 0xd7),
......@@ -127,8 +143,7 @@ gf117_gr_oclass = &(struct gf100_gr_oclass) {
.init = gf100_gr_init,
.fini = _nvkm_gr_fini,
},
.cclass = &gf117_grctx_oclass,
.sclass = gf110_gr_sclass,
.func = &gf117_gr,
.mmio = gf117_gr_pack_mmio,
.fecs.ucode = &gf117_gr_fecs_ucode,
.gpccs.ucode = &gf117_gr_gpccs_ucode,
......
......@@ -24,6 +24,8 @@
#include "gf100.h"
#include "ctxgf100.h"
#include <nvif/class.h>
/*******************************************************************************
* PGRAPH register lists
******************************************************************************/
......@@ -173,6 +175,20 @@ gf119_gr_pack_mmio[] = {
* PGRAPH engine/subdev functions
******************************************************************************/
static const struct gf100_gr_func
gf119_gr = {
.grctx = &gf119_grctx,
.sclass = {
{ -1, -1, FERMI_TWOD_A },
{ -1, -1, FERMI_MEMORY_TO_MEMORY_FORMAT_A },
{ -1, -1, FERMI_A, &gf100_fermi },
{ -1, -1, FERMI_B, &gf100_fermi },
{ -1, -1, FERMI_C, &gf100_fermi },
{ -1, -1, FERMI_COMPUTE_A },
{}
}
};
struct nvkm_oclass *
gf119_gr_oclass = &(struct gf100_gr_oclass) {
.base.handle = NV_ENGINE(GR, 0xd9),
......@@ -182,8 +198,7 @@ gf119_gr_oclass = &(struct gf100_gr_oclass) {
.init = gf100_gr_init,
.fini = _nvkm_gr_fini,
},
.cclass = &gf119_grctx_oclass,
.sclass = gf110_gr_sclass,
.func = &gf119_gr,
.mmio = gf119_gr_pack_mmio,
.fecs.ucode = &gf100_gr_fecs_ucode,
.gpccs.ucode = &gf100_gr_gpccs_ucode,
......
......@@ -28,19 +28,6 @@
#include <nvif/class.h>
/*******************************************************************************
* Graphics object classes
******************************************************************************/
static struct nvkm_oclass
gk104_gr_sclass[] = {
{ FERMI_TWOD_A, &nvkm_object_ofuncs },
{ KEPLER_INLINE_TO_MEMORY_A, &nvkm_object_ofuncs },
{ KEPLER_A, &gf100_fermi_ofuncs },
{ KEPLER_COMPUTE_A, &nvkm_object_ofuncs },
{}
};
/*******************************************************************************
* PGRAPH register lists
******************************************************************************/
......@@ -311,6 +298,18 @@ gk104_gr_init(struct nvkm_object *object)
return gf100_gr_init_ctxctl(gr);
}
static const struct gf100_gr_func
gk104_gr = {
.grctx = &gk104_grctx,
.sclass = {
{ -1, -1, FERMI_TWOD_A },
{ -1, -1, KEPLER_INLINE_TO_MEMORY_A },
{ -1, -1, KEPLER_A, &gf100_fermi },
{ -1, -1, KEPLER_COMPUTE_A },
{}
}
};
int
gk104_gr_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
struct nvkm_oclass *oclass, void *data, u32 size,
......@@ -351,8 +350,7 @@ gk104_gr_oclass = &(struct gf100_gr_oclass) {
.init = gk104_gr_init,
.fini = _nvkm_gr_fini,
},
.cclass = &gk104_grctx_oclass,
.sclass = gk104_gr_sclass,
.func = &gk104_gr,
.mmio = gk104_gr_pack_mmio,
.fecs.ucode = &gk104_gr_fecs_ucode,
.gpccs.ucode = &gk104_gr_gpccs_ucode,
......
......@@ -28,19 +28,6 @@
#include <nvif/class.h>
/*******************************************************************************
* Graphics object classes
******************************************************************************/
struct nvkm_oclass
gk110_gr_sclass[] = {
{ FERMI_TWOD_A, &nvkm_object_ofuncs },
{ KEPLER_INLINE_TO_MEMORY_B, &nvkm_object_ofuncs },
{ KEPLER_B, &gf100_fermi_ofuncs },
{ KEPLER_COMPUTE_B, &nvkm_object_ofuncs },
{}
};
/*******************************************************************************
* PGRAPH register lists
******************************************************************************/
......@@ -193,6 +180,18 @@ gk110_gr_gpccs_ucode = {
.data.size = sizeof(gk110_grgpc_data),
};
static const struct gf100_gr_func
gk110_gr = {
.grctx = &gk110_grctx,
.sclass = {
{ -1, -1, FERMI_TWOD_A },
{ -1, -1, KEPLER_INLINE_TO_MEMORY_B },
{ -1, -1, KEPLER_B, &gf100_fermi },
{ -1, -1, KEPLER_COMPUTE_B },
{}
}
};
struct nvkm_oclass *
gk110_gr_oclass = &(struct gf100_gr_oclass) {
.base.handle = NV_ENGINE(GR, 0xf0),
......@@ -202,8 +201,7 @@ gk110_gr_oclass = &(struct gf100_gr_oclass) {
.init = gk104_gr_init,
.fini = _nvkm_gr_fini,
},
.cclass = &gk110_grctx_oclass,
.sclass = gk110_gr_sclass,
.func = &gk110_gr,
.mmio = gk110_gr_pack_mmio,
.fecs.ucode = &gk110_gr_fecs_ucode,
.gpccs.ucode = &gk110_gr_gpccs_ucode,
......
......@@ -24,6 +24,8 @@
#include "gf100.h"
#include "ctxgf100.h"
#include <nvif/class.h>
/*******************************************************************************
* PGRAPH register lists
******************************************************************************/
......@@ -98,6 +100,18 @@ gk110b_gr_pack_mmio[] = {
* PGRAPH engine/subdev functions
******************************************************************************/
static const struct gf100_gr_func
gk110b_gr = {
.grctx = &gk110b_grctx,
.sclass = {
{ -1, -1, FERMI_TWOD_A },
{ -1, -1, KEPLER_INLINE_TO_MEMORY_B },
{ -1, -1, KEPLER_B, &gf100_fermi },
{ -1, -1, KEPLER_COMPUTE_B },
{}
}
};
struct nvkm_oclass *
gk110b_gr_oclass = &(struct gf100_gr_oclass) {
.base.handle = NV_ENGINE(GR, 0xf1),
......@@ -107,8 +121,7 @@ gk110b_gr_oclass = &(struct gf100_gr_oclass) {
.init = gk104_gr_init,
.fini = _nvkm_gr_fini,
},
.cclass = &gk110b_grctx_oclass,
.sclass = gk110_gr_sclass,
.func = &gk110b_gr,
.mmio = gk110b_gr_pack_mmio,
.fecs.ucode = &gk110_gr_fecs_ucode,
.gpccs.ucode = &gk110_gr_gpccs_ucode,
......
......@@ -28,19 +28,6 @@
#include <nvif/class.h>
/*******************************************************************************
* Graphics object classes
******************************************************************************/
static struct nvkm_oclass
gk208_gr_sclass[] = {
{ FERMI_TWOD_A, &nvkm_object_ofuncs },
{ KEPLER_INLINE_TO_MEMORY_B, &nvkm_object_ofuncs },
{ KEPLER_B, &gf100_fermi_ofuncs },
{ KEPLER_COMPUTE_B, &nvkm_object_ofuncs },
{}
};
/*******************************************************************************
* PGRAPH register lists
******************************************************************************/
......@@ -172,6 +159,18 @@ gk208_gr_gpccs_ucode = {
.data.size = sizeof(gk208_grgpc_data),
};
static const struct gf100_gr_func
gk208_gr = {
.grctx = &gk208_grctx,
.sclass = {
{ -1, -1, FERMI_TWOD_A },
{ -1, -1, KEPLER_INLINE_TO_MEMORY_B },
{ -1, -1, KEPLER_B, &gf100_fermi },
{ -1, -1, KEPLER_COMPUTE_B },
{}
}
};
struct nvkm_oclass *
gk208_gr_oclass = &(struct gf100_gr_oclass) {
.base.handle = NV_ENGINE(GR, 0x08),
......@@ -181,8 +180,7 @@ gk208_gr_oclass = &(struct gf100_gr_oclass) {
.init = gk104_gr_init,
.fini = _nvkm_gr_fini,
},
.cclass = &gk208_grctx_oclass,
.sclass = gk208_gr_sclass,
.func = &gk208_gr,
.mmio = gk208_gr_pack_mmio,
.fecs.ucode = &gk208_gr_fecs_ucode,
.gpccs.ucode = &gk208_gr_gpccs_ucode,
......
......@@ -22,17 +22,9 @@
#include "gk20a.h"
#include "ctxgf100.h"
#include <nvif/class.h>
#include <subdev/timer.h>
static struct nvkm_oclass
gk20a_gr_sclass[] = {
{ FERMI_TWOD_A, &nvkm_object_ofuncs },
{ KEPLER_INLINE_TO_MEMORY_A, &nvkm_object_ofuncs },
{ KEPLER_C, &gf100_fermi_ofuncs },
{ KEPLER_COMPUTE_A, &nvkm_object_ofuncs },
{}
};
#include <nvif/class.h>
static void
gk20a_gr_init_dtor(struct gf100_gr_pack *pack)
......@@ -350,6 +342,18 @@ gk20a_gr_init(struct nvkm_object *object)
return gf100_gr_init_ctxctl(gr);
}
static const struct gf100_gr_func
gk20a_gr = {
.grctx = &gk20a_grctx,
.sclass = {
{ -1, -1, FERMI_TWOD_A },
{ -1, -1, KEPLER_INLINE_TO_MEMORY_A },
{ -1, -1, KEPLER_C, &gf100_fermi },
{ -1, -1, KEPLER_COMPUTE_A },
{}
}
};
struct nvkm_oclass *
gk20a_gr_oclass = &(struct gk20a_gr_oclass) {
.gf100 = {
......@@ -360,8 +364,7 @@ gk20a_gr_oclass = &(struct gk20a_gr_oclass) {
.init = gk20a_gr_init,
.fini = _nvkm_gr_fini,
},
.cclass = &gk20a_grctx_oclass,
.sclass = gk20a_gr_sclass,
.func = &gk20a_gr,
.ppc_nr = 1,
},
.set_hww_esr_report_mask = gk20a_gr_set_hww_esr_report_mask,
......
......@@ -29,19 +29,6 @@
#include <nvif/class.h>
/*******************************************************************************
* Graphics object classes
******************************************************************************/
static struct nvkm_oclass
gm107_gr_sclass[] = {
{ FERMI_TWOD_A, &nvkm_object_ofuncs },
{ KEPLER_INLINE_TO_MEMORY_B, &nvkm_object_ofuncs },
{ MAXWELL_A, &gf100_fermi_ofuncs },
{ MAXWELL_COMPUTE_A, &nvkm_object_ofuncs },
{}
};
/*******************************************************************************
* PGRAPH register lists
******************************************************************************/
......@@ -454,6 +441,18 @@ gm107_gr_gpccs_ucode = {
.data.size = sizeof(gm107_grgpc_data),
};
static const struct gf100_gr_func
gm107_gr = {
.grctx = &gm107_grctx,
.sclass = {
{ -1, -1, FERMI_TWOD_A },
{ -1, -1, KEPLER_INLINE_TO_MEMORY_B },
{ -1, -1, MAXWELL_A, &gf100_fermi },
{ -1, -1, MAXWELL_COMPUTE_A },
{}
}
};
struct nvkm_oclass *
gm107_gr_oclass = &(struct gf100_gr_oclass) {
.base.handle = NV_ENGINE(GR, 0x07),
......@@ -463,8 +462,7 @@ gm107_gr_oclass = &(struct gf100_gr_oclass) {
.init = gm107_gr_init,
.fini = _nvkm_gr_fini,
},
.cclass = &gm107_grctx_oclass,
.sclass = gm107_gr_sclass,
.func = &gm107_gr,
.mmio = gm107_gr_pack_mmio,
.fecs.ucode = &gm107_gr_fecs_ucode,
.gpccs.ucode = &gm107_gr_gpccs_ucode,
......
......@@ -26,19 +26,6 @@
#include <nvif/class.h>
/*******************************************************************************
* Graphics object classes
******************************************************************************/
struct nvkm_oclass
gm204_gr_sclass[] = {
{ FERMI_TWOD_A, &nvkm_object_ofuncs },
{ KEPLER_INLINE_TO_MEMORY_B, &nvkm_object_ofuncs },
{ MAXWELL_B, &gf100_fermi_ofuncs },
{ MAXWELL_COMPUTE_B, &nvkm_object_ofuncs },
{}
};
/*******************************************************************************
* PGRAPH register lists
******************************************************************************/
......@@ -371,6 +358,18 @@ gm204_gr_init(struct nvkm_object *object)
return gm204_gr_init_ctxctl(gr);
}
static const struct gf100_gr_func
gm204_gr = {
.grctx = &gm204_grctx,
.sclass = {
{ -1, -1, FERMI_TWOD_A },
{ -1, -1, KEPLER_INLINE_TO_MEMORY_B },
{ -1, -1, MAXWELL_B, &gf100_fermi },
{ -1, -1, MAXWELL_COMPUTE_B },
{}
}
};
struct nvkm_oclass *
gm204_gr_oclass = &(struct gf100_gr_oclass) {
.base.handle = NV_ENGINE(GR, 0x24),
......@@ -380,8 +379,7 @@ gm204_gr_oclass = &(struct gf100_gr_oclass) {
.init = gm204_gr_init,
.fini = _nvkm_gr_fini,
},
.cclass = &gm204_grctx_oclass,
.sclass = gm204_gr_sclass,
.func = &gm204_gr,
.mmio = gm204_gr_pack_mmio,
.ppc_nr = 2,
}.base;
......@@ -24,6 +24,20 @@
#include "gf100.h"
#include "ctxgf100.h"
#include <nvif/class.h>
static const struct gf100_gr_func
gm206_gr = {
.grctx = &gm206_grctx,
.sclass = {
{ -1, -1, FERMI_TWOD_A },
{ -1, -1, KEPLER_INLINE_TO_MEMORY_B },
{ -1, -1, MAXWELL_B, &gf100_fermi },
{ -1, -1, MAXWELL_COMPUTE_B },
{}
}
};
struct nvkm_oclass *
gm206_gr_oclass = &(struct gf100_gr_oclass) {
.base.handle = NV_ENGINE(GR, 0x26),
......@@ -33,8 +47,7 @@ gm206_gr_oclass = &(struct gf100_gr_oclass) {
.init = gm204_gr_init,
.fini = _nvkm_gr_fini,
},
.cclass = &gm206_grctx_oclass,
.sclass = gm204_gr_sclass,
.func = &gm206_gr,
.mmio = gm204_gr_pack_mmio,
.ppc_nr = 2,
}.base;
......@@ -22,17 +22,9 @@
#include "gk20a.h"
#include "ctxgf100.h"
#include <nvif/class.h>
#include <subdev/timer.h>
static struct nvkm_oclass
gm20b_gr_sclass[] = {
{ FERMI_TWOD_A, &nvkm_object_ofuncs },
{ KEPLER_INLINE_TO_MEMORY_B, &nvkm_object_ofuncs },
{ MAXWELL_B, &gf100_fermi_ofuncs },
{ MAXWELL_COMPUTE_B, &nvkm_object_ofuncs },
{}
};
#include <nvif/class.h>
static void
gm20b_gr_init_gpc_mmu(struct gf100_gr *gr)
......@@ -67,6 +59,18 @@ gm20b_gr_set_hww_esr_report_mask(struct gf100_gr *gr)
nvkm_wr32(device, 0x419e4c, 0x5);
}
static const struct gf100_gr_func
gm20b_gr = {
.grctx = &gm20b_grctx,
.sclass = {
{ -1, -1, FERMI_TWOD_A },
{ -1, -1, KEPLER_INLINE_TO_MEMORY_B },
{ -1, -1, MAXWELL_B, &gf100_fermi },
{ -1, -1, MAXWELL_COMPUTE_B },
{}
}
};
struct nvkm_oclass *
gm20b_gr_oclass = &(struct gk20a_gr_oclass) {
.gf100 = {
......@@ -77,8 +81,7 @@ gm20b_gr_oclass = &(struct gk20a_gr_oclass) {
.init = gk20a_gr_init,
.fini = _nvkm_gr_fini,
},
.cclass = &gm20b_grctx_oclass,
.sclass = gm20b_gr_sclass,
.func = &gm20b_gr,
.ppc_nr = 1,
},
.init_gpc_mmu = gm20b_gr_init_gpc_mmu,
......
#ifndef __NV20_GR_H__
#define __NV20_GR_H__
#include <engine/gr.h>
#define nv20_gr(p) container_of((p), struct nv20_gr, base)
#include "priv.h"
struct nv20_gr {
struct nvkm_gr base;
struct nvkm_memory *ctxtab;
};
#define nv20_gr_chan(p) container_of((p), struct nv20_gr_chan, object)
struct nv20_gr_chan {
struct nvkm_gr_chan base;
struct nvkm_object object;
struct nv20_gr *gr;
int chid;
struct nvkm_memory *inst;
};
extern struct nvkm_oclass nv25_gr_sclass[];
int nv20_gr_context_init(struct nvkm_object *);
int nv20_gr_context_fini(struct nvkm_object *, bool);
void *nv20_gr_chan_dtor(struct nvkm_object *);
int nv20_gr_chan_init(struct nvkm_object *);
int nv20_gr_chan_fini(struct nvkm_object *, bool);
void nv20_gr_tile_prog(struct nvkm_engine *, int);
void nv20_gr_intr(struct nvkm_subdev *);
......
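Editor's note: the nv20_gr() and nv20_gr_chan() macros above rely on container_of to recover the chipset-specific wrapper from the embedded base object; this is how the chan_new implementations later in this diff turn their struct nvkm_gr argument back into a struct nv20_gr. A minimal, self-contained sketch of that pattern, with invented type names rather than the driver's own:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct base_engine {
	const char *name;
};

struct chip_gr {
	struct base_engine base; /* embedded base object */
	int ctx_entries;         /* chipset-specific state */
};

#define chip_gr(p) container_of((p), struct chip_gr, base)

/* Callbacks receive the embedded base pointer and recover the wrapper. */
static void show(struct base_engine *engine)
{
	struct chip_gr *gr = chip_gr(engine);
	printf("%s: %d context entries\n", engine->name, gr->ctx_entries);
}

int main(void)
{
	struct chip_gr gr = { .base = { .name = "gr" }, .ctx_entries = 32 };
	show(&gr.base);
	return 0;
}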
......@@ -5,126 +5,122 @@
#include <engine/fifo/chan.h>
/*******************************************************************************
* Graphics object classes
* PGRAPH context
******************************************************************************/
struct nvkm_oclass
nv25_gr_sclass[] = {
{ 0x0012, &nv04_gr_ofuncs, NULL }, /* beta1 */
{ 0x0019, &nv04_gr_ofuncs, NULL }, /* clip */
{ 0x0030, &nv04_gr_ofuncs, NULL }, /* null */
{ 0x0039, &nv04_gr_ofuncs, NULL }, /* m2mf */
{ 0x0043, &nv04_gr_ofuncs, NULL }, /* rop */
{ 0x0044, &nv04_gr_ofuncs, NULL }, /* patt */
{ 0x004a, &nv04_gr_ofuncs, NULL }, /* gdi */
{ 0x0062, &nv04_gr_ofuncs, NULL }, /* surf2d */
{ 0x0072, &nv04_gr_ofuncs, NULL }, /* beta4 */
{ 0x0089, &nv04_gr_ofuncs, NULL }, /* sifm */
{ 0x008a, &nv04_gr_ofuncs, NULL }, /* ifc */
{ 0x0096, &nv04_gr_ofuncs, NULL }, /* celcius */
{ 0x009e, &nv04_gr_ofuncs, NULL }, /* swzsurf */
{ 0x009f, &nv04_gr_ofuncs, NULL }, /* imageblit */
{ 0x0597, &nv04_gr_ofuncs, NULL }, /* kelvin */
{},
static const struct nvkm_object_func
nv25_gr_chan = {
.dtor = nv20_gr_chan_dtor,
.init = nv20_gr_chan_init,
.fini = nv20_gr_chan_fini,
};
/*******************************************************************************
* PGRAPH context
******************************************************************************/
static int
nv25_gr_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
struct nvkm_oclass *oclass, void *data, u32 size,
struct nvkm_object **pobject)
nv25_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch,
const struct nvkm_oclass *oclass, struct nvkm_object **pobject)
{
struct nv20_gr *gr = nv20_gr(base);
struct nv20_gr_chan *chan;
struct nvkm_gpuobj *image;
int ret, i;
ret = nvkm_gr_context_create(parent, engine, oclass, NULL, 0x3724,
16, NVOBJ_FLAG_ZERO_ALLOC, &chan);
*pobject = nv_object(chan);
if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
return -ENOMEM;
nvkm_object_ctor(&nv25_gr_chan, oclass, &chan->object);
chan->gr = gr;
chan->chid = fifoch->chid;
*pobject = &chan->object;
ret = nvkm_memory_new(gr->base.engine.subdev.device,
NVKM_MEM_TARGET_INST, 0x3724, 16, true,
&chan->inst);
if (ret)
return ret;
chan->chid = nvkm_fifo_chan(parent)->chid;
image = &chan->base.base.gpuobj;
nvkm_kmap(image);
nvkm_wo32(image, 0x0028, 0x00000001 | (chan->chid << 24));
nvkm_wo32(image, 0x035c, 0xffff0000);
nvkm_wo32(image, 0x03c0, 0x0fff0000);
nvkm_wo32(image, 0x03c4, 0x0fff0000);
nvkm_wo32(image, 0x049c, 0x00000101);
nvkm_wo32(image, 0x04b0, 0x00000111);
nvkm_wo32(image, 0x04c8, 0x00000080);
nvkm_wo32(image, 0x04cc, 0xffff0000);
nvkm_wo32(image, 0x04d0, 0x00000001);
nvkm_wo32(image, 0x04e4, 0x44400000);
nvkm_wo32(image, 0x04fc, 0x4b800000);
nvkm_kmap(chan->inst);
nvkm_wo32(chan->inst, 0x0028, 0x00000001 | (chan->chid << 24));
nvkm_wo32(chan->inst, 0x035c, 0xffff0000);
nvkm_wo32(chan->inst, 0x03c0, 0x0fff0000);
nvkm_wo32(chan->inst, 0x03c4, 0x0fff0000);
nvkm_wo32(chan->inst, 0x049c, 0x00000101);
nvkm_wo32(chan->inst, 0x04b0, 0x00000111);
nvkm_wo32(chan->inst, 0x04c8, 0x00000080);
nvkm_wo32(chan->inst, 0x04cc, 0xffff0000);
nvkm_wo32(chan->inst, 0x04d0, 0x00000001);
nvkm_wo32(chan->inst, 0x04e4, 0x44400000);
nvkm_wo32(chan->inst, 0x04fc, 0x4b800000);
for (i = 0x0510; i <= 0x051c; i += 4)
nvkm_wo32(image, i, 0x00030303);
nvkm_wo32(chan->inst, i, 0x00030303);
for (i = 0x0530; i <= 0x053c; i += 4)
nvkm_wo32(image, i, 0x00080000);
nvkm_wo32(chan->inst, i, 0x00080000);
for (i = 0x0548; i <= 0x0554; i += 4)
nvkm_wo32(image, i, 0x01012000);
nvkm_wo32(chan->inst, i, 0x01012000);
for (i = 0x0558; i <= 0x0564; i += 4)
nvkm_wo32(image, i, 0x000105b8);
nvkm_wo32(chan->inst, i, 0x000105b8);
for (i = 0x0568; i <= 0x0574; i += 4)
nvkm_wo32(image, i, 0x00080008);
nvkm_wo32(chan->inst, i, 0x00080008);
for (i = 0x0598; i <= 0x05d4; i += 4)
nvkm_wo32(image, i, 0x07ff0000);
nvkm_wo32(image, 0x05e0, 0x4b7fffff);
nvkm_wo32(image, 0x0620, 0x00000080);
nvkm_wo32(image, 0x0624, 0x30201000);
nvkm_wo32(image, 0x0628, 0x70605040);
nvkm_wo32(image, 0x062c, 0xb0a09080);
nvkm_wo32(image, 0x0630, 0xf0e0d0c0);
nvkm_wo32(image, 0x0664, 0x00000001);
nvkm_wo32(image, 0x066c, 0x00004000);
nvkm_wo32(image, 0x0678, 0x00000001);
nvkm_wo32(image, 0x0680, 0x00040000);
nvkm_wo32(image, 0x0684, 0x00010000);
nvkm_wo32(chan->inst, i, 0x07ff0000);
nvkm_wo32(chan->inst, 0x05e0, 0x4b7fffff);
nvkm_wo32(chan->inst, 0x0620, 0x00000080);
nvkm_wo32(chan->inst, 0x0624, 0x30201000);
nvkm_wo32(chan->inst, 0x0628, 0x70605040);
nvkm_wo32(chan->inst, 0x062c, 0xb0a09080);
nvkm_wo32(chan->inst, 0x0630, 0xf0e0d0c0);
nvkm_wo32(chan->inst, 0x0664, 0x00000001);
nvkm_wo32(chan->inst, 0x066c, 0x00004000);
nvkm_wo32(chan->inst, 0x0678, 0x00000001);
nvkm_wo32(chan->inst, 0x0680, 0x00040000);
nvkm_wo32(chan->inst, 0x0684, 0x00010000);
for (i = 0x1b04; i <= 0x2374; i += 16) {
nvkm_wo32(image, (i + 0), 0x10700ff9);
nvkm_wo32(image, (i + 4), 0x0436086c);
nvkm_wo32(image, (i + 8), 0x000c001b);
nvkm_wo32(chan->inst, (i + 0), 0x10700ff9);
nvkm_wo32(chan->inst, (i + 4), 0x0436086c);
nvkm_wo32(chan->inst, (i + 8), 0x000c001b);
}
nvkm_wo32(image, 0x2704, 0x3f800000);
nvkm_wo32(image, 0x2718, 0x3f800000);
nvkm_wo32(image, 0x2744, 0x40000000);
nvkm_wo32(image, 0x2748, 0x3f800000);
nvkm_wo32(image, 0x274c, 0x3f000000);
nvkm_wo32(image, 0x2754, 0x40000000);
nvkm_wo32(image, 0x2758, 0x3f800000);
nvkm_wo32(image, 0x2760, 0xbf800000);
nvkm_wo32(image, 0x2768, 0xbf800000);
nvkm_wo32(image, 0x308c, 0x000fe000);
nvkm_wo32(image, 0x3108, 0x000003f8);
nvkm_wo32(image, 0x3468, 0x002fe000);
nvkm_wo32(chan->inst, 0x2704, 0x3f800000);
nvkm_wo32(chan->inst, 0x2718, 0x3f800000);
nvkm_wo32(chan->inst, 0x2744, 0x40000000);
nvkm_wo32(chan->inst, 0x2748, 0x3f800000);
nvkm_wo32(chan->inst, 0x274c, 0x3f000000);
nvkm_wo32(chan->inst, 0x2754, 0x40000000);
nvkm_wo32(chan->inst, 0x2758, 0x3f800000);
nvkm_wo32(chan->inst, 0x2760, 0xbf800000);
nvkm_wo32(chan->inst, 0x2768, 0xbf800000);
nvkm_wo32(chan->inst, 0x308c, 0x000fe000);
nvkm_wo32(chan->inst, 0x3108, 0x000003f8);
nvkm_wo32(chan->inst, 0x3468, 0x002fe000);
for (i = 0x3484; i <= 0x34a0; i += 4)
nvkm_wo32(image, i, 0x001c527c);
nvkm_done(image);
nvkm_wo32(chan->inst, i, 0x001c527c);
nvkm_done(chan->inst);
return 0;
}
static struct nvkm_oclass
nv25_gr_cclass = {
.handle = NV_ENGCTX(GR, 0x25),
.ofuncs = &(struct nvkm_ofuncs) {
.ctor = nv25_gr_context_ctor,
.dtor = _nvkm_gr_context_dtor,
.init = nv20_gr_context_init,
.fini = nv20_gr_context_fini,
.rd32 = _nvkm_gr_context_rd32,
.wr32 = _nvkm_gr_context_wr32,
},
};
/*******************************************************************************
* PGRAPH engine/subdev functions
******************************************************************************/
static const struct nvkm_gr_func
nv25_gr = {
.chan_new = nv25_gr_chan_new,
.sclass = {
{ -1, -1, 0x0012, &nv04_gr_object }, /* beta1 */
{ -1, -1, 0x0019, &nv04_gr_object }, /* clip */
{ -1, -1, 0x0030, &nv04_gr_object }, /* null */
{ -1, -1, 0x0039, &nv04_gr_object }, /* m2mf */
{ -1, -1, 0x0043, &nv04_gr_object }, /* rop */
{ -1, -1, 0x0044, &nv04_gr_object }, /* patt */
{ -1, -1, 0x004a, &nv04_gr_object }, /* gdi */
{ -1, -1, 0x0062, &nv04_gr_object }, /* surf2d */
{ -1, -1, 0x0072, &nv04_gr_object }, /* beta4 */
{ -1, -1, 0x0089, &nv04_gr_object }, /* sifm */
{ -1, -1, 0x008a, &nv04_gr_object }, /* ifc */
{ -1, -1, 0x0096, &nv04_gr_object }, /* celcius */
{ -1, -1, 0x009e, &nv04_gr_object }, /* swzsurf */
{ -1, -1, 0x009f, &nv04_gr_object }, /* imageblit */
{ -1, -1, 0x0597, &nv04_gr_object }, /* kelvin */
{}
}
};
static int
nv25_gr_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
struct nvkm_oclass *oclass, void *data, u32 size,
......@@ -139,6 +135,8 @@ nv25_gr_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
if (ret)
return ret;
gr->base.func = &nv25_gr;
ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 32 * 4, 16, true,
&gr->ctxtab);
if (ret)
......@@ -146,8 +144,6 @@ nv25_gr_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
nv_subdev(gr)->unit = 0x00001000;
nv_subdev(gr)->intr = nv20_gr_intr;
nv_engine(gr)->cclass = &nv25_gr_cclass;
nv_engine(gr)->sclass = nv25_gr_sclass;
nv_engine(gr)->tile_prog = nv20_gr_tile_prog;
return 0;
}
......
......@@ -8,90 +8,110 @@
* PGRAPH context
******************************************************************************/
static const struct nvkm_object_func
nv2a_gr_chan = {
.dtor = nv20_gr_chan_dtor,
.init = nv20_gr_chan_init,
.fini = nv20_gr_chan_fini,
};
static int
nv2a_gr_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
struct nvkm_oclass *oclass, void *data, u32 size,
struct nvkm_object **pobject)
nv2a_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch,
const struct nvkm_oclass *oclass, struct nvkm_object **pobject)
{
struct nv20_gr *gr = nv20_gr(base);
struct nv20_gr_chan *chan;
struct nvkm_gpuobj *image;
int ret, i;
ret = nvkm_gr_context_create(parent, engine, oclass, NULL, 0x36b0,
16, NVOBJ_FLAG_ZERO_ALLOC, &chan);
*pobject = nv_object(chan);
if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
return -ENOMEM;
nvkm_object_ctor(&nv2a_gr_chan, oclass, &chan->object);
chan->gr = gr;
chan->chid = fifoch->chid;
*pobject = &chan->object;
ret = nvkm_memory_new(gr->base.engine.subdev.device,
NVKM_MEM_TARGET_INST, 0x36b0, 16, true,
&chan->inst);
if (ret)
return ret;
chan->chid = nvkm_fifo_chan(parent)->chid;
image = &chan->base.base.gpuobj;
nvkm_kmap(image);
nvkm_wo32(image, 0x0000, 0x00000001 | (chan->chid << 24));
nvkm_wo32(image, 0x033c, 0xffff0000);
nvkm_wo32(image, 0x03a0, 0x0fff0000);
nvkm_wo32(image, 0x03a4, 0x0fff0000);
nvkm_wo32(image, 0x047c, 0x00000101);
nvkm_wo32(image, 0x0490, 0x00000111);
nvkm_wo32(image, 0x04a8, 0x44400000);
nvkm_kmap(chan->inst);
nvkm_wo32(chan->inst, 0x0000, 0x00000001 | (chan->chid << 24));
nvkm_wo32(chan->inst, 0x033c, 0xffff0000);
nvkm_wo32(chan->inst, 0x03a0, 0x0fff0000);
nvkm_wo32(chan->inst, 0x03a4, 0x0fff0000);
nvkm_wo32(chan->inst, 0x047c, 0x00000101);
nvkm_wo32(chan->inst, 0x0490, 0x00000111);
nvkm_wo32(chan->inst, 0x04a8, 0x44400000);
for (i = 0x04d4; i <= 0x04e0; i += 4)
nvkm_wo32(image, i, 0x00030303);
nvkm_wo32(chan->inst, i, 0x00030303);
for (i = 0x04f4; i <= 0x0500; i += 4)
nvkm_wo32(image, i, 0x00080000);
nvkm_wo32(chan->inst, i, 0x00080000);
for (i = 0x050c; i <= 0x0518; i += 4)
nvkm_wo32(image, i, 0x01012000);
nvkm_wo32(chan->inst, i, 0x01012000);
for (i = 0x051c; i <= 0x0528; i += 4)
nvkm_wo32(image, i, 0x000105b8);
nvkm_wo32(chan->inst, i, 0x000105b8);
for (i = 0x052c; i <= 0x0538; i += 4)
nvkm_wo32(image, i, 0x00080008);
nvkm_wo32(chan->inst, i, 0x00080008);
for (i = 0x055c; i <= 0x0598; i += 4)
nvkm_wo32(image, i, 0x07ff0000);
nvkm_wo32(image, 0x05a4, 0x4b7fffff);
nvkm_wo32(image, 0x05fc, 0x00000001);
nvkm_wo32(image, 0x0604, 0x00004000);
nvkm_wo32(image, 0x0610, 0x00000001);
nvkm_wo32(image, 0x0618, 0x00040000);
nvkm_wo32(image, 0x061c, 0x00010000);
nvkm_wo32(chan->inst, i, 0x07ff0000);
nvkm_wo32(chan->inst, 0x05a4, 0x4b7fffff);
nvkm_wo32(chan->inst, 0x05fc, 0x00000001);
nvkm_wo32(chan->inst, 0x0604, 0x00004000);
nvkm_wo32(chan->inst, 0x0610, 0x00000001);
nvkm_wo32(chan->inst, 0x0618, 0x00040000);
nvkm_wo32(chan->inst, 0x061c, 0x00010000);
for (i = 0x1a9c; i <= 0x22fc; i += 16) { /*XXX: check!! */
nvkm_wo32(image, (i + 0), 0x10700ff9);
nvkm_wo32(image, (i + 4), 0x0436086c);
nvkm_wo32(image, (i + 8), 0x000c001b);
nvkm_wo32(chan->inst, (i + 0), 0x10700ff9);
nvkm_wo32(chan->inst, (i + 4), 0x0436086c);
nvkm_wo32(chan->inst, (i + 8), 0x000c001b);
}
nvkm_wo32(image, 0x269c, 0x3f800000);
nvkm_wo32(image, 0x26b0, 0x3f800000);
nvkm_wo32(image, 0x26dc, 0x40000000);
nvkm_wo32(image, 0x26e0, 0x3f800000);
nvkm_wo32(image, 0x26e4, 0x3f000000);
nvkm_wo32(image, 0x26ec, 0x40000000);
nvkm_wo32(image, 0x26f0, 0x3f800000);
nvkm_wo32(image, 0x26f8, 0xbf800000);
nvkm_wo32(image, 0x2700, 0xbf800000);
nvkm_wo32(image, 0x3024, 0x000fe000);
nvkm_wo32(image, 0x30a0, 0x000003f8);
nvkm_wo32(image, 0x33fc, 0x002fe000);
nvkm_wo32(chan->inst, 0x269c, 0x3f800000);
nvkm_wo32(chan->inst, 0x26b0, 0x3f800000);
nvkm_wo32(chan->inst, 0x26dc, 0x40000000);
nvkm_wo32(chan->inst, 0x26e0, 0x3f800000);
nvkm_wo32(chan->inst, 0x26e4, 0x3f000000);
nvkm_wo32(chan->inst, 0x26ec, 0x40000000);
nvkm_wo32(chan->inst, 0x26f0, 0x3f800000);
nvkm_wo32(chan->inst, 0x26f8, 0xbf800000);
nvkm_wo32(chan->inst, 0x2700, 0xbf800000);
nvkm_wo32(chan->inst, 0x3024, 0x000fe000);
nvkm_wo32(chan->inst, 0x30a0, 0x000003f8);
nvkm_wo32(chan->inst, 0x33fc, 0x002fe000);
for (i = 0x341c; i <= 0x3438; i += 4)
nvkm_wo32(image, i, 0x001c527c);
nvkm_done(image);
nvkm_wo32(chan->inst, i, 0x001c527c);
nvkm_done(chan->inst);
return 0;
}
static struct nvkm_oclass
nv2a_gr_cclass = {
.handle = NV_ENGCTX(GR, 0x2a),
.ofuncs = &(struct nvkm_ofuncs) {
.ctor = nv2a_gr_context_ctor,
.dtor = _nvkm_gr_context_dtor,
.init = nv20_gr_context_init,
.fini = nv20_gr_context_fini,
.rd32 = _nvkm_gr_context_rd32,
.wr32 = _nvkm_gr_context_wr32,
},
};
/*******************************************************************************
* PGRAPH engine/subdev functions
******************************************************************************/
static const struct nvkm_gr_func
nv2a_gr = {
.chan_new = nv2a_gr_chan_new,
.sclass = {
{ -1, -1, 0x0012, &nv04_gr_object }, /* beta1 */
{ -1, -1, 0x0019, &nv04_gr_object }, /* clip */
{ -1, -1, 0x0030, &nv04_gr_object }, /* null */
{ -1, -1, 0x0039, &nv04_gr_object }, /* m2mf */
{ -1, -1, 0x0043, &nv04_gr_object }, /* rop */
{ -1, -1, 0x0044, &nv04_gr_object }, /* patt */
{ -1, -1, 0x004a, &nv04_gr_object }, /* gdi */
{ -1, -1, 0x0062, &nv04_gr_object }, /* surf2d */
{ -1, -1, 0x0072, &nv04_gr_object }, /* beta4 */
{ -1, -1, 0x0089, &nv04_gr_object }, /* sifm */
{ -1, -1, 0x008a, &nv04_gr_object }, /* ifc */
{ -1, -1, 0x0096, &nv04_gr_object }, /* celcius */
{ -1, -1, 0x009e, &nv04_gr_object }, /* swzsurf */
{ -1, -1, 0x009f, &nv04_gr_object }, /* imageblit */
{ -1, -1, 0x0597, &nv04_gr_object }, /* kelvin */
{}
}
};
static int
nv2a_gr_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
struct nvkm_oclass *oclass, void *data, u32 size,
......@@ -106,6 +126,8 @@ nv2a_gr_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
if (ret)
return ret;
gr->base.func = &nv2a_gr;
ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 32 * 4, 16, true,
&gr->ctxtab);
if (ret)
......@@ -113,8 +135,6 @@ nv2a_gr_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
nv_subdev(gr)->unit = 0x00001000;
nv_subdev(gr)->intr = nv20_gr_intr;
nv_engine(gr)->cclass = &nv2a_gr_cclass;
nv_engine(gr)->sclass = nv25_gr_sclass;
nv_engine(gr)->tile_prog = nv20_gr_tile_prog;
return 0;
}
......
......@@ -6,127 +6,123 @@
#include <subdev/fb.h>
/*******************************************************************************
* Graphics object classes
* PGRAPH context
******************************************************************************/
static struct nvkm_oclass
nv30_gr_sclass[] = {
{ 0x0012, &nv04_gr_ofuncs, NULL }, /* beta1 */
{ 0x0019, &nv04_gr_ofuncs, NULL }, /* clip */
{ 0x0030, &nv04_gr_ofuncs, NULL }, /* null */
{ 0x0039, &nv04_gr_ofuncs, NULL }, /* m2mf */
{ 0x0043, &nv04_gr_ofuncs, NULL }, /* rop */
{ 0x0044, &nv04_gr_ofuncs, NULL }, /* patt */
{ 0x004a, &nv04_gr_ofuncs, NULL }, /* gdi */
{ 0x0062, &nv04_gr_ofuncs, NULL }, /* surf2d */
{ 0x0072, &nv04_gr_ofuncs, NULL }, /* beta4 */
{ 0x0089, &nv04_gr_ofuncs, NULL }, /* sifm */
{ 0x008a, &nv04_gr_ofuncs, NULL }, /* ifc */
{ 0x009f, &nv04_gr_ofuncs, NULL }, /* imageblit */
{ 0x0362, &nv04_gr_ofuncs, NULL }, /* surf2d (nv30) */
{ 0x0389, &nv04_gr_ofuncs, NULL }, /* sifm (nv30) */
{ 0x038a, &nv04_gr_ofuncs, NULL }, /* ifc (nv30) */
{ 0x039e, &nv04_gr_ofuncs, NULL }, /* swzsurf (nv30) */
{ 0x0397, &nv04_gr_ofuncs, NULL }, /* rankine */
{},
static const struct nvkm_object_func
nv30_gr_chan = {
.dtor = nv20_gr_chan_dtor,
.init = nv20_gr_chan_init,
.fini = nv20_gr_chan_fini,
};
/*******************************************************************************
* PGRAPH context
******************************************************************************/
static int
nv30_gr_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
struct nvkm_oclass *oclass, void *data, u32 size,
struct nvkm_object **pobject)
nv30_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch,
const struct nvkm_oclass *oclass, struct nvkm_object **pobject)
{
struct nv20_gr *gr = nv20_gr(base);
struct nv20_gr_chan *chan;
struct nvkm_gpuobj *image;
int ret, i;
ret = nvkm_gr_context_create(parent, engine, oclass, NULL, 0x5f48,
16, NVOBJ_FLAG_ZERO_ALLOC, &chan);
*pobject = nv_object(chan);
if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
return -ENOMEM;
nvkm_object_ctor(&nv30_gr_chan, oclass, &chan->object);
chan->gr = gr;
chan->chid = fifoch->chid;
*pobject = &chan->object;
ret = nvkm_memory_new(gr->base.engine.subdev.device,
NVKM_MEM_TARGET_INST, 0x5f48, 16, true,
&chan->inst);
if (ret)
return ret;
chan->chid = nvkm_fifo_chan(parent)->chid;
image = &chan->base.base.gpuobj;
nvkm_kmap(image);
nvkm_wo32(image, 0x0028, 0x00000001 | (chan->chid << 24));
nvkm_wo32(image, 0x0410, 0x00000101);
nvkm_wo32(image, 0x0424, 0x00000111);
nvkm_wo32(image, 0x0428, 0x00000060);
nvkm_wo32(image, 0x0444, 0x00000080);
nvkm_wo32(image, 0x0448, 0xffff0000);
nvkm_wo32(image, 0x044c, 0x00000001);
nvkm_wo32(image, 0x0460, 0x44400000);
nvkm_wo32(image, 0x048c, 0xffff0000);
nvkm_kmap(chan->inst);
nvkm_wo32(chan->inst, 0x0028, 0x00000001 | (chan->chid << 24));
nvkm_wo32(chan->inst, 0x0410, 0x00000101);
nvkm_wo32(chan->inst, 0x0424, 0x00000111);
nvkm_wo32(chan->inst, 0x0428, 0x00000060);
nvkm_wo32(chan->inst, 0x0444, 0x00000080);
nvkm_wo32(chan->inst, 0x0448, 0xffff0000);
nvkm_wo32(chan->inst, 0x044c, 0x00000001);
nvkm_wo32(chan->inst, 0x0460, 0x44400000);
nvkm_wo32(chan->inst, 0x048c, 0xffff0000);
for (i = 0x04e0; i < 0x04e8; i += 4)
nvkm_wo32(image, i, 0x0fff0000);
nvkm_wo32(image, 0x04ec, 0x00011100);
nvkm_wo32(chan->inst, i, 0x0fff0000);
nvkm_wo32(chan->inst, 0x04ec, 0x00011100);
for (i = 0x0508; i < 0x0548; i += 4)
nvkm_wo32(image, i, 0x07ff0000);
nvkm_wo32(image, 0x0550, 0x4b7fffff);
nvkm_wo32(image, 0x058c, 0x00000080);
nvkm_wo32(image, 0x0590, 0x30201000);
nvkm_wo32(image, 0x0594, 0x70605040);
nvkm_wo32(image, 0x0598, 0xb8a89888);
nvkm_wo32(image, 0x059c, 0xf8e8d8c8);
nvkm_wo32(image, 0x05b0, 0xb0000000);
nvkm_wo32(chan->inst, i, 0x07ff0000);
nvkm_wo32(chan->inst, 0x0550, 0x4b7fffff);
nvkm_wo32(chan->inst, 0x058c, 0x00000080);
nvkm_wo32(chan->inst, 0x0590, 0x30201000);
nvkm_wo32(chan->inst, 0x0594, 0x70605040);
nvkm_wo32(chan->inst, 0x0598, 0xb8a89888);
nvkm_wo32(chan->inst, 0x059c, 0xf8e8d8c8);
nvkm_wo32(chan->inst, 0x05b0, 0xb0000000);
for (i = 0x0600; i < 0x0640; i += 4)
nvkm_wo32(image, i, 0x00010588);
nvkm_wo32(chan->inst, i, 0x00010588);
for (i = 0x0640; i < 0x0680; i += 4)
nvkm_wo32(image, i, 0x00030303);
nvkm_wo32(chan->inst, i, 0x00030303);
for (i = 0x06c0; i < 0x0700; i += 4)
nvkm_wo32(image, i, 0x0008aae4);
nvkm_wo32(chan->inst, i, 0x0008aae4);
for (i = 0x0700; i < 0x0740; i += 4)
nvkm_wo32(image, i, 0x01012000);
nvkm_wo32(chan->inst, i, 0x01012000);
for (i = 0x0740; i < 0x0780; i += 4)
nvkm_wo32(image, i, 0x00080008);
nvkm_wo32(image, 0x085c, 0x00040000);
nvkm_wo32(image, 0x0860, 0x00010000);
nvkm_wo32(chan->inst, i, 0x00080008);
nvkm_wo32(chan->inst, 0x085c, 0x00040000);
nvkm_wo32(chan->inst, 0x0860, 0x00010000);
for (i = 0x0864; i < 0x0874; i += 4)
nvkm_wo32(image, i, 0x00040004);
nvkm_wo32(chan->inst, i, 0x00040004);
for (i = 0x1f18; i <= 0x3088 ; i += 16) {
nvkm_wo32(image, i + 0, 0x10700ff9);
nvkm_wo32(image, i + 1, 0x0436086c);
nvkm_wo32(image, i + 2, 0x000c001b);
nvkm_wo32(chan->inst, i + 0, 0x10700ff9);
nvkm_wo32(chan->inst, i + 1, 0x0436086c);
nvkm_wo32(chan->inst, i + 2, 0x000c001b);
}
for (i = 0x30b8; i < 0x30c8; i += 4)
nvkm_wo32(image, i, 0x0000ffff);
nvkm_wo32(image, 0x344c, 0x3f800000);
nvkm_wo32(image, 0x3808, 0x3f800000);
nvkm_wo32(image, 0x381c, 0x3f800000);
nvkm_wo32(image, 0x3848, 0x40000000);
nvkm_wo32(image, 0x384c, 0x3f800000);
nvkm_wo32(image, 0x3850, 0x3f000000);
nvkm_wo32(image, 0x3858, 0x40000000);
nvkm_wo32(image, 0x385c, 0x3f800000);
nvkm_wo32(image, 0x3864, 0xbf800000);
nvkm_wo32(image, 0x386c, 0xbf800000);
nvkm_done(image);
nvkm_wo32(chan->inst, i, 0x0000ffff);
nvkm_wo32(chan->inst, 0x344c, 0x3f800000);
nvkm_wo32(chan->inst, 0x3808, 0x3f800000);
nvkm_wo32(chan->inst, 0x381c, 0x3f800000);
nvkm_wo32(chan->inst, 0x3848, 0x40000000);
nvkm_wo32(chan->inst, 0x384c, 0x3f800000);
nvkm_wo32(chan->inst, 0x3850, 0x3f000000);
nvkm_wo32(chan->inst, 0x3858, 0x40000000);
nvkm_wo32(chan->inst, 0x385c, 0x3f800000);
nvkm_wo32(chan->inst, 0x3864, 0xbf800000);
nvkm_wo32(chan->inst, 0x386c, 0xbf800000);
nvkm_done(chan->inst);
return 0;
}
static struct nvkm_oclass
nv30_gr_cclass = {
.handle = NV_ENGCTX(GR, 0x30),
.ofuncs = &(struct nvkm_ofuncs) {
.ctor = nv30_gr_context_ctor,
.dtor = _nvkm_gr_context_dtor,
.init = nv20_gr_context_init,
.fini = nv20_gr_context_fini,
.rd32 = _nvkm_gr_context_rd32,
.wr32 = _nvkm_gr_context_wr32,
},
};
/*******************************************************************************
* PGRAPH engine/subdev functions
******************************************************************************/
static const struct nvkm_gr_func
nv30_gr = {
.chan_new = nv30_gr_chan_new,
.sclass = {
{ -1, -1, 0x0012, &nv04_gr_object }, /* beta1 */
{ -1, -1, 0x0019, &nv04_gr_object }, /* clip */
{ -1, -1, 0x0030, &nv04_gr_object }, /* null */
{ -1, -1, 0x0039, &nv04_gr_object }, /* m2mf */
{ -1, -1, 0x0043, &nv04_gr_object }, /* rop */
{ -1, -1, 0x0044, &nv04_gr_object }, /* patt */
{ -1, -1, 0x004a, &nv04_gr_object }, /* gdi */
{ -1, -1, 0x0062, &nv04_gr_object }, /* surf2d */
{ -1, -1, 0x0072, &nv04_gr_object }, /* beta4 */
{ -1, -1, 0x0089, &nv04_gr_object }, /* sifm */
{ -1, -1, 0x008a, &nv04_gr_object }, /* ifc */
{ -1, -1, 0x009f, &nv04_gr_object }, /* imageblit */
{ -1, -1, 0x0362, &nv04_gr_object }, /* surf2d (nv30) */
{ -1, -1, 0x0389, &nv04_gr_object }, /* sifm (nv30) */
{ -1, -1, 0x038a, &nv04_gr_object }, /* ifc (nv30) */
{ -1, -1, 0x039e, &nv04_gr_object }, /* swzsurf (nv30) */
{ -1, -1, 0x0397, &nv04_gr_object }, /* rankine */
{}
}
};
static int
nv30_gr_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
struct nvkm_oclass *oclass, void *data, u32 size,
......@@ -141,6 +137,8 @@ nv30_gr_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
if (ret)
return ret;
gr->base.func = &nv30_gr;
ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 32 * 4, 16, true,
&gr->ctxtab);
if (ret)
......@@ -148,8 +146,6 @@ nv30_gr_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
nv_subdev(gr)->unit = 0x00001000;
nv_subdev(gr)->intr = nv20_gr_intr;
nv_engine(gr)->cclass = &nv30_gr_cclass;
nv_engine(gr)->sclass = nv30_gr_sclass;
nv_engine(gr)->tile_prog = nv20_gr_tile_prog;
return 0;
}
......
......@@ -5,127 +5,123 @@
#include <engine/fifo/chan.h>
/*******************************************************************************
* Graphics object classes
* PGRAPH context
******************************************************************************/
static struct nvkm_oclass
nv34_gr_sclass[] = {
{ 0x0012, &nv04_gr_ofuncs, NULL }, /* beta1 */
{ 0x0019, &nv04_gr_ofuncs, NULL }, /* clip */
{ 0x0030, &nv04_gr_ofuncs, NULL }, /* null */
{ 0x0039, &nv04_gr_ofuncs, NULL }, /* m2mf */
{ 0x0043, &nv04_gr_ofuncs, NULL }, /* rop */
{ 0x0044, &nv04_gr_ofuncs, NULL }, /* patt */
{ 0x004a, &nv04_gr_ofuncs, NULL }, /* gdi */
{ 0x0062, &nv04_gr_ofuncs, NULL }, /* surf2d */
{ 0x0072, &nv04_gr_ofuncs, NULL }, /* beta4 */
{ 0x0089, &nv04_gr_ofuncs, NULL }, /* sifm */
{ 0x008a, &nv04_gr_ofuncs, NULL }, /* ifc */
{ 0x009f, &nv04_gr_ofuncs, NULL }, /* imageblit */
{ 0x0362, &nv04_gr_ofuncs, NULL }, /* surf2d (nv30) */
{ 0x0389, &nv04_gr_ofuncs, NULL }, /* sifm (nv30) */
{ 0x038a, &nv04_gr_ofuncs, NULL }, /* ifc (nv30) */
{ 0x039e, &nv04_gr_ofuncs, NULL }, /* swzsurf (nv30) */
{ 0x0697, &nv04_gr_ofuncs, NULL }, /* rankine */
{},
static const struct nvkm_object_func
nv34_gr_chan = {
.dtor = nv20_gr_chan_dtor,
.init = nv20_gr_chan_init,
.fini = nv20_gr_chan_fini,
};
/*******************************************************************************
* PGRAPH context
******************************************************************************/
static int
nv34_gr_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
struct nvkm_oclass *oclass, void *data, u32 size,
struct nvkm_object **pobject)
nv34_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch,
const struct nvkm_oclass *oclass, struct nvkm_object **pobject)
{
struct nv20_gr *gr = nv20_gr(base);
struct nv20_gr_chan *chan;
struct nvkm_gpuobj *image;
int ret, i;
ret = nvkm_gr_context_create(parent, engine, oclass, NULL, 0x46dc,
16, NVOBJ_FLAG_ZERO_ALLOC, &chan);
*pobject = nv_object(chan);
if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
return -ENOMEM;
nvkm_object_ctor(&nv34_gr_chan, oclass, &chan->object);
chan->gr = gr;
chan->chid = fifoch->chid;
*pobject = &chan->object;
ret = nvkm_memory_new(gr->base.engine.subdev.device,
NVKM_MEM_TARGET_INST, 0x46dc, 16, true,
&chan->inst);
if (ret)
return ret;
chan->chid = nvkm_fifo_chan(parent)->chid;
image = &chan->base.base.gpuobj;
nvkm_kmap(image);
nvkm_wo32(image, 0x0028, 0x00000001 | (chan->chid << 24));
nvkm_wo32(image, 0x040c, 0x01000101);
nvkm_wo32(image, 0x0420, 0x00000111);
nvkm_wo32(image, 0x0424, 0x00000060);
nvkm_wo32(image, 0x0440, 0x00000080);
nvkm_wo32(image, 0x0444, 0xffff0000);
nvkm_wo32(image, 0x0448, 0x00000001);
nvkm_wo32(image, 0x045c, 0x44400000);
nvkm_wo32(image, 0x0480, 0xffff0000);
nvkm_kmap(chan->inst);
nvkm_wo32(chan->inst, 0x0028, 0x00000001 | (chan->chid << 24));
nvkm_wo32(chan->inst, 0x040c, 0x01000101);
nvkm_wo32(chan->inst, 0x0420, 0x00000111);
nvkm_wo32(chan->inst, 0x0424, 0x00000060);
nvkm_wo32(chan->inst, 0x0440, 0x00000080);
nvkm_wo32(chan->inst, 0x0444, 0xffff0000);
nvkm_wo32(chan->inst, 0x0448, 0x00000001);
nvkm_wo32(chan->inst, 0x045c, 0x44400000);
nvkm_wo32(chan->inst, 0x0480, 0xffff0000);
for (i = 0x04d4; i < 0x04dc; i += 4)
nvkm_wo32(image, i, 0x0fff0000);
nvkm_wo32(image, 0x04e0, 0x00011100);
nvkm_wo32(chan->inst, i, 0x0fff0000);
nvkm_wo32(chan->inst, 0x04e0, 0x00011100);
for (i = 0x04fc; i < 0x053c; i += 4)
nvkm_wo32(image, i, 0x07ff0000);
nvkm_wo32(image, 0x0544, 0x4b7fffff);
nvkm_wo32(image, 0x057c, 0x00000080);
nvkm_wo32(image, 0x0580, 0x30201000);
nvkm_wo32(image, 0x0584, 0x70605040);
nvkm_wo32(image, 0x0588, 0xb8a89888);
nvkm_wo32(image, 0x058c, 0xf8e8d8c8);
nvkm_wo32(image, 0x05a0, 0xb0000000);
nvkm_wo32(chan->inst, i, 0x07ff0000);
nvkm_wo32(chan->inst, 0x0544, 0x4b7fffff);
nvkm_wo32(chan->inst, 0x057c, 0x00000080);
nvkm_wo32(chan->inst, 0x0580, 0x30201000);
nvkm_wo32(chan->inst, 0x0584, 0x70605040);
nvkm_wo32(chan->inst, 0x0588, 0xb8a89888);
nvkm_wo32(chan->inst, 0x058c, 0xf8e8d8c8);
nvkm_wo32(chan->inst, 0x05a0, 0xb0000000);
for (i = 0x05f0; i < 0x0630; i += 4)
nvkm_wo32(image, i, 0x00010588);
nvkm_wo32(chan->inst, i, 0x00010588);
for (i = 0x0630; i < 0x0670; i += 4)
nvkm_wo32(image, i, 0x00030303);
nvkm_wo32(chan->inst, i, 0x00030303);
for (i = 0x06b0; i < 0x06f0; i += 4)
nvkm_wo32(image, i, 0x0008aae4);
nvkm_wo32(chan->inst, i, 0x0008aae4);
for (i = 0x06f0; i < 0x0730; i += 4)
nvkm_wo32(image, i, 0x01012000);
nvkm_wo32(chan->inst, i, 0x01012000);
for (i = 0x0730; i < 0x0770; i += 4)
nvkm_wo32(image, i, 0x00080008);
nvkm_wo32(image, 0x0850, 0x00040000);
nvkm_wo32(image, 0x0854, 0x00010000);
nvkm_wo32(chan->inst, i, 0x00080008);
nvkm_wo32(chan->inst, 0x0850, 0x00040000);
nvkm_wo32(chan->inst, 0x0854, 0x00010000);
for (i = 0x0858; i < 0x0868; i += 4)
nvkm_wo32(image, i, 0x00040004);
nvkm_wo32(chan->inst, i, 0x00040004);
for (i = 0x15ac; i <= 0x271c ; i += 16) {
nvkm_wo32(image, i + 0, 0x10700ff9);
nvkm_wo32(image, i + 1, 0x0436086c);
nvkm_wo32(image, i + 2, 0x000c001b);
nvkm_wo32(chan->inst, i + 0, 0x10700ff9);
nvkm_wo32(chan->inst, i + 1, 0x0436086c);
nvkm_wo32(chan->inst, i + 2, 0x000c001b);
}
for (i = 0x274c; i < 0x275c; i += 4)
nvkm_wo32(image, i, 0x0000ffff);
nvkm_wo32(image, 0x2ae0, 0x3f800000);
nvkm_wo32(image, 0x2e9c, 0x3f800000);
nvkm_wo32(image, 0x2eb0, 0x3f800000);
nvkm_wo32(image, 0x2edc, 0x40000000);
nvkm_wo32(image, 0x2ee0, 0x3f800000);
nvkm_wo32(image, 0x2ee4, 0x3f000000);
nvkm_wo32(image, 0x2eec, 0x40000000);
nvkm_wo32(image, 0x2ef0, 0x3f800000);
nvkm_wo32(image, 0x2ef8, 0xbf800000);
nvkm_wo32(image, 0x2f00, 0xbf800000);
nvkm_done(image);
nvkm_wo32(chan->inst, i, 0x0000ffff);
nvkm_wo32(chan->inst, 0x2ae0, 0x3f800000);
nvkm_wo32(chan->inst, 0x2e9c, 0x3f800000);
nvkm_wo32(chan->inst, 0x2eb0, 0x3f800000);
nvkm_wo32(chan->inst, 0x2edc, 0x40000000);
nvkm_wo32(chan->inst, 0x2ee0, 0x3f800000);
nvkm_wo32(chan->inst, 0x2ee4, 0x3f000000);
nvkm_wo32(chan->inst, 0x2eec, 0x40000000);
nvkm_wo32(chan->inst, 0x2ef0, 0x3f800000);
nvkm_wo32(chan->inst, 0x2ef8, 0xbf800000);
nvkm_wo32(chan->inst, 0x2f00, 0xbf800000);
nvkm_done(chan->inst);
return 0;
}
static struct nvkm_oclass
nv34_gr_cclass = {
.handle = NV_ENGCTX(GR, 0x34),
.ofuncs = &(struct nvkm_ofuncs) {
.ctor = nv34_gr_context_ctor,
.dtor = _nvkm_gr_context_dtor,
.init = nv20_gr_context_init,
.fini = nv20_gr_context_fini,
.rd32 = _nvkm_gr_context_rd32,
.wr32 = _nvkm_gr_context_wr32,
},
};
/*******************************************************************************
* PGRAPH engine/subdev functions
******************************************************************************/
static const struct nvkm_gr_func
nv34_gr = {
.chan_new = nv34_gr_chan_new,
.sclass = {
{ -1, -1, 0x0012, &nv04_gr_object }, /* beta1 */
{ -1, -1, 0x0019, &nv04_gr_object }, /* clip */
{ -1, -1, 0x0030, &nv04_gr_object }, /* null */
{ -1, -1, 0x0039, &nv04_gr_object }, /* m2mf */
{ -1, -1, 0x0043, &nv04_gr_object }, /* rop */
{ -1, -1, 0x0044, &nv04_gr_object }, /* patt */
{ -1, -1, 0x004a, &nv04_gr_object }, /* gdi */
{ -1, -1, 0x0062, &nv04_gr_object }, /* surf2d */
{ -1, -1, 0x0072, &nv04_gr_object }, /* beta4 */
{ -1, -1, 0x0089, &nv04_gr_object }, /* sifm */
{ -1, -1, 0x008a, &nv04_gr_object }, /* ifc */
{ -1, -1, 0x009f, &nv04_gr_object }, /* imageblit */
{ -1, -1, 0x0362, &nv04_gr_object }, /* surf2d (nv30) */
{ -1, -1, 0x0389, &nv04_gr_object }, /* sifm (nv30) */
{ -1, -1, 0x038a, &nv04_gr_object }, /* ifc (nv30) */
{ -1, -1, 0x039e, &nv04_gr_object }, /* swzsurf (nv30) */
{ -1, -1, 0x0697, &nv04_gr_object }, /* rankine */
{}
}
};
static int
nv34_gr_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
struct nvkm_oclass *oclass, void *data, u32 size,
......@@ -140,6 +136,8 @@ nv34_gr_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
if (ret)
return ret;
gr->base.func = &nv34_gr;
ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 32 * 4, 16, true,
&gr->ctxtab);
if (ret)
......@@ -147,8 +145,6 @@ nv34_gr_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
nv_subdev(gr)->unit = 0x00001000;
nv_subdev(gr)->intr = nv20_gr_intr;
nv_engine(gr)->cclass = &nv34_gr_cclass;
nv_engine(gr)->sclass = nv34_gr_sclass;
nv_engine(gr)->tile_prog = nv20_gr_tile_prog;
return 0;
}
......
......@@ -5,127 +5,123 @@
#include <engine/fifo/chan.h>
/*******************************************************************************
* Graphics object classes
* PGRAPH context
******************************************************************************/
static struct nvkm_oclass
nv35_gr_sclass[] = {
{ 0x0012, &nv04_gr_ofuncs, NULL }, /* beta1 */
{ 0x0019, &nv04_gr_ofuncs, NULL }, /* clip */
{ 0x0030, &nv04_gr_ofuncs, NULL }, /* null */
{ 0x0039, &nv04_gr_ofuncs, NULL }, /* m2mf */
{ 0x0043, &nv04_gr_ofuncs, NULL }, /* rop */
{ 0x0044, &nv04_gr_ofuncs, NULL }, /* patt */
{ 0x004a, &nv04_gr_ofuncs, NULL }, /* gdi */
{ 0x0062, &nv04_gr_ofuncs, NULL }, /* surf2d */
{ 0x0072, &nv04_gr_ofuncs, NULL }, /* beta4 */
{ 0x0089, &nv04_gr_ofuncs, NULL }, /* sifm */
{ 0x008a, &nv04_gr_ofuncs, NULL }, /* ifc */
{ 0x009f, &nv04_gr_ofuncs, NULL }, /* imageblit */
{ 0x0362, &nv04_gr_ofuncs, NULL }, /* surf2d (nv30) */
{ 0x0389, &nv04_gr_ofuncs, NULL }, /* sifm (nv30) */
{ 0x038a, &nv04_gr_ofuncs, NULL }, /* ifc (nv30) */
{ 0x039e, &nv04_gr_ofuncs, NULL }, /* swzsurf (nv30) */
{ 0x0497, &nv04_gr_ofuncs, NULL }, /* rankine */
{},
static const struct nvkm_object_func
nv35_gr_chan = {
.dtor = nv20_gr_chan_dtor,
.init = nv20_gr_chan_init,
.fini = nv20_gr_chan_fini,
};
/*******************************************************************************
* PGRAPH context
******************************************************************************/
static int
nv35_gr_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
struct nvkm_oclass *oclass, void *data, u32 size,
struct nvkm_object **pobject)
nv35_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch,
const struct nvkm_oclass *oclass, struct nvkm_object **pobject)
{
struct nv20_gr *gr = nv20_gr(base);
struct nv20_gr_chan *chan;
struct nvkm_gpuobj *image;
int ret, i;
ret = nvkm_gr_context_create(parent, engine, oclass, NULL, 0x577c,
16, NVOBJ_FLAG_ZERO_ALLOC, &chan);
*pobject = nv_object(chan);
if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
return -ENOMEM;
nvkm_object_ctor(&nv35_gr_chan, oclass, &chan->object);
chan->gr = gr;
chan->chid = fifoch->chid;
*pobject = &chan->object;
ret = nvkm_memory_new(gr->base.engine.subdev.device,
NVKM_MEM_TARGET_INST, 0x577c, 16, true,
&chan->inst);
if (ret)
return ret;
chan->chid = nvkm_fifo_chan(parent)->chid;
image = &chan->base.base.gpuobj;
nvkm_kmap(image);
nvkm_wo32(image, 0x0028, 0x00000001 | (chan->chid << 24));
nvkm_wo32(image, 0x040c, 0x00000101);
nvkm_wo32(image, 0x0420, 0x00000111);
nvkm_wo32(image, 0x0424, 0x00000060);
nvkm_wo32(image, 0x0440, 0x00000080);
nvkm_wo32(image, 0x0444, 0xffff0000);
nvkm_wo32(image, 0x0448, 0x00000001);
nvkm_wo32(image, 0x045c, 0x44400000);
nvkm_wo32(image, 0x0488, 0xffff0000);
nvkm_kmap(chan->inst);
nvkm_wo32(chan->inst, 0x0028, 0x00000001 | (chan->chid << 24));
nvkm_wo32(chan->inst, 0x040c, 0x00000101);
nvkm_wo32(chan->inst, 0x0420, 0x00000111);
nvkm_wo32(chan->inst, 0x0424, 0x00000060);
nvkm_wo32(chan->inst, 0x0440, 0x00000080);
nvkm_wo32(chan->inst, 0x0444, 0xffff0000);
nvkm_wo32(chan->inst, 0x0448, 0x00000001);
nvkm_wo32(chan->inst, 0x045c, 0x44400000);
nvkm_wo32(chan->inst, 0x0488, 0xffff0000);
for (i = 0x04dc; i < 0x04e4; i += 4)
nvkm_wo32(image, i, 0x0fff0000);
nvkm_wo32(image, 0x04e8, 0x00011100);
nvkm_wo32(chan->inst, i, 0x0fff0000);
nvkm_wo32(chan->inst, 0x04e8, 0x00011100);
for (i = 0x0504; i < 0x0544; i += 4)
nvkm_wo32(image, i, 0x07ff0000);
nvkm_wo32(image, 0x054c, 0x4b7fffff);
nvkm_wo32(image, 0x0588, 0x00000080);
nvkm_wo32(image, 0x058c, 0x30201000);
nvkm_wo32(image, 0x0590, 0x70605040);
nvkm_wo32(image, 0x0594, 0xb8a89888);
nvkm_wo32(image, 0x0598, 0xf8e8d8c8);
nvkm_wo32(image, 0x05ac, 0xb0000000);
nvkm_wo32(chan->inst, i, 0x07ff0000);
nvkm_wo32(chan->inst, 0x054c, 0x4b7fffff);
nvkm_wo32(chan->inst, 0x0588, 0x00000080);
nvkm_wo32(chan->inst, 0x058c, 0x30201000);
nvkm_wo32(chan->inst, 0x0590, 0x70605040);
nvkm_wo32(chan->inst, 0x0594, 0xb8a89888);
nvkm_wo32(chan->inst, 0x0598, 0xf8e8d8c8);
nvkm_wo32(chan->inst, 0x05ac, 0xb0000000);
for (i = 0x0604; i < 0x0644; i += 4)
nvkm_wo32(image, i, 0x00010588);
nvkm_wo32(chan->inst, i, 0x00010588);
for (i = 0x0644; i < 0x0684; i += 4)
nvkm_wo32(image, i, 0x00030303);
nvkm_wo32(chan->inst, i, 0x00030303);
for (i = 0x06c4; i < 0x0704; i += 4)
nvkm_wo32(image, i, 0x0008aae4);
nvkm_wo32(chan->inst, i, 0x0008aae4);
for (i = 0x0704; i < 0x0744; i += 4)
nvkm_wo32(image, i, 0x01012000);
nvkm_wo32(chan->inst, i, 0x01012000);
for (i = 0x0744; i < 0x0784; i += 4)
nvkm_wo32(image, i, 0x00080008);
nvkm_wo32(image, 0x0860, 0x00040000);
nvkm_wo32(image, 0x0864, 0x00010000);
nvkm_wo32(chan->inst, i, 0x00080008);
nvkm_wo32(chan->inst, 0x0860, 0x00040000);
nvkm_wo32(chan->inst, 0x0864, 0x00010000);
for (i = 0x0868; i < 0x0878; i += 4)
nvkm_wo32(image, i, 0x00040004);
nvkm_wo32(chan->inst, i, 0x00040004);
for (i = 0x1f1c; i <= 0x308c ; i += 16) {
nvkm_wo32(image, i + 0, 0x10700ff9);
nvkm_wo32(image, i + 4, 0x0436086c);
nvkm_wo32(image, i + 8, 0x000c001b);
nvkm_wo32(chan->inst, i + 0, 0x10700ff9);
nvkm_wo32(chan->inst, i + 4, 0x0436086c);
nvkm_wo32(chan->inst, i + 8, 0x000c001b);
}
for (i = 0x30bc; i < 0x30cc; i += 4)
nvkm_wo32(image, i, 0x0000ffff);
nvkm_wo32(image, 0x3450, 0x3f800000);
nvkm_wo32(image, 0x380c, 0x3f800000);
nvkm_wo32(image, 0x3820, 0x3f800000);
nvkm_wo32(image, 0x384c, 0x40000000);
nvkm_wo32(image, 0x3850, 0x3f800000);
nvkm_wo32(image, 0x3854, 0x3f000000);
nvkm_wo32(image, 0x385c, 0x40000000);
nvkm_wo32(image, 0x3860, 0x3f800000);
nvkm_wo32(image, 0x3868, 0xbf800000);
nvkm_wo32(image, 0x3870, 0xbf800000);
nvkm_done(image);
nvkm_wo32(chan->inst, i, 0x0000ffff);
nvkm_wo32(chan->inst, 0x3450, 0x3f800000);
nvkm_wo32(chan->inst, 0x380c, 0x3f800000);
nvkm_wo32(chan->inst, 0x3820, 0x3f800000);
nvkm_wo32(chan->inst, 0x384c, 0x40000000);
nvkm_wo32(chan->inst, 0x3850, 0x3f800000);
nvkm_wo32(chan->inst, 0x3854, 0x3f000000);
nvkm_wo32(chan->inst, 0x385c, 0x40000000);
nvkm_wo32(chan->inst, 0x3860, 0x3f800000);
nvkm_wo32(chan->inst, 0x3868, 0xbf800000);
nvkm_wo32(chan->inst, 0x3870, 0xbf800000);
nvkm_done(chan->inst);
return 0;
}
static struct nvkm_oclass
nv35_gr_cclass = {
.handle = NV_ENGCTX(GR, 0x35),
.ofuncs = &(struct nvkm_ofuncs) {
.ctor = nv35_gr_context_ctor,
.dtor = _nvkm_gr_context_dtor,
.init = nv20_gr_context_init,
.fini = nv20_gr_context_fini,
.rd32 = _nvkm_gr_context_rd32,
.wr32 = _nvkm_gr_context_wr32,
},
};
/*******************************************************************************
* PGRAPH engine/subdev functions
******************************************************************************/
static const struct nvkm_gr_func
nv35_gr = {
.chan_new = nv35_gr_chan_new,
.sclass = {
{ -1, -1, 0x0012, &nv04_gr_object }, /* beta1 */
{ -1, -1, 0x0019, &nv04_gr_object }, /* clip */
{ -1, -1, 0x0030, &nv04_gr_object }, /* null */
{ -1, -1, 0x0039, &nv04_gr_object }, /* m2mf */
{ -1, -1, 0x0043, &nv04_gr_object }, /* rop */
{ -1, -1, 0x0044, &nv04_gr_object }, /* patt */
{ -1, -1, 0x004a, &nv04_gr_object }, /* gdi */
{ -1, -1, 0x0062, &nv04_gr_object }, /* surf2d */
{ -1, -1, 0x0072, &nv04_gr_object }, /* beta4 */
{ -1, -1, 0x0089, &nv04_gr_object }, /* sifm */
{ -1, -1, 0x008a, &nv04_gr_object }, /* ifc */
{ -1, -1, 0x009f, &nv04_gr_object }, /* imageblit */
{ -1, -1, 0x0362, &nv04_gr_object }, /* surf2d (nv30) */
{ -1, -1, 0x0389, &nv04_gr_object }, /* sifm (nv30) */
{ -1, -1, 0x038a, &nv04_gr_object }, /* ifc (nv30) */
{ -1, -1, 0x039e, &nv04_gr_object }, /* swzsurf (nv30) */
{ -1, -1, 0x0497, &nv04_gr_object }, /* rankine */
{}
}
};
static int
nv35_gr_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
struct nvkm_oclass *oclass, void *data, u32 size,
......@@ -140,6 +136,8 @@ nv35_gr_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
if (ret)
return ret;
gr->base.func = &nv35_gr;
ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 32 * 4, 16, true,
&gr->ctxtab);
if (ret)
......@@ -147,8 +145,6 @@ nv35_gr_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
nv_subdev(gr)->unit = 0x00001000;
nv_subdev(gr)->intr = nv20_gr_intr;
nv_engine(gr)->cclass = &nv35_gr_cclass;
nv_engine(gr)->sclass = nv35_gr_sclass;
nv_engine(gr)->tile_prog = nv20_gr_tile_prog;
return 0;
}
......
#ifndef __NV40_GR_H__
#define __NV40_GR_H__
#include <engine/gr.h>
#define nv40_gr(p) container_of((p), struct nv40_gr, base)
#include "priv.h"
struct nvkm_gpuobj;
struct nv40_gr {
struct nvkm_gr base;
u32 size;
struct list_head chan;
};
#define nv40_gr_chan(p) container_of((p), struct nv40_gr_chan, object)
struct nv40_gr_chan {
struct nvkm_object object;
struct nv40_gr *gr;
struct nvkm_fifo_chan *fifo;
u32 inst;
struct list_head head;
};
/* returns 1 if device is one of the nv4x using the 0x4497 object class,
* helpful to determine a number of other hardware features
......
#ifndef __NV50_GR_H__
#define __NV50_GR_H__
#include <engine/gr.h>
struct nvkm_device;
struct nvkm_gpuobj;
#define nv50_gr(p) container_of((p), struct nv50_gr, base)
#include "priv.h"
struct nv50_gr {
struct nvkm_gr base;
const struct nv50_gr_func *func;
spinlock_t lock;
u32 size;
};
struct nv50_gr_func {
void *(*dtor)(struct nv50_gr *);
struct nvkm_sclass sclass[];
};
#define nv50_gr_chan(p) container_of((p), struct nv50_gr_chan, object)
struct nv50_gr_chan {
struct nvkm_object object;
struct nv50_gr *gr;
};
int nv50_grctx_init(struct nvkm_device *, u32 *size);
void nv50_grctx_fill(struct nvkm_device *, struct nvkm_gpuobj *);
......
#ifndef __NVKM_GR_PRIV_H__
#define __NVKM_GR_PRIV_H__
#define nvkm_gr(p) container_of((p), struct nvkm_gr, engine)
#include <engine/gr.h>
struct nvkm_fifo_chan;
struct nvkm_gr_func {
int (*chan_new)(struct nvkm_gr *, struct nvkm_fifo_chan *,
const struct nvkm_oclass *, struct nvkm_object **);
int (*object_get)(struct nvkm_gr *, int, struct nvkm_sclass *);
struct nvkm_sclass sclass[];
};
extern const struct nvkm_object_func nv04_gr_object;
#endif
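Editor's note: priv.h now routes everything through the per-chipset nvkm_gr_func table that each constructor installs (gr->base.func = &nvXX_gr in the hunks above). A stand-alone model of that dispatch, with invented names and simplified types, not the driver's actual API:

#include <stdio.h>

struct gr;

struct gr_func {
	int (*chan_new)(struct gr *, int chid); /* per-channel context hook */
};

struct gr {
	const struct gr_func *func; /* installed by the chipset constructor */
};

/* A chipset supplies its own callback... */
static int demo_chan_new(struct gr *gr, int chid)
{
	printf("allocate context for channel %d\n", chid);
	return 0;
}

static const struct gr_func demo_gr = {
	.chan_new = demo_chan_new,
};

/* ...and generic code only ever calls through gr->func. */
static int gr_chan_new(struct gr *gr, int chid)
{
	return gr->func->chan_new(gr, chid);
}

int main(void)
{
	struct gr gr = { .func = &demo_gr };
	return gr_chan_new(&gr, 4);
}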