Commit 8f0649b5 authored Aug 20, 2015 by Ben Skeggs
drm/nouveau/fifo: convert user classes to new-style nvkm_object
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
parent 9a65a38c
Showing 61 changed files with 2025 additions and 2121 deletions (+2025 -2121)
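The "new-style nvkm_object" that this commit converts the FIFO user classes to is a function-table object: a base struct carrying a const func pointer, embedded in each subclass and recovered with container_of(), which is how struct nvkm_fifo_chan embeds struct nvkm_object and dispatches through nvkm_fifo_chan_func in the chan.c changes below. A minimal standalone sketch of that pattern follows (obj, chan, chan_func and chan_from_obj are illustrative names, not the kernel's; the real types live under nvkm/core):

/* Standalone sketch (not kernel code) of the function-table object pattern. */
#include <stddef.h>
#include <stdio.h>

struct obj;

struct obj_func {                    /* analogous to nvkm_object_func */
    void (*init)(struct obj *);
    void (*fini)(struct obj *);
};

struct obj {                         /* analogous to nvkm_object */
    const struct obj_func *func;
};

struct chan {                        /* analogous to nvkm_fifo_chan */
    struct obj object;               /* base embedded, not pointed to */
    int chid;
};

/* container_of() equivalent: recover the subclass from the embedded base */
#define chan_from_obj(p) \
    ((struct chan *)((char *)(p) - offsetof(struct chan, object)))

static void chan_init(struct obj *base)
{
    struct chan *chan = chan_from_obj(base);
    printf("init channel %d\n", chan->chid);
}

static void chan_fini(struct obj *base)
{
    struct chan *chan = chan_from_obj(base);
    printf("fini channel %d\n", chan->chid);
}

static const struct obj_func chan_func = {
    .init = chan_init,
    .fini = chan_fini,
};

int main(void)
{
    struct chan chan = { .object = { .func = &chan_func }, .chid = 3 };
    chan.object.func->init(&chan.object);   /* dispatch through the table */
    chan.object.func->fini(&chan.object);
    return 0;
}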
drivers/gpu/drm/nouveau/include/nvif/device.h +1 -1
drivers/gpu/drm/nouveau/include/nvkm/core/engctx.h +0 -3
drivers/gpu/drm/nouveau/include/nvkm/core/engine.h +1 -0
drivers/gpu/drm/nouveau/include/nvkm/core/handle.h +0 -1
drivers/gpu/drm/nouveau/include/nvkm/core/namedb.h +0 -52
drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h +34 -26
drivers/gpu/drm/nouveau/nv84_fence.c +1 -1
drivers/gpu/drm/nouveau/nvkm/core/Kbuild +0 -1
drivers/gpu/drm/nouveau/nvkm/core/engctx.c +1 -74
drivers/gpu/drm/nouveau/nvkm/core/gpuobj.c +10 -0
drivers/gpu/drm/nouveau/nvkm/core/handle.c +2 -50
drivers/gpu/drm/nouveau/nvkm/core/ioctl.c +1 -1
drivers/gpu/drm/nouveau/nvkm/core/namedb.c +0 -201
drivers/gpu/drm/nouveau/nvkm/engine/ce/gt215.c +2 -1
drivers/gpu/drm/nouveau/nvkm/engine/cipher/g84.c +4 -3
drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv04.c +1 -1
drivers/gpu/drm/nouveau/nvkm/engine/dma/base.c +23 -83
drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c +65 -43
drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c +405 -72
drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.h +24 -20
drivers/gpu/drm/nouveau/nvkm/engine/fifo/chang84.c +193 -134
drivers/gpu/drm/nouveau/nvkm/engine/fifo/changf100.h +13 -12
drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h +17 -15
drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv04.h +12 -12
drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.c +157 -136
drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.h +20 -27
drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmag84.c +34 -68
drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv04.c +77 -139
drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv10.c +27 -34
drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv17.c +28 -36
drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv40.c +134 -106
drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv50.c +32 -56
drivers/gpu/drm/nouveau/nvkm/engine/fifo/g84.c +11 -2
drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c +39 -22
drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.h +5 -0
drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c +48 -66
drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h +42 -0
drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm204.c +9 -1
drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifog84.c +31 -59
drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogf100.c +184 -190
drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c +189 -218
drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogm204.c +6 -4
drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifonv50.c +29 -47
drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c +27 -13
drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.h +1 -8
drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv10.c +7 -12
drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv17.c +11 -16
drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv40.c +11 -16
drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.c +12 -3
drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.h +1 -0
drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h +0 -1
drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c +11 -8
drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.c +2 -1
drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.c +2 -1
drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.c +13 -11
drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.c +2 -2
drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c +2 -3
drivers/gpu/drm/nouveau/nvkm/engine/sec/g98.c +2 -1
drivers/gpu/drm/nouveau/nvkm/engine/sw/nv50.c +2 -2
drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c +1 -1
drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.c +6 -4
drivers/gpu/drm/nouveau/include/nvif/device.h
...
@@ -64,6 +64,6 @@ u64 nvif_device_time(struct nvif_device *);
 #include <engine/gr.h>
 #include <engine/sw.h>
-#define nvxx_fifo(a) nvkm_fifo(nvxx_device(a))
+#define nvxx_fifo(a) nvxx_device(a)->fifo
 #define nvxx_gr(a) nvkm_gr(nvxx_device(a))
 #endif
drivers/gpu/drm/nouveau/include/nvkm/core/engctx.h
...
@@ -45,7 +45,4 @@ int _nvkm_engctx_init(struct nvkm_object *);
 int _nvkm_engctx_fini(struct nvkm_object *, bool suspend);
 #define _nvkm_engctx_rd32 _nvkm_gpuobj_rd32
 #define _nvkm_engctx_wr32 _nvkm_gpuobj_wr32
-
-struct nvkm_object *nvkm_engctx_get(struct nvkm_engine *, u64 addr);
-void nvkm_engctx_put(struct nvkm_object *);
 #endif
drivers/gpu/drm/nouveau/include/nvkm/core/engine.h
...
@@ -42,6 +42,7 @@ struct nvkm_engine_func {
         int (*sclass)(struct nvkm_oclass *, int index);
     } fifo;
+    const struct nvkm_object_func *cclass;
     struct nvkm_sclass sclass[];
 };
...
drivers/gpu/drm/nouveau/include/nvkm/core/handle.h
...
@@ -4,7 +4,6 @@
 struct nvkm_object;

 struct nvkm_handle {
-    struct nvkm_namedb *namedb;
     struct list_head node;
     struct list_head head;
...
drivers/gpu/drm/nouveau/include/nvkm/core/namedb.h deleted (100644 → 0)
#ifndef __NVKM_NAMEDB_H__
#define __NVKM_NAMEDB_H__
#include <core/parent.h>
struct nvkm_handle;

struct nvkm_namedb {
    struct nvkm_parent parent;
    rwlock_t lock;
    struct list_head list;
};

static inline struct nvkm_namedb *
nv_namedb(void *obj)
{
#if CONFIG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA
    BUG_ON(!nv_iclass(obj, NV_NAMEDB_CLASS));
#endif
    return obj;
}

#define nvkm_namedb_create(p,e,c,v,s,m,d) \
    nvkm_namedb_create_((p), (e), (c), (v), (s), (m), \
                sizeof(**d), (void **)d)
#define nvkm_namedb_init(p) \
    nvkm_parent_init(&(p)->parent)
#define nvkm_namedb_fini(p,s) \
    nvkm_parent_fini(&(p)->parent, (s))
#define nvkm_namedb_destroy(p) \
    nvkm_parent_destroy(&(p)->parent)

int  nvkm_namedb_create_(struct nvkm_object *, struct nvkm_object *,
             struct nvkm_oclass *, u32 pclass,
             struct nvkm_oclass *, u64 engcls,
             int size, void **);
int  _nvkm_namedb_ctor(struct nvkm_object *, struct nvkm_object *,
               struct nvkm_oclass *, void *, u32,
               struct nvkm_object **);
#define _nvkm_namedb_dtor _nvkm_parent_dtor
#define _nvkm_namedb_init _nvkm_parent_init
#define _nvkm_namedb_fini _nvkm_parent_fini

int  nvkm_namedb_insert(struct nvkm_namedb *, u32 name, struct nvkm_object *,
            struct nvkm_handle *);
void nvkm_namedb_remove(struct nvkm_handle *);

struct nvkm_handle *nvkm_namedb_get(struct nvkm_namedb *, u32);
struct nvkm_handle *nvkm_namedb_get_class(struct nvkm_namedb *, s32);
struct nvkm_handle *nvkm_namedb_get_vinst(struct nvkm_namedb *, u64);
struct nvkm_handle *nvkm_namedb_get_cinst(struct nvkm_namedb *, u32);
void nvkm_namedb_put(struct nvkm_handle *);
#endif
drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h
 #ifndef __NVKM_FIFO_H__
 #define __NVKM_FIFO_H__
-#include <core/namedb.h>
+#define nvkm_fifo_chan(p) container_of((p), struct nvkm_fifo_chan, object)
+#define nvkm_fifo(p) container_of((p), struct nvkm_fifo, engine)
+#include <core/engine.h>
+#include <core/event.h>
+
+#define NVKM_FIFO_CHID_NR 4096
+
+struct nvkm_fifo_engn {
+    struct nvkm_object *object;
+    int refcount;
+    int usecount;
+};

 struct nvkm_fifo_chan {
-    struct nvkm_namedb namedb;
-    struct nvkm_gpuobj *pushgpu;
+    const struct nvkm_fifo_chan_func *func;
+    struct nvkm_fifo *fifo;
+    u64 engines;
+    struct nvkm_object object;
+
+    struct list_head head;
+    u16 chid;
+    struct nvkm_gpuobj *inst;
+    struct nvkm_gpuobj *push;
+    struct nvkm_vm *vm;
     void __iomem *user;
     u64 addr;
     u32 size;
-    u16 chid;
-    u64 inst;
+
+    struct nvkm_fifo_engn engn[NVDEV_SUBDEV_NR];
 };

-static inline struct nvkm_fifo_chan *
-nvkm_fifo_chan(void *obj)
-{
-    return (void *)nv_namedb(obj);
-}
+extern const struct nvkm_object_func nvkm_fifo_chan_func;

 #include <core/gpuobj.h>

 struct nvkm_fifo_base {
     struct nvkm_gpuobj gpuobj;
 };
...
@@ -39,25 +53,27 @@ struct nvkm_fifo_base {
 #define _nvkm_fifo_context_rd32 _nvkm_gpuobj_rd32
 #define _nvkm_fifo_context_wr32 _nvkm_gpuobj_wr32

-#include <core/engine.h>
-#include <core/event.h>
-
 struct nvkm_fifo {
     struct nvkm_engine engine;
+    const struct nvkm_fifo_func *func;

     struct nvkm_event cevent; /* channel creation event */
     struct nvkm_event uevent; /* async user trigger */

-    struct nvkm_object **channel;
+    DECLARE_BITMAP(mask, NVKM_FIFO_CHID_NR);
+    int nr;
+    struct list_head chan;
     spinlock_t lock;
-    u16 min;
-    u16 max;

-    int  (*chid)(struct nvkm_fifo *, struct nvkm_object *);
     void (*pause)(struct nvkm_fifo *, unsigned long *);
     void (*start)(struct nvkm_fifo *, unsigned long *);
 };

+struct nvkm_fifo_func {
+    void *(*dtor)(struct nvkm_fifo *);
+    const struct nvkm_fifo_chan_oclass *chan[];
+};
+
 void nvkm_fifo_chan_put(struct nvkm_fifo *, unsigned long flags,
             struct nvkm_fifo_chan **);
 struct nvkm_fifo_chan *
...
@@ -65,12 +81,6 @@ nvkm_fifo_chan_inst(struct nvkm_fifo *, u64 inst, unsigned long *flags);
 struct nvkm_fifo_chan *
 nvkm_fifo_chan_chid(struct nvkm_fifo *, int chid, unsigned long *flags);

-static inline struct nvkm_fifo *
-nvkm_fifo(void *obj)
-{
-    return (void *)nvkm_engine(obj, NVDEV_ENGINE_FIFO);
-}
-
 #define nvkm_fifo_create(o,e,c,fc,lc,d) \
    nvkm_fifo_create_((o), (e), (c), (fc), (lc), sizeof(**d), (void **)d)
 #define nvkm_fifo_init(p) \
...
@@ -82,8 +92,6 @@ int nvkm_fifo_create_(struct nvkm_object *, struct nvkm_object *,
              struct nvkm_oclass *, int min, int max,
              int size, void **);
 void nvkm_fifo_destroy(struct nvkm_fifo *);
-const char *
-nvkm_client_name_for_fifo_chid(struct nvkm_fifo *fifo, u32 chid);
 #define _nvkm_fifo_init _nvkm_engine_init
 #define _nvkm_fifo_fini _nvkm_engine_fini
...
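Per-chip FIFO implementations now expose their channel classes through the NULL-terminated nvkm_fifo_func::chan[] array declared above; base.c walks it by index in nvkm_fifo_class_get() (shown further down). A standalone sketch of that enumeration pattern, under illustrative names (chan_oclass, fifo_func, g84_fifo and class_get are stand-ins here, not the kernel's definitions):

/* Standalone sketch (not kernel code) of index-based class enumeration. */
#include <stdio.h>

struct chan_oclass {
    const char *name;                    /* stand-in for nvkm_sclass/ctor data */
};

struct fifo_func {
    const struct chan_oclass *chan[4];   /* NULL-terminated list */
};

static const struct chan_oclass dma_chan    = { "DMA channel" };
static const struct chan_oclass gpfifo_chan = { "GPFIFO channel" };

static const struct fifo_func g84_fifo = {
    .chan = { &dma_chan, &gpfifo_chan, NULL },
};

/* return 0 and the class at 'index', or the class count if out of range */
static int class_get(const struct fifo_func *func, int index,
                     const struct chan_oclass **out)
{
    const struct chan_oclass *sclass;
    int c = 0;
    while ((sclass = func->chan[c])) {
        if (c++ == index) {
            *out = sclass;
            return 0;
        }
    }
    return c;
}

int main(void)
{
    const struct chan_oclass *cls;
    int i = 0;
    while (class_get(&g84_fifo, i, &cls) == 0)
        printf("class %d: %s\n", i++, cls->name);
    return 0;
}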
drivers/gpu/drm/nouveau/nv84_fence.c
...
@@ -228,7 +228,7 @@ nv84_fence_create(struct nouveau_drm *drm)
     priv->base.context_new = nv84_fence_context_new;
     priv->base.context_del = nv84_fence_context_del;
-    priv->base.contexts = fifo->max + 1;
+    priv->base.contexts = fifo->nr;
     priv->base.context_base = fence_context_alloc(priv->base.contexts);
     priv->base.uevent = true;
...
drivers/gpu/drm/nouveau/nvkm/core/Kbuild
...
@@ -8,7 +8,6 @@ nvkm-y += nvkm/core/handle.o
 nvkm-y += nvkm/core/ioctl.o
 nvkm-y += nvkm/core/memory.o
 nvkm-y += nvkm/core/mm.o
-nvkm-y += nvkm/core/namedb.o
 nvkm-y += nvkm/core/notify.o
 nvkm-y += nvkm/core/object.o
 nvkm-y += nvkm/core/oproxy.o
...
drivers/gpu/drm/nouveau/nvkm/core/engctx.c
...
@@ -124,58 +124,12 @@ nvkm_engctx_destroy(struct nvkm_engctx *engctx)
 int
 nvkm_engctx_init(struct nvkm_engctx *engctx)
 {
-    ... (old path: nvkm_gpuobj_init(), then look up the NV_PARENT_CLASS parent
-         and call nv_parent(parent)->context_attach() under the parent subdev
-         mutex, logging "failed to attach %s context" on error) ...
+    return nvkm_gpuobj_init(&engctx->gpuobj);
 }

 int
 nvkm_engctx_fini(struct nvkm_engctx *engctx, bool suspend)
 {
-    ... (old path: call nv_parent(parent)->context_detach() under the parent
-         subdev mutex, logging "failed to detach %s context" on error) ...
     return nvkm_gpuobj_fini(&engctx->gpuobj, suspend);
 }
...
@@ -210,30 +164,3 @@ _nvkm_engctx_fini(struct nvkm_object *object, bool suspend)
 {
     return nvkm_engctx_fini(nv_engctx(object), suspend);
 }
-
-struct nvkm_object *
-nvkm_engctx_get(struct nvkm_engine *engine, u64 addr)
-{
-    struct nvkm_engctx *engctx;
-    unsigned long flags;
-
-    spin_lock_irqsave(&engine->lock, flags);
-    list_for_each_entry(engctx, &engine->contexts, head) {
-        if (engctx->addr == addr) {
-            engctx->save = flags;
-            return nv_object(engctx);
-        }
-    }
-    spin_unlock_irqrestore(&engine->lock, flags);
-    return NULL;
-}
-
-void
-nvkm_engctx_put(struct nvkm_object *object)
-{
-    if (object) {
-        struct nvkm_engine *engine = nv_engine(object->engine);
-        struct nvkm_engctx *engctx = nv_engctx(object);
-        spin_unlock_irqrestore(&engine->lock, engctx->save);
-    }
-}
drivers/gpu/drm/nouveau/nvkm/core/gpuobj.c
...
@@ -231,6 +231,8 @@ nvkm_gpuobj_destroy(struct nvkm_gpuobj *gpuobj)
     nvkm_object_destroy(&gpuobj->object);
 }

+#include <engine/fifo.h>
+
 int
 nvkm_gpuobj_create_(struct nvkm_object *parent, struct nvkm_object *engine,
             struct nvkm_oclass *oclass, u32 pclass,
...
@@ -240,11 +242,19 @@ nvkm_gpuobj_create_(struct nvkm_object *parent, struct nvkm_object *engine,
     struct nvkm_device *device = nv_device(parent);
     struct nvkm_gpuobj *pargpu = NULL;
     struct nvkm_gpuobj *gpuobj;
+    struct nvkm_object *object = objgpu;
     const bool zero = (flags & NVOBJ_FLAG_ZERO_ALLOC);
     int ret;

     *pobject = NULL;

-    if (objgpu) {
+    while (object && object->func != &nvkm_fifo_chan_func)
+        object = object->parent;
+
+    if (object) {
+        struct nvkm_fifo_chan *chan = nvkm_fifo_chan(object);
+        pargpu = chan->inst;
+    } else
+    if (objgpu) {
         while ((objgpu = nv_pclass(objgpu, NV_GPUOBJ_CLASS))) {
             if (nv_gpuobj(objgpu)->heap.block_size)
...
drivers/gpu/drm/nouveau/nvkm/core/handle.c
...
@@ -23,7 +23,7 @@
  */
 #include <core/handle.h>
 #include <core/client.h>
-#include <core/namedb.h>
+#include <core/parent.h>

 #define hprintk(h,l,f,a...) do { \
    struct nvkm_handle *p = (h)->parent; u32 n = p ? p->name : ~0; \
...
@@ -98,14 +98,9 @@ int
 nvkm_handle_create(struct nvkm_handle *parent, u32 _handle,
           struct nvkm_object *object, struct nvkm_handle **phandle)
 {
-    struct nvkm_object *namedb;
     struct nvkm_handle *handle;
     int ret;

-    namedb = parent ? parent->object : NULL;
-    while (namedb && !nv_iclass(namedb, NV_NAMEDB_CLASS))
-        namedb = namedb->parent;
-
     handle = kzalloc(sizeof(*handle), GFP_KERNEL);
     if (!handle)
        return -ENOMEM;
...
@@ -118,15 +113,6 @@ nvkm_handle_create(struct nvkm_handle *parent, u32 _handle,
     handle->parent = parent;
     nvkm_object_ref(object, &handle->object);

-    if (namedb) {
-        ret = nvkm_namedb_insert(nv_namedb(namedb), _handle, object, handle);
-        if (ret) {
-            kfree(handle);
-            return ret;
-        }
-    }
-
     if (parent) {
        if (nv_iclass(parent->object, NV_PARENT_CLASS) &&
            nv_parent(parent->object)->object_attach) {
...
@@ -168,40 +154,6 @@ nvkm_handle_destroy(struct nvkm_handle *handle)
     }
     hprintk(handle, TRACE, "destroy completed\n");

-    nvkm_namedb_remove(handle);
+    nvkm_object_ref(NULL, &handle->object);
     kfree(handle);
 }
... (also removed here: nvkm_handle_get_class(), nvkm_handle_get_vinst(),
     nvkm_handle_get_cinst() and nvkm_handle_put(), thin wrappers around the
     corresponding nvkm_namedb_get_*()/nvkm_namedb_put() lookups) ...
drivers/gpu/drm/nouveau/nvkm/core/ioctl.c
...
@@ -25,7 +25,7 @@
 #include <core/client.h>
 #include <core/engine.h>
 #include <core/handle.h>
-#include <core/namedb.h>
+#include <core/parent.h>

 #include <nvif/unpack.h>
 #include <nvif/ioctl.h>
...
drivers/gpu/drm/nouveau/nvkm/core/namedb.c deleted (100644 → 0)
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include <core/namedb.h>
#include <core/gpuobj.h>
#include <core/handle.h>

static struct nvkm_handle *
nvkm_namedb_lookup(struct nvkm_namedb *namedb, u32 name)
{
    struct nvkm_handle *handle;

    list_for_each_entry(handle, &namedb->list, node) {
        if (handle->name == name)
            return handle;
    }

    return NULL;
}

static struct nvkm_handle *
nvkm_namedb_lookup_class(struct nvkm_namedb *namedb, s32 oclass)
... (same walk, matching nv_mclass(handle->object) == oclass) ...

static struct nvkm_handle *
nvkm_namedb_lookup_vinst(struct nvkm_namedb *namedb, u64 vinst)
... (same walk over NV_GPUOBJ_CLASS objects, matching nv_gpuobj()->addr) ...

static struct nvkm_handle *
nvkm_namedb_lookup_cinst(struct nvkm_namedb *namedb, u32 cinst)
... (same walk over NV_GPUOBJ_CLASS objects, matching nv_gpuobj()->node->offset) ...

int
nvkm_namedb_insert(struct nvkm_namedb *namedb, u32 name,
           struct nvkm_object *object, struct nvkm_handle *handle)
{
    int ret = -EEXIST;
    write_lock_irq(&namedb->lock);
    if (!nvkm_namedb_lookup(namedb, name)) {
        nvkm_object_ref(object, &handle->object);
        handle->namedb = namedb;
        list_add(&handle->node, &namedb->list);
        ret = 0;
    }
    write_unlock_irq(&namedb->lock);
    return ret;
}

void
nvkm_namedb_remove(struct nvkm_handle *handle)
{
    struct nvkm_namedb *namedb = handle->namedb;
    struct nvkm_object *object = handle->object;
    if (handle->namedb) {
        write_lock_irq(&namedb->lock);
        list_del(&handle->node);
        write_unlock_irq(&namedb->lock);
    }
    nvkm_object_ref(NULL, &object);
}

struct nvkm_handle *
nvkm_namedb_get(struct nvkm_namedb *namedb, u32 name)
{
    struct nvkm_handle *handle;
    read_lock(&namedb->lock);
    handle = nvkm_namedb_lookup(namedb, name);
    if (handle == NULL)
        read_unlock(&namedb->lock);
    return handle;
}

struct nvkm_handle *
nvkm_namedb_get_class(struct nvkm_namedb *namedb, s32 oclass)
... (as nvkm_namedb_get(), using nvkm_namedb_lookup_class()) ...

struct nvkm_handle *
nvkm_namedb_get_vinst(struct nvkm_namedb *namedb, u64 vinst)
... (as nvkm_namedb_get(), using nvkm_namedb_lookup_vinst()) ...

struct nvkm_handle *
nvkm_namedb_get_cinst(struct nvkm_namedb *namedb, u32 cinst)
... (as nvkm_namedb_get(), using nvkm_namedb_lookup_cinst()) ...

void
nvkm_namedb_put(struct nvkm_handle *handle)
{
    if (handle)
        read_unlock(&handle->namedb->lock);
}

int
nvkm_namedb_create_(struct nvkm_object *parent, struct nvkm_object *engine,
            struct nvkm_oclass *oclass, u32 pclass,
            struct nvkm_oclass *sclass, u64 engcls,
            int length, void **pobject)
... (nvkm_parent_create_() with NV_NAMEDB_CLASS, then rwlock_init() and
     INIT_LIST_HEAD() of the handle list) ...

int
_nvkm_namedb_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
          struct nvkm_oclass *oclass, void *data, u32 size,
          struct nvkm_object **pobject)
... (wrapper around nvkm_namedb_create()) ...
drivers/gpu/drm/nouveau/nvkm/engine/ce/gt215.c
...
@@ -85,7 +85,8 @@ gt215_ce_intr(struct nvkm_falcon *ce, struct nvkm_fifo_chan *chan)
     nvkm_error(subdev, "DISPATCH_ERROR %04x [%s] ch %d [%010llx %s] "
                "subc %d mthd %04x data %08x\n", ssta,
                en ? en->name : "", chan ? chan->chid : -1,
-               chan ? chan->inst : 0, nvkm_client_name(chan),
+               chan ? chan->inst->addr : 0,
+               chan ? chan->object.client->name : "unknown",
                subc, mthd, data);
 }
...
drivers/gpu/drm/nouveau/nvkm/engine/cipher/g84.c
...
@@ -121,9 +121,10 @@ g84_cipher_intr(struct nvkm_subdev *subdev)
     if (stat) {
         nvkm_snprintbf(msg, sizeof(msg), g84_cipher_intr_mask, stat);
         nvkm_error(subdev, "%08x [%s] ch %d [%010llx %s] "
                    "mthd %04x data %08x\n",
                    stat, msg, chan ? chan->chid : -1, (u64)inst << 12,
-                   nvkm_client_name(chan), mthd, data);
+                   chan ? chan->object.client->name : "unknown",
+                   mthd, data);
     }
     nvkm_fifo_chan_put(fifo, flags, &chan);
...
drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv04.c
...
@@ -21,7 +21,7 @@
  *
  * Authors: Ben Skeggs
  */
-#define nv04_disp_root(p) container_of((p), struct nv04_disp_root, object);
+#define nv04_disp_root(p) container_of((p), struct nv04_disp_root, object)
 #include "priv.h"

 #include <core/client.h>
...
drivers/gpu/drm/nouveau/nvkm/engine/dma/base.c
...
@@ -24,6 +24,7 @@
 #include "priv.h"
 #include <core/client.h>
+#include <engine/fifo.h>

 #include <nvif/class.h>
...
@@ -88,11 +89,19 @@ nvkm_dma_oclass_base = {
     .ctor = nvkm_dma_oclass_new,
 };

+static int
+nvkm_dma_oclass_fifo_new(const struct nvkm_oclass *oclass, void *data, u32 size,
+             struct nvkm_object **pobject)
+{
+    return nvkm_dma_oclass_new(oclass->engine->subdev.device,
+                   oclass, data, size, pobject);
+}
+
 static const struct nvkm_sclass
 nvkm_dma_sclass[] = {
-    { 0, 0, NV_DMA_FROM_MEMORY },
-    { 0, 0, NV_DMA_TO_MEMORY },
-    { 0, 0, NV_DMA_IN_MEMORY },
+    { 0, 0, NV_DMA_FROM_MEMORY, NULL, nvkm_dma_oclass_fifo_new },
+    { 0, 0, NV_DMA_TO_MEMORY, NULL, nvkm_dma_oclass_fifo_new },
+    { 0, 0, NV_DMA_IN_MEMORY, NULL, nvkm_dma_oclass_fifo_new },
 };

 static int
...
@@ -110,89 +119,21 @@ nvkm_dma_oclass_base_get(struct nvkm_oclass *sclass, int index,
     return count;
 }

... (removed here: the old compatibility object path -- the "empty" nvkm_oclass,
     nvkm_dmaobj_compat_ctor() which built a hack oclass, called impl->class_new(),
     bound the dmaobj into the parent gpuobj and wrapped the result,
     nvkm_dmaobj_compat_dtor(), nvkm_dmaobj_compat_ofuncs and the
     nvkm_dma_compat_sclass[] table listing NV_DMA_FROM_MEMORY / NV_DMA_TO_MEMORY /
     NV_DMA_IN_MEMORY) ...

+static int
+nvkm_dma_oclass_fifo_get(struct nvkm_oclass *oclass, int index)
+{
+    const int count = ARRAY_SIZE(nvkm_dma_sclass);
+    if (index < count) {
+        oclass->base = nvkm_dma_sclass[index];
+        return index;
+    }
+    return count;
+}
+
 static const struct nvkm_engine_func
 nvkm_dma = {
     .base.sclass = nvkm_dma_oclass_base_get,
+    .fifo.sclass = nvkm_dma_oclass_fifo_get,
 };

 int
...
@@ -209,7 +150,6 @@ _nvkm_dma_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
     if (ret)
        return ret;

-    dmaeng->engine.sclass = nvkm_dma_compat_sclass;
     dmaeng->engine.func = &nvkm_dma;
     return 0;
 }
drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c
...
@@ -44,12 +44,13 @@ nvkm_fifo_chan_put(struct nvkm_fifo *fifo, unsigned long flags,
 struct nvkm_fifo_chan *
 nvkm_fifo_chan_inst(struct nvkm_fifo *fifo, u64 inst, unsigned long *rflags)
 {
+    struct nvkm_fifo_chan *chan;
     unsigned long flags;
-    int i;
     spin_lock_irqsave(&fifo->lock, flags);
-    for (i = fifo->min; i < fifo->max; i++) {
-        struct nvkm_fifo_chan *chan = (void *)fifo->channel[i];
-        if (chan && chan->inst == inst) {
+    list_for_each_entry(chan, &fifo->chan, head) {
+        if (chan->inst->addr == inst) {
+            list_del(&chan->head);
+            list_add(&chan->head, &fifo->chan);
             *rflags = flags;
             return chan;
         }
...
@@ -61,45 +62,21 @@ nvkm_fifo_chan_inst(struct nvkm_fifo *fifo, u64 inst, unsigned long *rflags)
 struct nvkm_fifo_chan *
 nvkm_fifo_chan_chid(struct nvkm_fifo *fifo, int chid, unsigned long *rflags)
 {
+    struct nvkm_fifo_chan *chan;
     unsigned long flags;
     spin_lock_irqsave(&fifo->lock, flags);
-    if (fifo->channel[chid]) {
-        *rflags = flags;
-        return (void *)fifo->channel[chid];
+    list_for_each_entry(chan, &fifo->chan, head) {
+        if (chan->chid == chid) {
+            list_del(&chan->head);
+            list_add(&chan->head, &fifo->chan);
+            *rflags = flags;
+            return chan;
+        }
     }
     spin_unlock_irqrestore(&fifo->lock, flags);
     return NULL;
 }

... (removed here: the old nvkm_fifo_chid() helper that walked object parents
     looking for an NV_ENGCTX_CLASS match, and nvkm_client_name_for_fifo_chid()
     which looked a channel up in the fifo->channel[] array) ...

 static int
 nvkm_fifo_event_ctor(struct nvkm_object *object, void *data, u32 size,
             struct nvkm_notify *notify)
...
@@ -144,21 +121,62 @@ nvkm_fifo_uevent(struct nvkm_fifo *fifo)
     nvkm_event_send(&fifo->uevent, 1, 0, &rep, sizeof(rep));
 }

+static int
+nvkm_fifo_class_new(struct nvkm_device *device,
+            const struct nvkm_oclass *oclass, void *data, u32 size,
+            struct nvkm_object **pobject)
+{
+    const struct nvkm_fifo_chan_oclass *sclass = oclass->engn;
+    struct nvkm_fifo *fifo = nvkm_fifo(oclass->engine);
+    return sclass->ctor(fifo, oclass, data, size, pobject);
+}
+
+static const struct nvkm_device_oclass
+nvkm_fifo_class = {
+    .ctor = nvkm_fifo_class_new,
+};
+
+static int
+nvkm_fifo_class_get(struct nvkm_oclass *oclass, int index,
+            const struct nvkm_device_oclass **class)
+{
+    struct nvkm_fifo *fifo = nvkm_fifo(oclass->engine);
+    const struct nvkm_fifo_chan_oclass *sclass;
+    int c = 0;
+
+    while ((sclass = fifo->func->chan[c])) {
+        if (c++ == index) {
+            oclass->base = sclass->base;
+            oclass->engn = sclass;
+            *class = &nvkm_fifo_class;
+            return 0;
+        }
+    }
+
+    return c;
+}
+
 void
 nvkm_fifo_destroy(struct nvkm_fifo *fifo)
 {
-    kfree(fifo->channel);
     nvkm_event_fini(&fifo->uevent);
     nvkm_event_fini(&fifo->cevent);
     nvkm_engine_destroy(&fifo->engine);
 }

+static const struct nvkm_engine_func
+nvkm_fifo_func = {
+    .base.sclass = nvkm_fifo_class_get,
+};
+
 int
 nvkm_fifo_create_(struct nvkm_object *parent, struct nvkm_object *engine,
          struct nvkm_oclass *oclass,
          int min, int max, int length, void **pobject)
 {
     struct nvkm_fifo *fifo;
+    int nr = max + 1;
+    int cnt = nr - min;
     int ret;

     ret = nvkm_engine_create_(parent, engine, oclass, true, "PFIFO",
...
@@ -167,17 +185,21 @@ nvkm_fifo_create_(struct nvkm_object *parent, struct nvkm_object *engine,
     if (ret)
        return ret;

-    fifo->min = min;
-    fifo->max = max;
-    fifo->channel = kzalloc(sizeof(*fifo->channel) * (max + 1), GFP_KERNEL);
-    if (!fifo->channel)
-        return -ENOMEM;
+    fifo->engine.func = &nvkm_fifo_func;
+    INIT_LIST_HEAD(&fifo->chan);
+
+    fifo->nr = nr;
+    if (WARN_ON(fifo->nr > NVKM_FIFO_CHID_NR)) {
+        fifo->nr = NVKM_FIFO_CHID_NR;
+        cnt = fifo->nr - min;
+    }
+    bitmap_fill(fifo->mask, NVKM_FIFO_CHID_NR);
+    bitmap_clear(fifo->mask, min, cnt);

     ret = nvkm_event_init(&nvkm_fifo_event_func, 1, 1, &fifo->cevent);
     if (ret)
        return ret;

-    fifo->chid = nvkm_fifo_chid;
     spin_lock_init(&fifo->lock);
     return 0;
 }
drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c
View file @
8f0649b5
...
@@ -24,139 +24,472 @@
...
@@ -24,139 +24,472 @@
#include "chan.h"
#include "chan.h"
#include <core/client.h>
#include <core/client.h>
#include <core/oproxy.h>
#include <subdev/mmu.h>
#include <engine/dma.h>
#include <engine/dma.h>
#include <nvif/class.h>
struct
nvkm_fifo_chan_object
{
struct
nvkm_oproxy
oproxy
;
struct
nvkm_fifo_chan
*
chan
;
int
hash
;
};
int
static
int
_nvkm_fifo_channel_ntfy
(
struct
nvkm_object
*
object
,
u32
type
,
nvkm_fifo_chan_child_fini
(
struct
nvkm_oproxy
*
base
,
bool
suspend
)
struct
nvkm_event
**
event
)
{
struct
nvkm_fifo_chan_object
*
object
=
container_of
(
base
,
typeof
(
*
object
),
oproxy
);
struct
nvkm_engine
*
engine
=
object
->
oproxy
.
object
->
engine
;
struct
nvkm_fifo_chan
*
chan
=
object
->
chan
;
struct
nvkm_fifo_engn
*
engn
=
&
chan
->
engn
[
engine
->
subdev
.
index
];
const
char
*
name
=
nvkm_subdev_name
[
engine
->
subdev
.
index
];
int
ret
=
0
;
if
(
--
engn
->
usecount
)
return
0
;
if
(
chan
->
func
->
engine_fini
)
{
ret
=
chan
->
func
->
engine_fini
(
chan
,
engine
,
suspend
);
if
(
ret
)
{
nvif_error
(
&
chan
->
object
,
"detach %s failed, %d
\n
"
,
name
,
ret
);
return
ret
;
}
}
if
(
engn
->
object
)
{
ret
=
nvkm_object_fini
(
engn
->
object
,
suspend
);
if
(
ret
&&
suspend
)
return
ret
;
}
nvif_trace
(
&
chan
->
object
,
"detached %s
\n
"
,
name
);
return
ret
;
}
static
int
nvkm_fifo_chan_child_init
(
struct
nvkm_oproxy
*
base
)
{
struct
nvkm_fifo_chan_object
*
object
=
container_of
(
base
,
typeof
(
*
object
),
oproxy
);
struct
nvkm_engine
*
engine
=
object
->
oproxy
.
object
->
engine
;
struct
nvkm_fifo_chan
*
chan
=
object
->
chan
;
struct
nvkm_fifo_engn
*
engn
=
&
chan
->
engn
[
engine
->
subdev
.
index
];
const
char
*
name
=
nvkm_subdev_name
[
engine
->
subdev
.
index
];
int
ret
;
if
(
engn
->
usecount
++
)
return
0
;
if
(
engn
->
object
)
{
ret
=
nvkm_object_init
(
engn
->
object
);
if
(
ret
)
return
ret
;
}
if
(
chan
->
func
->
engine_init
)
{
ret
=
chan
->
func
->
engine_init
(
chan
,
engine
);
if
(
ret
)
{
nvif_error
(
&
chan
->
object
,
"attach %s failed, %d
\n
"
,
name
,
ret
);
return
ret
;
}
}
nvif_trace
(
&
chan
->
object
,
"attached %s
\n
"
,
name
);
return
0
;
}
static
void
nvkm_fifo_chan_child_del
(
struct
nvkm_oproxy
*
base
)
{
struct
nvkm_fifo_chan_object
*
object
=
container_of
(
base
,
typeof
(
*
object
),
oproxy
);
struct
nvkm_engine
*
engine
=
object
->
oproxy
.
base
.
engine
;
struct
nvkm_fifo_chan
*
chan
=
object
->
chan
;
struct
nvkm_fifo_engn
*
engn
=
&
chan
->
engn
[
engine
->
subdev
.
index
];
if
(
chan
->
func
->
object_dtor
)
chan
->
func
->
object_dtor
(
chan
,
object
->
hash
);
if
(
!--
engn
->
refcount
)
{
if
(
chan
->
func
->
engine_dtor
)
chan
->
func
->
engine_dtor
(
chan
,
engine
);
nvkm_object_ref
(
NULL
,
&
engn
->
object
);
if
(
chan
->
vm
)
atomic_dec
(
&
chan
->
vm
->
engref
[
engine
->
subdev
.
index
]);
}
}
static
const
struct
nvkm_oproxy_func
nvkm_fifo_chan_child_func
=
{
.
dtor
[
0
]
=
nvkm_fifo_chan_child_del
,
.
init
[
0
]
=
nvkm_fifo_chan_child_init
,
.
fini
[
0
]
=
nvkm_fifo_chan_child_fini
,
};
static
int
nvkm_fifo_chan_child_old
(
const
struct
nvkm_oclass
*
oclass
,
void
*
data
,
u32
size
,
struct
nvkm_object
**
pobject
)
{
{
struct
nvkm_fifo
*
fifo
=
(
void
*
)
object
->
engine
;
struct
nvkm_fifo_chan
*
chan
=
nvkm_fifo_chan
(
oclass
->
parent
);
switch
(
type
)
{
struct
nvkm_object
*
parent
=
&
chan
->
object
;
case
G82_CHANNEL_DMA_V0_NTFY_UEVENT
:
struct
nvkm_engine
*
engine
=
oclass
->
engine
;
if
(
nv_mclass
(
object
)
>=
G82_CHANNEL_DMA
)
{
struct
nvkm_oclass
*
eclass
=
(
void
*
)
oclass
->
priv
;
*
event
=
&
fifo
->
uevent
;
struct
nvkm_object
*
engctx
=
NULL
;
return
0
;
struct
nvkm_fifo_chan_object
*
object
;
struct
nvkm_fifo_engn
*
engn
=
&
chan
->
engn
[
engine
->
subdev
.
index
];
int
ret
;
if
(
!
(
object
=
kzalloc
(
sizeof
(
*
object
),
GFP_KERNEL
)))
return
-
ENOMEM
;
nvkm_oproxy_ctor
(
&
nvkm_fifo_chan_child_func
,
oclass
,
&
object
->
oproxy
);
*
pobject
=
&
object
->
oproxy
.
base
;
object
->
chan
=
chan
;
if
(
!
engn
->
refcount
++
)
{
if
(
chan
->
vm
)
atomic_inc
(
&
chan
->
vm
->
engref
[
engine
->
subdev
.
index
]);
if
(
engine
->
cclass
&&
!
engn
->
object
)
{
ret
=
nvkm_object_old
(
parent
,
&
engine
->
subdev
.
object
,
engine
->
cclass
,
NULL
,
0
,
&
engn
->
object
);
if
(
ret
)
{
nvkm_engine_unref
(
&
engine
);
return
ret
;
}
}
else
{
nvkm_object_ref
(
parent
,
&
engn
->
object
);
}
}
break
;
default:
if
(
chan
->
func
->
engine_ctor
)
{
break
;
ret
=
chan
->
func
->
engine_ctor
(
chan
,
engine
,
engn
->
object
);
if
(
ret
)
return
ret
;
}
}
nvkm_object_ref
(
engn
->
object
,
&
engctx
);
ret
=
nvkm_object_old
(
engctx
,
&
engine
->
subdev
.
object
,
eclass
,
data
,
size
,
&
object
->
oproxy
.
object
);
nvkm_object_ref
(
NULL
,
&
engctx
);
if
(
ret
)
return
ret
;
object
->
oproxy
.
object
->
handle
=
oclass
->
handle
;
if
(
chan
->
func
->
object_ctor
)
{
object
->
hash
=
chan
->
func
->
object_ctor
(
chan
,
object
->
oproxy
.
object
);
if
(
object
->
hash
<
0
)
return
object
->
hash
;
}
}
return
0
;
}
static
int
nvkm_fifo_chan_child_new
(
const
struct
nvkm_oclass
*
oclass
,
void
*
data
,
u32
size
,
struct
nvkm_object
**
pobject
)
{
struct
nvkm_engine
*
engine
=
oclass
->
engine
;
struct
nvkm_fifo_chan
*
chan
=
nvkm_fifo_chan
(
oclass
->
parent
);
struct
nvkm_fifo_engn
*
engn
=
&
chan
->
engn
[
engine
->
subdev
.
index
];
struct
nvkm_fifo_chan_object
*
object
;
int
ret
=
0
;
if
(
!
(
object
=
kzalloc
(
sizeof
(
*
object
),
GFP_KERNEL
)))
return
-
ENOMEM
;
nvkm_oproxy_ctor
(
&
nvkm_fifo_chan_child_func
,
oclass
,
&
object
->
oproxy
);
object
->
chan
=
chan
;
*
pobject
=
&
object
->
oproxy
.
base
;
if
(
!
engn
->
refcount
++
)
{
struct
nvkm_oclass
cclass
=
{
.
client
=
oclass
->
client
,
.
engine
=
oclass
->
engine
,
};
if
(
chan
->
vm
)
atomic_inc
(
&
chan
->
vm
->
engref
[
engine
->
subdev
.
index
]);
if
(
engine
->
func
->
fifo
.
cclass
)
{
ret
=
engine
->
func
->
fifo
.
cclass
(
chan
,
&
cclass
,
&
engn
->
object
);
}
else
if
(
engine
->
func
->
cclass
)
{
ret
=
nvkm_object_new_
(
engine
->
func
->
cclass
,
&
cclass
,
NULL
,
0
,
&
engn
->
object
);
}
if
(
ret
)
return
ret
;
if
(
chan
->
func
->
engine_ctor
)
{
ret
=
chan
->
func
->
engine_ctor
(
chan
,
oclass
->
engine
,
engn
->
object
);
if
(
ret
)
return
ret
;
}
}
ret
=
oclass
->
base
.
ctor
(
&
(
const
struct
nvkm_oclass
)
{
.
base
=
oclass
->
base
,
.
engn
=
oclass
->
engn
,
.
handle
=
oclass
->
handle
,
.
object
=
oclass
->
object
,
.
client
=
oclass
->
client
,
.
parent
=
engn
->
object
?
engn
->
object
:
oclass
->
parent
,
.
engine
=
engine
,
},
data
,
size
,
&
object
->
oproxy
.
object
);
if
(
ret
)
return
ret
;
if
(
chan
->
func
->
object_ctor
)
{
object
->
hash
=
chan
->
func
->
object_ctor
(
chan
,
object
->
oproxy
.
object
);
if
(
object
->
hash
<
0
)
return
object
->
hash
;
}
return
0
;
}
static
int
nvkm_fifo_chan_child_get
(
struct
nvkm_object
*
object
,
int
index
,
struct
nvkm_oclass
*
oclass
)
{
struct
nvkm_fifo_chan
*
chan
=
nvkm_fifo_chan
(
object
);
struct
nvkm_fifo
*
fifo
=
chan
->
fifo
;
struct
nvkm_device
*
device
=
fifo
->
engine
.
subdev
.
device
;
struct
nvkm_engine
*
engine
;
u64
mask
=
chan
->
engines
;
int
ret
,
i
,
c
;
for
(;
c
=
0
,
i
=
__ffs64
(
mask
),
mask
;
mask
&=
~
(
1ULL
<<
i
))
{
if
((
engine
=
nvkm_device_engine
(
device
,
i
))
&&
!
engine
->
func
)
{
struct
nvkm_oclass
*
sclass
=
engine
->
sclass
;
int
c
=
0
;
while
(
sclass
&&
sclass
->
ofuncs
)
{
if
(
c
++
==
index
)
{
oclass
->
base
.
oclass
=
sclass
->
handle
;
oclass
->
base
.
minver
=
-
2
;
oclass
->
base
.
maxver
=
-
2
;
oclass
->
ctor
=
nvkm_fifo_chan_child_old
;
oclass
->
priv
=
sclass
;
oclass
->
engine
=
engine
;
return
0
;
}
sclass
++
;
}
index
-=
c
;
continue
;
}
if
(
!
(
engine
=
nvkm_device_engine
(
device
,
i
)))
continue
;
oclass
->
engine
=
engine
;
oclass
->
base
.
oclass
=
0
;
if
(
engine
->
func
->
fifo
.
sclass
)
{
ret
=
engine
->
func
->
fifo
.
sclass
(
oclass
,
index
);
if
(
oclass
->
base
.
oclass
)
{
if
(
!
oclass
->
base
.
ctor
)
oclass
->
base
.
ctor
=
nvkm_object_new
;
oclass
->
ctor
=
nvkm_fifo_chan_child_new
;
return
0
;
}
index
-=
ret
;
continue
;
}
while
(
engine
->
func
->
sclass
[
c
].
oclass
)
{
if
(
c
++
==
index
)
{
oclass
->
base
=
engine
->
func
->
sclass
[
index
];
if
(
!
oclass
->
base
.
ctor
)
oclass
->
base
.
ctor
=
nvkm_object_new
;
oclass
->
ctor
=
nvkm_fifo_chan_child_new
;
return
0
;
}
}
index
-=
c
;
}
return
-
EINVAL
;
return
-
EINVAL
;
}
}
int
static
int
_nvkm_fifo_channel_map
(
struct
nvkm_object
*
object
,
u64
*
addr
,
u32
*
size
)
nvkm_fifo_chan_ntfy
(
struct
nvkm_object
*
object
,
u32
type
,
struct
nvkm_event
**
pevent
)
{
{
struct
nvkm_fifo_chan
*
chan
=
(
void
*
)
object
;
struct
nvkm_fifo_chan
*
chan
=
nvkm_fifo_chan
(
object
);
if
(
chan
->
func
->
ntfy
)
return
chan
->
func
->
ntfy
(
chan
,
type
,
pevent
);
return
-
ENODEV
;
}
static
int
nvkm_fifo_chan_map
(
struct
nvkm_object
*
object
,
u64
*
addr
,
u32
*
size
)
{
struct
nvkm_fifo_chan
*
chan
=
nvkm_fifo_chan
(
object
);
*
addr
=
chan
->
addr
;
*
addr
=
chan
->
addr
;
*
size
=
chan
->
size
;
*
size
=
chan
->
size
;
return
0
;
return
0
;
}
}
u32
static
int
_nvkm_fifo_channel_rd32
(
struct
nvkm_object
*
object
,
u64
addr
)
nvkm_fifo_chan_rd32
(
struct
nvkm_object
*
object
,
u64
addr
,
u32
*
data
)
{
{
struct
nvkm_fifo_chan
*
chan
=
(
void
*
)
object
;
struct
nvkm_fifo_chan
*
chan
=
nvkm_fifo_chan
(
object
)
;
if
(
unlikely
(
!
chan
->
user
))
{
if
(
unlikely
(
!
chan
->
user
))
{
chan
->
user
=
ioremap
(
chan
->
addr
,
chan
->
size
);
chan
->
user
=
ioremap
(
chan
->
addr
,
chan
->
size
);
if
(
WARN_ON_ONCE
(
chan
->
user
==
NULL
)
)
if
(
!
chan
->
user
)
return
0
;
return
-
ENOMEM
;
}
}
return
ioread32_native
(
chan
->
user
+
addr
);
if
(
unlikely
(
addr
+
4
>
chan
->
size
))
return
-
EINVAL
;
*
data
=
ioread32_native
(
chan
->
user
+
addr
);
return
0
;
}
}
void
static
int
_nvkm_fifo_channel
_wr32
(
struct
nvkm_object
*
object
,
u64
addr
,
u32
data
)
nvkm_fifo_chan
_wr32
(
struct
nvkm_object
*
object
,
u64
addr
,
u32
data
)
{
{
struct
nvkm_fifo_chan
*
chan
=
(
void
*
)
object
;
struct
nvkm_fifo_chan
*
chan
=
nvkm_fifo_chan
(
object
)
;
if
(
unlikely
(
!
chan
->
user
))
{
if
(
unlikely
(
!
chan
->
user
))
{
chan
->
user
=
ioremap
(
chan
->
addr
,
chan
->
size
);
chan
->
user
=
ioremap
(
chan
->
addr
,
chan
->
size
);
if
(
WARN_ON_ONCE
(
chan
->
user
==
NULL
)
)
if
(
!
chan
->
user
)
return
;
return
-
ENOMEM
;
}
}
if
(
unlikely
(
addr
+
4
>
chan
->
size
))
return
-
EINVAL
;
iowrite32_native
(
data
,
chan
->
user
+
addr
);
iowrite32_native
(
data
,
chan
->
user
+
addr
);
return
0
;
}
static
int
nvkm_fifo_chan_fini
(
struct
nvkm_object
*
object
,
bool
suspend
)
{
struct
nvkm_fifo_chan
*
chan
=
nvkm_fifo_chan
(
object
);
chan
->
func
->
fini
(
chan
);
return
0
;
}
}
void
static
int
nvkm_fifo_chan
nel_destroy
(
struct
nvkm_fifo_chan
*
chan
)
nvkm_fifo_chan
_init
(
struct
nvkm_object
*
object
)
{
{
struct
nvkm_fifo
*
fifo
=
(
void
*
)
nv_object
(
chan
)
->
engine
;
struct
nvkm_fifo_chan
*
chan
=
nvkm_fifo_chan
(
object
);
chan
->
func
->
init
(
chan
);
return
0
;
}
static
void
*
nvkm_fifo_chan_dtor
(
struct
nvkm_object
*
object
)
{
struct
nvkm_fifo_chan
*
chan
=
nvkm_fifo_chan
(
object
);
struct
nvkm_fifo
*
fifo
=
chan
->
fifo
;
void
*
data
=
chan
->
func
->
dtor
(
chan
);
unsigned
long
flags
;
unsigned
long
flags
;
spin_lock_irqsave
(
&
fifo
->
lock
,
flags
);
if
(
!
list_empty
(
&
chan
->
head
))
{
__clear_bit
(
chan
->
chid
,
fifo
->
mask
);
list_del
(
&
chan
->
head
);
}
spin_unlock_irqrestore
(
&
fifo
->
lock
,
flags
);
if
(
chan
->
user
)
if
(
chan
->
user
)
iounmap
(
chan
->
user
);
iounmap
(
chan
->
user
);
spin_lock_irqsave
(
&
fifo
->
lock
,
flags
);
nvkm_vm_ref
(
NULL
,
&
chan
->
vm
,
NULL
);
fifo
->
channel
[
chan
->
chid
]
=
NULL
;
spin_unlock_irqrestore
(
&
fifo
->
lock
,
flags
);
nvkm_gpuobj_del
(
&
chan
->
pushgpu
);
nvkm_gpuobj_del
(
&
chan
->
push
);
nvkm_namedb_destroy
(
&
chan
->
namedb
);
nvkm_gpuobj_del
(
&
chan
->
inst
);
return
data
;
}
}
void
const
struct
nvkm_object_func
_nvkm_fifo_channel_dtor
(
struct
nvkm_object
*
object
)
nvkm_fifo_chan_func
=
{
{
.
dtor
=
nvkm_fifo_chan_dtor
,
struct
nvkm_fifo_chan
*
chan
=
(
void
*
)
object
;
.
init
=
nvkm_fifo_chan_init
,
nvkm_fifo_channel_destroy
(
chan
);
.
fini
=
nvkm_fifo_chan_fini
,
}
.
ntfy
=
nvkm_fifo_chan_ntfy
,
.
map
=
nvkm_fifo_chan_map
,
.
rd32
=
nvkm_fifo_chan_rd32
,
.
wr32
=
nvkm_fifo_chan_wr32
,
.
sclass
=
nvkm_fifo_chan_child_get
,
};
int
nvkm_fifo_chan_ctor(const struct nvkm_fifo_chan_func *func,
		    struct nvkm_fifo *fifo, u32 size, u32 align, bool zero,
		    u64 vm, u64 push, u64 engines, int bar, u32 base, u32 user,
		    const struct nvkm_oclass *oclass,
		    struct nvkm_fifo_chan *chan)
{
	struct nvkm_client *client = oclass->client;
	struct nvkm_device *device = fifo->engine.subdev.device;
	struct nvkm_mmu *mmu = device->mmu;
	struct nvkm_dmaobj *dmaobj;
	unsigned long flags;
	int ret;

	nvkm_object_ctor(&nvkm_fifo_chan_func, oclass, &chan->object);
	chan->func = func;
	chan->fifo = fifo;
	chan->engines = engines;
	INIT_LIST_HEAD(&chan->head);

	/* instance memory */
	ret = nvkm_gpuobj_new(device, size, align, zero, NULL, &chan->inst);
	if (ret)
		return ret;

	/* allocate push buffer ctxdma instance */
	if (push) {
		dmaobj = nvkm_dma_search(device->dma, oclass->client, push);
		if (!dmaobj)
			return -ENOENT;

		ret = nvkm_object_bind(&dmaobj->object, chan->inst, -16,
				       &chan->push);
		if (ret)
			return ret;
	}

	/* channel address space */
	if (!vm && mmu) {
		if (!client->vm || client->vm->mmu == mmu) {
			ret = nvkm_vm_ref(client->vm, &chan->vm, NULL);
			if (ret)
				return ret;
		} else {
			return -EINVAL;
		}
	} else {
		return -ENOENT;
	}

	/* allocate channel id */
	spin_lock_irqsave(&fifo->lock, flags);
	chan->chid = find_first_zero_bit(fifo->mask, NVKM_FIFO_CHID_NR);
	if (chan->chid >= NVKM_FIFO_CHID_NR) {
		spin_unlock_irqrestore(&fifo->lock, flags);
		return -ENOSPC;
	}
	list_add(&chan->head, &fifo->chan);
	__set_bit(chan->chid, fifo->mask);
	spin_unlock_irqrestore(&fifo->lock, flags);

	/* determine address of this channel's user registers */
	chan->addr = nv_device_resource_start(device, bar) +
		     base + user * chan->chid;
	chan->size = user;

	nvkm_event_send(&fifo->cevent, 1, 0, NULL, 0);
	return 0;
}

(This replaces the old nvkm_fifo_channel_create_() helper, which took the parent/engine/oclass triple, scanned fifo->min..fifo->max for a free fifo->channel[] slot and logged "no free channels" when none was left.)
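Aside (not part of the patch): a minimal userspace sketch of the channel-id bookkeeping that nvkm_fifo_chan_ctor() and nvkm_fifo_chan_dtor() above do with find_first_zero_bit()/__set_bit()/__clear_bit() under fifo->lock. The 32-entry table and the names here are made up purely for illustration.

/* Illustrative only: userspace analogue of the chid bitmap allocation. */
#include <stdio.h>

#define CHID_NR 32

static unsigned long chid_mask;		/* bit N set => channel N in use */

static int chid_alloc(void)
{
	int chid;
	for (chid = 0; chid < CHID_NR; chid++) {
		if (!(chid_mask & (1UL << chid))) {
			chid_mask |= 1UL << chid;	/* __set_bit() */
			return chid;
		}
	}
	return -1;				/* -ENOSPC in the kernel code */
}

static void chid_free(int chid)
{
	chid_mask &= ~(1UL << chid);		/* __clear_bit() in the dtor */
}

int main(void)
{
	int a = chid_alloc(), b = chid_alloc();
	printf("allocated chids %d and %d\n", a, b);
	chid_free(a);
	chid_free(b);
	return 0;
}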
drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.h
@@ -2,27 +2,31 @@
 #define __NVKM_FIFO_CHAN_H__
 #include "priv.h"

struct nvkm_fifo_chan_func {
	void *(*dtor)(struct nvkm_fifo_chan *);
	void (*init)(struct nvkm_fifo_chan *);
	void (*fini)(struct nvkm_fifo_chan *);
	int (*ntfy)(struct nvkm_fifo_chan *, u32 type, struct nvkm_event **);
	int (*engine_ctor)(struct nvkm_fifo_chan *, struct nvkm_engine *,
			   struct nvkm_object *);
	void (*engine_dtor)(struct nvkm_fifo_chan *, struct nvkm_engine *);
	int (*engine_init)(struct nvkm_fifo_chan *, struct nvkm_engine *);
	int (*engine_fini)(struct nvkm_fifo_chan *, struct nvkm_engine *,
			   bool suspend);
	int (*object_ctor)(struct nvkm_fifo_chan *, struct nvkm_object *);
	void (*object_dtor)(struct nvkm_fifo_chan *, int);
};

int nvkm_fifo_chan_ctor(const struct nvkm_fifo_chan_func *, struct nvkm_fifo *,
			u32 size, u32 align, bool zero, u64 vm, u64 push,
			u64 engines, int bar, u32 base, u32 user,
			const struct nvkm_oclass *, struct nvkm_fifo_chan *);

struct nvkm_fifo_chan_oclass {
	int (*ctor)(struct nvkm_fifo *, const struct nvkm_oclass *,
		    void *data, u32 size, struct nvkm_object **);
	struct nvkm_sclass base;
};

int g84_fifo_chan_ntfy(struct nvkm_fifo_chan *, u32, struct nvkm_event **);
#endif

(Removed by this commit: the nvkm_fifo_channel_create()/_init()/_fini() macros, the nvkm_fifo_channel_create_()/destroy() declarations, and the _nvkm_fifo_channel_dtor()/_map()/_rd32()/_wr32()/_ntfy() helpers.)
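Aside (not part of the patch): a rough sketch of how a hypothetical backend could use the interface declared above, filling nvkm_fifo_chan_func with its hooks and exposing a constructor through nvkm_fifo_chan_oclass. The foo_* names, sizes, BAR offsets and class id are invented, and the hooks are stubs; real implementations look like the g84/nv50/nv04 code in the files that follow.

/* Hypothetical backend sketch, assuming the declarations in chan.h above. */
static void *foo_fifo_chan_dtor(struct nvkm_fifo_chan *chan) { return chan; }
static void  foo_fifo_chan_init(struct nvkm_fifo_chan *chan) { }
static void  foo_fifo_chan_fini(struct nvkm_fifo_chan *chan) { }

static const struct nvkm_fifo_chan_func
foo_fifo_chan_func = {
	.dtor = foo_fifo_chan_dtor,
	.init = foo_fifo_chan_init,
	.fini = foo_fifo_chan_fini,
};

static int
foo_fifo_chan_new(struct nvkm_fifo *fifo, const struct nvkm_oclass *oclass,
		  void *data, u32 size, struct nvkm_object **pobject)
{
	struct nvkm_fifo_chan *chan;

	if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
		return -ENOMEM;
	*pobject = &chan->object;

	/* 0x1000-byte instance, no push buffer, no extra engines; the
	 * 0xc00000/0x2000 user-register window is made up for the example. */
	return nvkm_fifo_chan_ctor(&foo_fifo_chan_func, fifo, 0x1000, 0x1000,
				   false, 0, 0, 0, 0, 0xc00000, 0x2000,
				   oclass, chan);
}

const struct nvkm_fifo_chan_oclass
foo_fifo_chan_oclass = {
	.base.oclass = 0, /* hypothetical class id */
	.ctor = foo_fifo_chan_new,
};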
drivers/gpu/drm/nouveau/nvkm/engine/fifo/chang84.c
@@ -25,38 +25,86 @@
 #include <core/client.h>
 #include <core/ramht.h>
 #include <subdev/timer.h>

 #include <nvif/class.h>

int
g84_fifo_chan_ntfy(struct nvkm_fifo_chan *chan, u32 type,
		   struct nvkm_event **pevent)
{
	switch (type) {
	case G82_CHANNEL_DMA_V0_NTFY_UEVENT:
		*pevent = &chan->fifo->uevent;
		return 0;
	default:
		break;
	}
	return -EINVAL;
}

static int
g84_fifo_chan_engine(struct nvkm_engine *engine)
{
	switch (engine->subdev.index) {
	case NVDEV_ENGINE_GR    : return 0;
	case NVDEV_ENGINE_MPEG  :
	case NVDEV_ENGINE_MSPPP : return 1;
	case NVDEV_ENGINE_CE0   : return 2;
	case NVDEV_ENGINE_VP    :
	case NVDEV_ENGINE_MSPDEC: return 3;
	case NVDEV_ENGINE_CIPHER:
	case NVDEV_ENGINE_SEC   : return 4;
	case NVDEV_ENGINE_BSP   :
	case NVDEV_ENGINE_MSVLD : return 5;
	default:
		WARN_ON(1);
		return 0;
	}
}

static int
g84_fifo_chan_engine_addr(struct nvkm_engine *engine)
{
	switch (engine->subdev.index) {
	case NVDEV_ENGINE_DMAOBJ:
	case NVDEV_ENGINE_SW    : return -1;
	case NVDEV_ENGINE_GR    : return 0x0020;
	case NVDEV_ENGINE_VP    :
	case NVDEV_ENGINE_MSPDEC: return 0x0040;
	case NVDEV_ENGINE_MPEG  :
	case NVDEV_ENGINE_MSPPP : return 0x0060;
	case NVDEV_ENGINE_BSP   :
	case NVDEV_ENGINE_MSVLD : return 0x0080;
	case NVDEV_ENGINE_CIPHER:
	case NVDEV_ENGINE_SEC   : return 0x00a0;
	case NVDEV_ENGINE_CE0   : return 0x00c0;
	default:
		WARN_ON(1);
		return -1;
	}
}

static int
g84_fifo_chan_engine_fini(struct nvkm_fifo_chan *base,
			  struct nvkm_engine *engine, bool suspend)
{
	struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
	struct nv50_fifo *fifo = chan->fifo;
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 engn, save;
	int offset;
	bool done;

	offset = g84_fifo_chan_engine_addr(engine);
	if (offset < 0)
		return 0;
	engn = g84_fifo_chan_engine(engine);

	save = nvkm_mask(device, 0x002520, 0x0000003f, 1 << engn);
	nvkm_wr32(device, 0x0032fc, chan->base.inst->addr >> 12);
	done = nvkm_msec(device, 2000,
		if (nvkm_rd32(device, 0x0032fc) != 0xffffffff)
			break;
	) >= 0;
...
@@ -64,168 +112,179 @@
	nvkm_wr32(device, 0x002520, save);
	if (!done) {
		nvkm_error(subdev, "channel %d [%s] unload timeout\n",
			   chan->base.chid, chan->base.object.client->name);
		if (suspend)
			return -EBUSY;
	}

	nvkm_kmap(chan->eng);
	nvkm_wo32(chan->eng, offset + 0x00, 0x00000000);
	nvkm_wo32(chan->eng, offset + 0x04, 0x00000000);
	nvkm_wo32(chan->eng, offset + 0x08, 0x00000000);
	nvkm_wo32(chan->eng, offset + 0x0c, 0x00000000);
	nvkm_wo32(chan->eng, offset + 0x10, 0x00000000);
	nvkm_wo32(chan->eng, offset + 0x14, 0x00000000);
	nvkm_done(chan->eng);
	return 0;
}

int
g84_fifo_chan_engine_init(struct nvkm_fifo_chan *base,
			  struct nvkm_engine *engine)
{
	struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
	struct nvkm_gpuobj *engn = chan->engn[engine->subdev.index];
	u64 limit, start;
	int offset;

	offset = g84_fifo_chan_engine_addr(engine);
	if (offset < 0)
		return 0;
	limit = engn->addr + engn->size - 1;
	start = engn->addr;

	nvkm_kmap(chan->eng);
	nvkm_wo32(chan->eng, offset + 0x00, 0x00190000);
	nvkm_wo32(chan->eng, offset + 0x04, lower_32_bits(limit));
	nvkm_wo32(chan->eng, offset + 0x08, lower_32_bits(start));
	nvkm_wo32(chan->eng, offset + 0x0c, upper_32_bits(limit) << 24 |
					    upper_32_bits(start));
	nvkm_wo32(chan->eng, offset + 0x10, 0x00000000);
	nvkm_wo32(chan->eng, offset + 0x14, 0x00000000);
	nvkm_done(chan->eng);
	return 0;
}

static int
g84_fifo_chan_engine_ctor(struct nvkm_fifo_chan *base,
			  struct nvkm_engine *engine,
			  struct nvkm_object *object)
{
	struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
	int engn = engine->subdev.index;

	if (g84_fifo_chan_engine_addr(engine) < 0)
		return 0;

	if (nv_iclass(object, NV_GPUOBJ_CLASS)) {
		chan->engn[engn] = nv_gpuobj(object);
		return 0;
	}

	return nvkm_object_bind(object, NULL, 0, &chan->engn[engn]);
}

int
g84_fifo_chan_object_ctor(struct nvkm_fifo_chan *base,
			  struct nvkm_object *object)
{
	struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
	u32 handle = object->handle;
	u32 context;

	switch (object->engine->subdev.index) {
	case NVDEV_ENGINE_DMAOBJ:
	case NVDEV_ENGINE_SW    : context = 0x00000000; break;
	case NVDEV_ENGINE_GR    : context = 0x00100000; break;
	case NVDEV_ENGINE_MPEG  :
	case NVDEV_ENGINE_MSPPP : context = 0x00200000; break;
	case NVDEV_ENGINE_ME    :
	case NVDEV_ENGINE_CE0   : context = 0x00300000; break;
	case NVDEV_ENGINE_VP    :
	case NVDEV_ENGINE_MSPDEC: context = 0x00400000; break;
	case NVDEV_ENGINE_CIPHER:
	case NVDEV_ENGINE_SEC   :
	case NVDEV_ENGINE_VIC   : context = 0x00500000; break;
	case NVDEV_ENGINE_BSP   :
	case NVDEV_ENGINE_MSVLD : context = 0x00600000; break;
	default:
		WARN_ON(1);
		return -EINVAL;
	}

	return nvkm_ramht_insert(chan->ramht, object, 0, 4, handle, context);
}

static void
g84_fifo_chan_init(struct nvkm_fifo_chan *base)
{
	struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
	struct nv50_fifo *fifo = chan->fifo;
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	u64 addr = chan->ramfc->addr >> 8;
	u32 chid = chan->base.chid;

	nvkm_wr32(device, 0x002600 + (chid * 4), 0x80000000 | addr);
	nv50_fifo_runlist_update(fifo);
}

static const struct nvkm_fifo_chan_func
g84_fifo_chan_func = {
	.dtor = nv50_fifo_chan_dtor,
	.init = g84_fifo_chan_init,
	.fini = nv50_fifo_chan_fini,
	.ntfy = g84_fifo_chan_ntfy,
	.engine_ctor = g84_fifo_chan_engine_ctor,
	.engine_dtor = nv50_fifo_chan_engine_dtor,
	.engine_init = g84_fifo_chan_engine_init,
	.engine_fini = g84_fifo_chan_engine_fini,
	.object_ctor = g84_fifo_chan_object_ctor,
	.object_dtor = nv50_fifo_chan_object_dtor,
};

int
g84_fifo_chan_ctor(struct nv50_fifo *fifo, u64 vm, u64 push,
		   const struct nvkm_oclass *oclass,
		   struct nv50_fifo_chan *chan)
{
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	int ret;

	ret = nvkm_fifo_chan_ctor(&g84_fifo_chan_func, &fifo->base,
				  0x10000, 0x1000, false, vm, push,
				  (1ULL << NVDEV_ENGINE_BSP) |
				  (1ULL << NVDEV_ENGINE_CE0) |
				  (1ULL << NVDEV_ENGINE_CIPHER) |
				  (1ULL << NVDEV_ENGINE_DMAOBJ) |
				  (1ULL << NVDEV_ENGINE_GR) |
				  (1ULL << NVDEV_ENGINE_ME) |
				  (1ULL << NVDEV_ENGINE_MPEG) |
				  (1ULL << NVDEV_ENGINE_MSPDEC) |
				  (1ULL << NVDEV_ENGINE_MSPPP) |
				  (1ULL << NVDEV_ENGINE_MSVLD) |
				  (1ULL << NVDEV_ENGINE_SEC) |
				  (1ULL << NVDEV_ENGINE_SW) |
				  (1ULL << NVDEV_ENGINE_VIC) |
				  (1ULL << NVDEV_ENGINE_VP),
				  0, 0xc00000, 0x2000, oclass, &chan->base);
	chan->fifo = fifo;
	if (ret)
		return ret;

	ret = nvkm_gpuobj_new(device, 0x0200, 0, true, chan->base.inst,
			      &chan->eng);
	if (ret)
		return ret;

	ret = nvkm_gpuobj_new(device, 0x4000, 0, false, chan->base.inst,
			      &chan->pgd);
	if (ret)
		return ret;

	ret = nvkm_gpuobj_new(device, 0x1000, 0x400, true, chan->base.inst,
			      &chan->cache);
	if (ret)
		return ret;

	ret = nvkm_gpuobj_new(device, 0x100, 0x100, true, chan->base.inst,
			      &chan->ramfc);
	if (ret)
		return ret;

	ret = nvkm_ramht_new(device, 0x8000, 16, chan->base.inst, &chan->ramht);
	if (ret)
		return ret;

	return nvkm_vm_ref(chan->base.vm, &chan->vm, chan->pgd);
}

(Removed by this commit: g84_fifo_context_attach()/detach(), g84_fifo_object_attach(), g84_fifo_context_ctor() and the g84_fifo_cclass engine-context class.)
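Aside (not part of the patch): the word written at offset + 0x0c by g84_fifo_chan_engine_init() above packs the upper bits of the engine-context limit and start addresses into one register. A self-contained sketch of that packing, with made-up addresses and userspace stand-ins for lower_32_bits()/upper_32_bits():

/* Illustrative only: limit/start packing as in the engine_init code above. */
#include <stdint.h>
#include <stdio.h>

static uint32_t lower_32_bits(uint64_t v) { return (uint32_t)v; }
static uint32_t upper_32_bits(uint64_t v) { return (uint32_t)(v >> 32); }

int main(void)
{
	uint64_t start = 0x123400000ULL, size = 0x20000;	/* made up */
	uint64_t limit = start + size - 1;
	uint32_t word = upper_32_bits(limit) << 24 | upper_32_bits(start);

	printf("+0x04 %08x\n", (unsigned)lower_32_bits(limit));
	printf("+0x08 %08x\n", (unsigned)lower_32_bits(start));
	printf("+0x0c %08x\n", (unsigned)word);
	return 0;
}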
drivers/gpu/drm/nouveau/nvkm/engine/fifo/changf100.h
#ifndef __GF100_FIFO_CHAN_H__
#define __GF100_FIFO_CHAN_H__
#define gf100_fifo_chan(p) container_of((p), struct gf100_fifo_chan, base)
#include "chan.h"
#include "gf100.h"

struct gf100_fifo_chan {
	struct nvkm_fifo_chan base;
	struct gf100_fifo *fifo;

	struct list_head head;
	bool killed;

	struct nvkm_gpuobj *pgd;
	struct nvkm_vm *vm;

	struct {
		struct nvkm_gpuobj *inst;
		struct nvkm_vma vma;
	} engn[NVDEV_SUBDEV_NR];
};

extern const struct nvkm_fifo_chan_oclass gf100_fifo_gpfifo_oclass;
#endif

(Removed by this commit: struct gf100_fifo_base, the old gf100_fifo_chan with its STOPPED/RUNNING/KILLED state enum, and the gf100_fifo_cclass/gf100_fifo_sclass[] declarations.)
drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h
#ifndef __GK104_FIFO_CHAN_H__
#define __GK104_FIFO_CHAN_H__
#define gk104_fifo_chan(p) container_of((p), struct gk104_fifo_chan, base)
#include "chan.h"
#include "gk104.h"

struct gk104_fifo_chan {
	struct nvkm_fifo_chan base;
	struct gk104_fifo *fifo;
	int engine;

	struct list_head head;
	bool killed;

	struct nvkm_gpuobj *pgd;
	struct nvkm_vm *vm;

	struct {
		struct nvkm_gpuobj *inst;
		struct nvkm_vma vma;
	} engn[NVDEV_SUBDEV_NR];
};

int gk104_fifo_gpfifo_new(struct nvkm_fifo *, const struct nvkm_oclass *,
			  void *data, u32 size, struct nvkm_object **);

extern const struct nvkm_fifo_chan_oclass gk104_fifo_gpfifo_oclass;
extern const struct nvkm_fifo_chan_oclass gm204_fifo_gpfifo_oclass;
#endif

(Removed by this commit: struct gk104_fifo_base, the old gk104_fifo_chan with u32 engine and the STOPPED/RUNNING/KILLED state enum, gk104_fifo_cclass/sclass[], gk104_fifo_chan_ofuncs and gm204_fifo_sclass[].)
drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv04.h
#ifndef __NV04_FIFO_CHAN_H__
#define __NV04_FIFO_CHAN_H__
#define nv04_fifo_chan(p) container_of((p), struct nv04_fifo_chan, base)
#include "chan.h"
#include "nv04.h"

struct nv04_fifo_chan {
	struct nvkm_fifo_chan base;
	struct nv04_fifo *fifo;
	u32 ramfc;
	struct nvkm_gpuobj *engn[NVDEV_SUBDEV_NR];
};

extern const struct nvkm_fifo_chan_func nv04_fifo_dma_func;
void *nv04_fifo_dma_dtor(struct nvkm_fifo_chan *);
void nv04_fifo_dma_init(struct nvkm_fifo_chan *);
void nv04_fifo_dma_fini(struct nvkm_fifo_chan *);
void nv04_fifo_dma_object_dtor(struct nvkm_fifo_chan *, int);

extern const struct nvkm_fifo_chan_oclass nv04_fifo_dma_oclass;
extern const struct nvkm_fifo_chan_oclass nv10_fifo_dma_oclass;
extern const struct nvkm_fifo_chan_oclass nv17_fifo_dma_oclass;
extern const struct nvkm_fifo_chan_oclass nv40_fifo_dma_oclass;
#endif

(Removed by this commit: the old nv04_fifo_chan layout with u32 subc[8], nv04_fifo_object_attach()/detach(), the old nvkm_object-based nv04_fifo_chan_dtor()/init()/fini(), nv04_fifo_cclass and the nv04/nv10/nv17/nv40_fifo_sclass[] arrays.)
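Aside (not part of the patch): the per-chipset headers above all recover their wrapper structure from the embedded nvkm_fifo_chan via the container_of() casts in the gf100_fifo_chan()/gk104_fifo_chan()/nv04_fifo_chan() macros. A self-contained userspace illustration of that pattern, with stand-in struct names:

/* Illustrative only: how container_of() recovers the wrapper structure. */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct base_chan { int chid; };

struct wrap_chan {
	struct base_chan base;
	int ramfc;
};

int main(void)
{
	struct wrap_chan wc = { .base.chid = 3, .ramfc = 3 * 32 };
	struct base_chan *b = &wc.base;		/* what the core hands back */
	struct wrap_chan *w = container_of(b, struct wrap_chan, base);

	printf("chid %d ramfc 0x%x\n", w->base.chid, w->ramfc);
	return 0;
}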
drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.c
@@ -25,27 +25,37 @@
 #include <core/client.h>
 #include <core/ramht.h>
 #include <subdev/timer.h>

static int
nv50_fifo_chan_engine_addr(struct nvkm_engine *engine)
{
	switch (engine->subdev.index) {
	case NVDEV_ENGINE_DMAOBJ:
	case NVDEV_ENGINE_SW    : return -1;
	case NVDEV_ENGINE_GR    : return 0x0000;
	case NVDEV_ENGINE_MPEG  : return 0x0060;
	default:
		WARN_ON(1);
		return -1;
	}
}

static int
nv50_fifo_chan_engine_fini(struct nvkm_fifo_chan *base,
			   struct nvkm_engine *engine, bool suspend)
{
	struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
	struct nv50_fifo *fifo = chan->fifo;
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	int offset, ret = 0;
	u32 me;

	offset = nv50_fifo_chan_engine_addr(engine);
	if (offset < 0)
		return 0;

	/* HW bug workaround:
	 *
...
@@ -62,101 +72,124 @@
	me = nvkm_mask(device, 0x00b860, 0x00000001, 0x00000001);

	/* do the kickoff... */
	nvkm_wr32(device, 0x0032fc, chan->base.inst->addr >> 12);
	if (nvkm_msec(device, 2000,
		if (nvkm_rd32(device, 0x0032fc) != 0xffffffff)
			break;
	) < 0) {
		nvkm_error(subdev, "channel %d [%s] unload timeout\n",
			   chan->base.chid, chan->base.object.client->name);
		if (suspend)
			ret = -EBUSY;
	}
	nvkm_wr32(device, 0x00b860, me);

	if (ret == 0) {
		nvkm_kmap(chan->eng);
		nvkm_wo32(chan->eng, offset + 0x00, 0x00000000);
		nvkm_wo32(chan->eng, offset + 0x04, 0x00000000);
		nvkm_wo32(chan->eng, offset + 0x08, 0x00000000);
		nvkm_wo32(chan->eng, offset + 0x0c, 0x00000000);
		nvkm_wo32(chan->eng, offset + 0x10, 0x00000000);
		nvkm_wo32(chan->eng, offset + 0x14, 0x00000000);
		nvkm_done(chan->eng);
	}

	return ret;
}

static int
nv50_fifo_chan_engine_init(struct nvkm_fifo_chan *base,
			   struct nvkm_engine *engine)
{
	struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
	struct nvkm_gpuobj *engn = chan->engn[engine->subdev.index];
	u64 limit, start;
	int offset;

	offset = nv50_fifo_chan_engine_addr(engine);
	if (offset < 0)
		return 0;
	limit = engn->addr + engn->size - 1;
	start = engn->addr;

	nvkm_kmap(chan->eng);
	nvkm_wo32(chan->eng, offset + 0x00, 0x00190000);
	nvkm_wo32(chan->eng, offset + 0x04, lower_32_bits(limit));
	nvkm_wo32(chan->eng, offset + 0x08, lower_32_bits(start));
	nvkm_wo32(chan->eng, offset + 0x0c, upper_32_bits(limit) << 24 |
					    upper_32_bits(start));
	nvkm_wo32(chan->eng, offset + 0x10, 0x00000000);
	nvkm_wo32(chan->eng, offset + 0x14, 0x00000000);
	nvkm_done(chan->eng);
	return 0;
}

void
nv50_fifo_chan_engine_dtor(struct nvkm_fifo_chan *base,
			   struct nvkm_engine *engine)
{
	struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
	if (!chan->engn[engine->subdev.index] ||
	     chan->engn[engine->subdev.index]->object.oclass) {
		chan->engn[engine->subdev.index] = NULL;
		return;
	}
	nvkm_gpuobj_del(&chan->engn[engine->subdev.index]);
}

static int
nv50_fifo_chan_engine_ctor(struct nvkm_fifo_chan *base,
			   struct nvkm_engine *engine,
			   struct nvkm_object *object)
{
	struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
	int engn = engine->subdev.index;

	if (nv50_fifo_chan_engine_addr(engine) < 0)
		return 0;

	if (nv_iclass(object, NV_GPUOBJ_CLASS)) {
		chan->engn[engn] = nv_gpuobj(object);
		return 0;
	}

	return nvkm_object_bind(object, NULL, 0, &chan->engn[engn]);
}

void
nv50_fifo_chan_object_dtor(struct nvkm_fifo_chan *base, int cookie)
{
	struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
	nvkm_ramht_remove(chan->ramht, cookie);
}

static int
nv50_fifo_chan_object_ctor(struct nvkm_fifo_chan *base,
			   struct nvkm_object *object)
{
	struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
	u32 handle = object->handle;
	u32 context;

	switch (object->engine->subdev.index) {
	case NVDEV_ENGINE_DMAOBJ:
	case NVDEV_ENGINE_SW    : context = 0x00000000; break;
	case NVDEV_ENGINE_GR    : context = 0x00100000; break;
	case NVDEV_ENGINE_MPEG  : context = 0x00200000; break;
	default:
		WARN_ON(1);
		return -EINVAL;
	}

	return nvkm_ramht_insert(chan->ramht, object, 0, 4, handle, context);
}

void
nv50_fifo_chan_fini(struct nvkm_fifo_chan *base)
{
	struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
	struct nv50_fifo *fifo = chan->fifo;
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	u32 chid = chan->base.chid;
...
@@ -164,96 +197,84 @@
	nvkm_mask(device, 0x002600 + (chid * 4), 0x80000000, 0x00000000);
	nv50_fifo_runlist_update(fifo);
	nvkm_wr32(device, 0x002600 + (chid * 4), 0x00000000);
}

static void
nv50_fifo_chan_init(struct nvkm_fifo_chan *base)
{
	struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
	struct nv50_fifo *fifo = chan->fifo;
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	u64 addr = chan->ramfc->addr >> 12;
	u32 chid = chan->base.chid;

	nvkm_wr32(device, 0x002600 + (chid * 4), 0x80000000 | addr);
	nv50_fifo_runlist_update(fifo);
}

void *
nv50_fifo_chan_dtor(struct nvkm_fifo_chan *base)
{
	struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
	nvkm_vm_ref(NULL, &chan->vm, chan->pgd);
	nvkm_ramht_del(&chan->ramht);
	nvkm_gpuobj_del(&chan->pgd);
	nvkm_gpuobj_del(&chan->eng);
	nvkm_gpuobj_del(&chan->cache);
	nvkm_gpuobj_del(&chan->ramfc);
	return chan;
}

static const struct nvkm_fifo_chan_func
nv50_fifo_chan_func = {
	.dtor = nv50_fifo_chan_dtor,
	.init = nv50_fifo_chan_init,
	.fini = nv50_fifo_chan_fini,
	.engine_ctor = nv50_fifo_chan_engine_ctor,
	.engine_dtor = nv50_fifo_chan_engine_dtor,
	.engine_init = nv50_fifo_chan_engine_init,
	.engine_fini = nv50_fifo_chan_engine_fini,
	.object_ctor = nv50_fifo_chan_object_ctor,
	.object_dtor = nv50_fifo_chan_object_dtor,
};

int
nv50_fifo_chan_ctor(struct nv50_fifo *fifo, u64 vm, u64 push,
		    const struct nvkm_oclass *oclass,
		    struct nv50_fifo_chan *chan)
{
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	int ret;

	ret = nvkm_fifo_chan_ctor(&nv50_fifo_chan_func, &fifo->base,
				  0x10000, 0x1000, false, vm, push,
				  (1ULL << NVDEV_ENGINE_DMAOBJ) |
				  (1ULL << NVDEV_ENGINE_SW) |
				  (1ULL << NVDEV_ENGINE_GR) |
				  (1ULL << NVDEV_ENGINE_MPEG),
				  0, 0xc00000, 0x2000, oclass, &chan->base);
	chan->fifo = fifo;
	if (ret)
		return ret;

	ret = nvkm_gpuobj_new(device, 0x0200, 0x1000, true, chan->base.inst,
			      &chan->ramfc);
	if (ret)
		return ret;

	ret = nvkm_gpuobj_new(device, 0x1200, 0, true, chan->base.inst,
			      &chan->eng);
	if (ret)
		return ret;

	ret = nvkm_gpuobj_new(device, 0x4000, 0, false, chan->base.inst,
			      &chan->pgd);
	if (ret)
		return ret;

	ret = nvkm_ramht_new(device, 0x8000, 16, chan->base.inst, &chan->ramht);
	if (ret)
		return ret;

	return nvkm_vm_ref(chan->base.vm, &chan->vm, chan->pgd);
}

(Removed by this commit: nv50_fifo_context_attach()/detach(), nv50_fifo_object_attach()/detach(), the old nvkm_object-based nv50_fifo_chan_init()/fini()/dtor(), nv50_fifo_context_ctor()/dtor() and the nv50_fifo_cclass engine-context class.)
drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.h
#ifndef __NV50_FIFO_CHAN_H__
#define __NV50_FIFO_CHAN_H__
#define nv50_fifo_chan(p) container_of((p), struct nv50_fifo_chan, base)
#include "chan.h"
#include "nv50.h"

struct nv50_fifo_chan {
	struct nv50_fifo *fifo;
	struct nvkm_fifo_chan base;

	struct nvkm_gpuobj *ramfc;
	struct nvkm_gpuobj *cache;
	struct nvkm_gpuobj *eng;
	struct nvkm_gpuobj *pgd;
	struct nvkm_ramht *ramht;
	struct nvkm_vm *vm;

	struct nvkm_gpuobj *engn[NVDEV_SUBDEV_NR];
};

int nv50_fifo_chan_ctor(struct nv50_fifo *, u64 vm, u64 push,
			const struct nvkm_oclass *, struct nv50_fifo_chan *);
void *nv50_fifo_chan_dtor(struct nvkm_fifo_chan *);
void nv50_fifo_chan_fini(struct nvkm_fifo_chan *);
void nv50_fifo_chan_engine_dtor(struct nvkm_fifo_chan *, struct nvkm_engine *);
void nv50_fifo_chan_object_dtor(struct nvkm_fifo_chan *, int);

int g84_fifo_chan_ctor(struct nv50_fifo *, u64 vm, u64 push,
		       const struct nvkm_oclass *, struct nv50_fifo_chan *);

extern const struct nvkm_fifo_chan_oclass nv50_fifo_dma_oclass;
extern const struct nvkm_fifo_chan_oclass nv50_fifo_gpfifo_oclass;
extern const struct nvkm_fifo_chan_oclass g84_fifo_dma_oclass;
extern const struct nvkm_fifo_chan_oclass g84_fifo_gpfifo_oclass;
#endif

(Removed by this commit: struct nv50_fifo_base, the old nv50_fifo_chan layout with u32 subc[8], the nv50/g84 cclass and sclass[] arrays, and the old context_attach()/detach(), object_attach()/detach(), chan_init()/fini() and *_fifo_ofuncs_ind declarations.)
drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmag84.c
@@ -30,15 +30,14 @@
 #include <nvif/unpack.h>

static int
g84_fifo_dma_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
		 void *data, u32 size, struct nvkm_object **pobject)
{
	struct nvkm_object *parent = oclass->parent;
	union {
		struct nv50_channel_dma_v0 v0;
	} *args = data;
	struct nv50_fifo *fifo = nv50_fifo(base);
	struct nv50_fifo_chan *chan;
	int ret;
...
@@ -48,80 +47,47 @@
			   "pushbuf %llx offset %016llx\n",
			   args->v0.version, args->v0.vm, args->v0.pushbuf,
			   args->v0.offset);
		if (!args->v0.pushbuf)
			return -EINVAL;
	} else
		return ret;

	if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
		return -ENOMEM;
	*pobject = &chan->base.object;

	ret = g84_fifo_chan_ctor(fifo, args->v0.vm, args->v0.pushbuf,
				 oclass, chan);
	if (ret)
		return ret;

	args->v0.chid = chan->base.chid;

	nvkm_kmap(chan->ramfc);
	nvkm_wo32(chan->ramfc, 0x08, lower_32_bits(args->v0.offset));
	nvkm_wo32(chan->ramfc, 0x0c, upper_32_bits(args->v0.offset));
	nvkm_wo32(chan->ramfc, 0x10, lower_32_bits(args->v0.offset));
	nvkm_wo32(chan->ramfc, 0x14, upper_32_bits(args->v0.offset));
	nvkm_wo32(chan->ramfc, 0x3c, 0x003f6078);
	nvkm_wo32(chan->ramfc, 0x44, 0x01003fff);
	nvkm_wo32(chan->ramfc, 0x48, chan->base.push->node->offset >> 4);
	nvkm_wo32(chan->ramfc, 0x4c, 0xffffffff);
	nvkm_wo32(chan->ramfc, 0x60, 0x7fffffff);
	nvkm_wo32(chan->ramfc, 0x78, 0x00000000);
	nvkm_wo32(chan->ramfc, 0x7c, 0x30000001);
	nvkm_wo32(chan->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
				     (4 << 24) /* SEARCH_FULL */ |
				     (chan->ramht->gpuobj->node->offset >> 4));
	nvkm_wo32(chan->ramfc, 0x88, chan->cache->addr >> 10);
	nvkm_wo32(chan->ramfc, 0x98, chan->base.inst->addr >> 12);
	nvkm_done(chan->ramfc);
	return 0;
}

const struct nvkm_fifo_chan_oclass
g84_fifo_dma_oclass = {
	.base.oclass = G82_CHANNEL_DMA,
	.base.minver = 0,
	.base.maxver = 0,
	.ctor = g84_fifo_dma_new,
};

(Removed by this commit: g84_fifo_chan_ctor_dma(), g84_fifo_ofuncs_dma and the g84_fifo_sclass[] table that listed G82_CHANNEL_DMA and G82_CHANNEL_GPFIFO.)
drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv04.c
View file @
8f0649b5
...
@@ -31,74 +31,51 @@
...
@@ -31,74 +31,51 @@
#include <nvif/class.h>
#include <nvif/class.h>
#include <nvif/unpack.h>
#include <nvif/unpack.h>
int
nv04_fifo_context_attach
(
struct
nvkm_object
*
parent
,
struct
nvkm_object
*
object
)
{
nv_engctx
(
object
)
->
addr
=
nvkm_fifo_chan
(
parent
)
->
chid
;
return
0
;
}
void
void
nv04_fifo_
object_detach
(
struct
nvkm_object
*
parent
,
int
cookie
)
nv04_fifo_
dma_object_dtor
(
struct
nvkm_fifo_chan
*
base
,
int
cookie
)
{
{
struct
nv04_fifo
*
fifo
=
(
void
*
)
parent
->
engine
;
struct
nv04_fifo_chan
*
chan
=
nv04_fifo_chan
(
base
);
struct
nvkm_instmem
*
imem
=
fifo
->
base
.
engine
.
subdev
.
device
->
imem
;
struct
nvkm_instmem
*
imem
=
chan
->
fifo
->
base
.
engine
.
subdev
.
device
->
imem
;
mutex_lock
(
&
nv_subdev
(
fifo
)
->
mutex
);
nvkm_ramht_remove
(
imem
->
ramht
,
cookie
);
nvkm_ramht_remove
(
imem
->
ramht
,
cookie
);
mutex_unlock
(
&
nv_subdev
(
fifo
)
->
mutex
);
}
}
int
static
int
nv04_fifo_
object_attach
(
struct
nvkm_object
*
parent
,
nv04_fifo_
dma_object_ctor
(
struct
nvkm_fifo_chan
*
base
,
struct
nvkm_object
*
object
,
u32
handle
)
struct
nvkm_object
*
object
)
{
{
struct
nv04_fifo
*
fifo
=
(
void
*
)
parent
->
engine
;
struct
nv04_fifo_chan
*
chan
=
nv04_fifo_chan
(
base
);
struct
nv04_fifo_chan
*
chan
=
(
void
*
)
parent
;
struct
nvkm_instmem
*
imem
=
chan
->
fifo
->
base
.
engine
.
subdev
.
device
->
imem
;
struct
nvkm_instmem
*
imem
=
fifo
->
base
.
engine
.
subdev
.
device
->
imem
;
u32
context
=
0x80000000
|
chan
->
base
.
chid
<<
24
;
u32
context
,
chid
=
chan
->
base
.
chid
;
u32
handle
=
object
->
handle
;
int
ret
;
int
hash
;
if
(
nv_iclass
(
object
,
NV_GPUOBJ_CLASS
))
switch
(
object
->
engine
->
subdev
.
index
)
{
context
=
nv_gpuobj
(
object
)
->
addr
>>
4
;
case
NVDEV_ENGINE_DMAOBJ
:
else
case
NVDEV_ENGINE_SW
:
context
|=
0x00000000
;
break
;
context
=
0x00000004
;
/* just non-zero */
case
NVDEV_ENGINE_GR
:
context
|=
0x00010000
;
break
;
case
NVDEV_ENGINE_MPEG
:
context
|=
0x00020000
;
break
;
if
(
object
->
engine
)
{
default:
switch
(
nv_engidx
(
object
->
engine
))
{
WARN_ON
(
1
);
case
NVDEV_ENGINE_DMAOBJ
:
return
-
EINVAL
;
case
NVDEV_ENGINE_SW
:
context
|=
0x00000000
;
break
;
case
NVDEV_ENGINE_GR
:
context
|=
0x00010000
;
break
;
case
NVDEV_ENGINE_MPEG
:
context
|=
0x00020000
;
break
;
default:
return
-
EINVAL
;
}
}
}
context
|=
0x80000000
;
/* valid */
mutex_lock
(
&
chan
->
fifo
->
base
.
engine
.
subdev
.
mutex
);
context
|=
chid
<<
24
;
hash
=
nvkm_ramht_insert
(
imem
->
ramht
,
object
,
chan
->
base
.
chid
,
4
,
handle
,
context
);
mutex_lock
(
&
nv_subdev
(
fifo
)
->
mutex
);
mutex_unlock
(
&
chan
->
fifo
->
base
.
engine
.
subdev
.
mutex
);
ret
=
nvkm_ramht_insert
(
imem
->
ramht
,
NULL
,
chid
,
0
,
handle
,
context
);
return
hash
;
mutex_unlock
(
&
nv_subdev
(
fifo
)
->
mutex
);
return
ret
;
}
}
int
void
nv04_fifo_
chan_fini
(
struct
nvkm_object
*
object
,
bool
suspend
)
nv04_fifo_
dma_fini
(
struct
nvkm_fifo_chan
*
base
)
{
{
struct
nv04_fifo
*
fifo
=
(
void
*
)
object
->
engine
;
struct
nv04_fifo
_chan
*
chan
=
nv04_fifo_chan
(
base
)
;
struct
nv04_fifo
_chan
*
chan
=
(
void
*
)
object
;
struct
nv04_fifo
*
fifo
=
chan
->
fifo
;
struct
nvkm_device
*
device
=
fifo
->
base
.
engine
.
subdev
.
device
;
struct
nvkm_device
*
device
=
fifo
->
base
.
engine
.
subdev
.
device
;
struct
nvkm_memory
*
fctx
=
device
->
imem
->
ramfc
;
struct
nvkm_memory
*
fctx
=
device
->
imem
->
ramfc
;
struct
ramfc_desc
*
c
;
struct
ramfc_desc
*
c
;
unsigned
long
flags
;
unsigned
long
flags
;
u32
mask
=
fifo
->
base
.
nr
-
1
;
u32
data
=
chan
->
ramfc
;
u32
data
=
chan
->
ramfc
;
u32
chid
;
u32
chid
;
...
@@ -107,7 +84,7 @@ nv04_fifo_chan_fini(struct nvkm_object *object, bool suspend)
...
@@ -107,7 +84,7 @@ nv04_fifo_chan_fini(struct nvkm_object *object, bool suspend)
nvkm_wr32
(
device
,
NV03_PFIFO_CACHES
,
0
);
nvkm_wr32
(
device
,
NV03_PFIFO_CACHES
,
0
);
/* if this channel is active, replace it with a null context */
/* if this channel is active, replace it with a null context */
chid
=
nvkm_rd32
(
device
,
NV03_PFIFO_CACHE1_PUSH1
)
&
fifo
->
base
.
max
;
chid
=
nvkm_rd32
(
device
,
NV03_PFIFO_CACHE1_PUSH1
)
&
mask
;
if
(
chid
==
chan
->
base
.
chid
)
{
if
(
chid
==
chan
->
base
.
chid
)
{
nvkm_mask
(
device
,
NV04_PFIFO_CACHE1_DMA_PUSH
,
0x00000001
,
0
);
nvkm_mask
(
device
,
NV04_PFIFO_CACHE1_DMA_PUSH
,
0x00000001
,
0
);
nvkm_wr32
(
device
,
NV03_PFIFO_CACHE1_PUSH0
,
0
);
nvkm_wr32
(
device
,
NV03_PFIFO_CACHE1_PUSH0
,
0
);
...
@@ -129,7 +106,7 @@ nv04_fifo_chan_fini(struct nvkm_object *object, bool suspend)
...
@@ -129,7 +106,7 @@ nv04_fifo_chan_fini(struct nvkm_object *object, bool suspend)
nvkm_wr32
(
device
,
NV03_PFIFO_CACHE1_GET
,
0
);
nvkm_wr32
(
device
,
NV03_PFIFO_CACHE1_GET
,
0
);
nvkm_wr32
(
device
,
NV03_PFIFO_CACHE1_PUT
,
0
);
nvkm_wr32
(
device
,
NV03_PFIFO_CACHE1_PUT
,
0
);
nvkm_wr32
(
device
,
NV03_PFIFO_CACHE1_PUSH1
,
fifo
->
base
.
max
);
nvkm_wr32
(
device
,
NV03_PFIFO_CACHE1_PUSH1
,
mask
);
nvkm_wr32
(
device
,
NV03_PFIFO_CACHE1_PUSH0
,
1
);
nvkm_wr32
(
device
,
NV03_PFIFO_CACHE1_PUSH0
,
1
);
nvkm_wr32
(
device
,
NV04_PFIFO_CACHE1_PULL0
,
1
);
nvkm_wr32
(
device
,
NV04_PFIFO_CACHE1_PULL0
,
1
);
}
}
...
@@ -138,35 +115,26 @@ nv04_fifo_chan_fini(struct nvkm_object *object, bool suspend)
...
@@ -138,35 +115,26 @@ nv04_fifo_chan_fini(struct nvkm_object *object, bool suspend)
nvkm_mask
(
device
,
NV04_PFIFO_MODE
,
1
<<
chan
->
base
.
chid
,
0
);
nvkm_mask
(
device
,
NV04_PFIFO_MODE
,
1
<<
chan
->
base
.
chid
,
0
);
nvkm_wr32
(
device
,
NV03_PFIFO_CACHES
,
1
);
nvkm_wr32
(
device
,
NV03_PFIFO_CACHES
,
1
);
spin_unlock_irqrestore
(
&
fifo
->
base
.
lock
,
flags
);
spin_unlock_irqrestore
(
&
fifo
->
base
.
lock
,
flags
);
return
nvkm_fifo_channel_fini
(
&
chan
->
base
,
suspend
);
}
}
int
void
nv04_fifo_
chan_init
(
struct
nvkm_object
*
object
)
nv04_fifo_
dma_init
(
struct
nvkm_fifo_chan
*
base
)
{
{
struct
nv04_fifo
*
fifo
=
(
void
*
)
object
->
engine
;
struct
nv04_fifo
_chan
*
chan
=
nv04_fifo_chan
(
base
)
;
struct
nv04_fifo
_chan
*
chan
=
(
void
*
)
object
;
struct
nv04_fifo
*
fifo
=
chan
->
fifo
;
struct
nvkm_device
*
device
=
fifo
->
base
.
engine
.
subdev
.
device
;
struct
nvkm_device
*
device
=
fifo
->
base
.
engine
.
subdev
.
device
;
u32
mask
=
1
<<
chan
->
base
.
chid
;
u32
mask
=
1
<<
chan
->
base
.
chid
;
unsigned
long
flags
;
unsigned
long
flags
;
int
ret
;
ret
=
nvkm_fifo_channel_init
(
&
chan
->
base
);
if
(
ret
)
return
ret
;
spin_lock_irqsave
(
&
fifo
->
base
.
lock
,
flags
);
spin_lock_irqsave
(
&
fifo
->
base
.
lock
,
flags
);
nvkm_mask
(
device
,
NV04_PFIFO_MODE
,
mask
,
mask
);
nvkm_mask
(
device
,
NV04_PFIFO_MODE
,
mask
,
mask
);
spin_unlock_irqrestore
(
&
fifo
->
base
.
lock
,
flags
);
spin_unlock_irqrestore
(
&
fifo
->
base
.
lock
,
flags
);
return
0
;
}
}
void
void
*
nv04_fifo_
chan_dtor
(
struct
nvkm_object
*
object
)
nv04_fifo_
dma_dtor
(
struct
nvkm_fifo_chan
*
base
)
{
{
struct
nv04_fifo
*
fifo
=
(
void
*
)
object
->
engine
;
struct
nv04_fifo
_chan
*
chan
=
nv04_fifo_chan
(
base
)
;
struct
nv04_fifo
_chan
*
chan
=
(
void
*
)
object
;
struct
nv04_fifo
*
fifo
=
chan
->
fifo
;
struct
nvkm_instmem
*
imem
=
fifo
->
base
.
engine
.
subdev
.
device
->
imem
;
struct
nvkm_instmem
*
imem
=
fifo
->
base
.
engine
.
subdev
.
device
->
imem
;
struct
ramfc_desc
*
c
=
fifo
->
ramfc_desc
;
struct
ramfc_desc
*
c
=
fifo
->
ramfc_desc
;
...
@@ -175,22 +143,30 @@ nv04_fifo_chan_dtor(struct nvkm_object *object)
...
@@ -175,22 +143,30 @@ nv04_fifo_chan_dtor(struct nvkm_object *object)
nvkm_wo32
(
imem
->
ramfc
,
chan
->
ramfc
+
c
->
ctxp
,
0x00000000
);
nvkm_wo32
(
imem
->
ramfc
,
chan
->
ramfc
+
c
->
ctxp
,
0x00000000
);
}
while
((
++
c
)
->
bits
);
}
while
((
++
c
)
->
bits
);
nvkm_done
(
imem
->
ramfc
);
nvkm_done
(
imem
->
ramfc
);
return
chan
;
nvkm_fifo_channel_destroy
(
&
chan
->
base
);
}
}
const
struct
nvkm_fifo_chan_func
nv04_fifo_dma_func
=
{
.
dtor
=
nv04_fifo_dma_dtor
,
.
init
=
nv04_fifo_dma_init
,
.
fini
=
nv04_fifo_dma_fini
,
.
object_ctor
=
nv04_fifo_dma_object_ctor
,
.
object_dtor
=
nv04_fifo_dma_object_dtor
,
};
static
int
static
int
nv04_fifo_chan_ctor
(
struct
nvkm_object
*
parent
,
nv04_fifo_dma_new
(
struct
nvkm_fifo
*
base
,
const
struct
nvkm_oclass
*
oclass
,
struct
nvkm_object
*
engine
,
void
*
data
,
u32
size
,
struct
nvkm_object
**
pobject
)
struct
nvkm_oclass
*
oclass
,
void
*
data
,
u32
size
,
struct
nvkm_object
**
pobject
)
{
{
struct
nvkm_object
*
parent
=
oclass
->
parent
;
union
{
union
{
struct
nv03_channel_dma_v0
v0
;
struct
nv03_channel_dma_v0
v0
;
}
*
args
=
data
;
}
*
args
=
data
;
struct
nv04_fifo
*
fifo
=
(
void
*
)
engine
;
struct
nv04_fifo
*
fifo
=
nv04_fifo
(
base
);
struct
nvkm_instmem
*
imem
=
fifo
->
base
.
engine
.
subdev
.
device
->
-	struct nvkm_instmem *imem = fifo->base.engine.subdev.device->imem;
-	struct nv04_fifo_chan *chan = NULL;
+	struct nv04_fifo_chan *chan;
+	struct nvkm_device *device = fifo->base.engine.subdev.device;
+	struct nvkm_instmem *imem = device->imem;
 	int ret;
 
 	nvif_ioctl(parent, "create channel dma size %d\n", size);
@@ -198,29 +174,32 @@ nv04_fifo_chan_ctor(struct nvkm_object *parent,
 		nvif_ioctl(parent, "create channel dma vers %d pushbuf %llx "
 				   "offset %08x\n", args->v0.version,
 			   args->v0.pushbuf, args->v0.offset);
+		if (!args->v0.pushbuf)
+			return -EINVAL;
 	} else
 		return ret;
 
-	ret = nvkm_fifo_channel_create(parent, engine, oclass, 0, 0x800000,
-				       0x10000, args->v0.pushbuf,
-				       (1ULL << NVDEV_ENGINE_DMAOBJ) |
-				       (1ULL << NVDEV_ENGINE_SW) |
-				       (1ULL << NVDEV_ENGINE_GR), &chan);
-	*pobject = nv_object(chan);
+	if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
+		return -ENOMEM;
+	*pobject = &chan->base.object;
+
+	ret = nvkm_fifo_chan_ctor(&nv04_fifo_dma_func, &fifo->base,
+				  0x1000, 0x1000, false, 0, args->v0.pushbuf,
+				  (1ULL << NVDEV_ENGINE_DMAOBJ) |
+				  (1ULL << NVDEV_ENGINE_GR) |
+				  (1ULL << NVDEV_ENGINE_SW),
+				  0, 0x800000, 0x10000, oclass, &chan->base);
+	chan->fifo = fifo;
 	if (ret)
 		return ret;
 
 	args->v0.chid = chan->base.chid;
 
-	nv_parent(chan)->object_attach = nv04_fifo_object_attach;
-	nv_parent(chan)->object_detach = nv04_fifo_object_detach;
-	nv_parent(chan)->context_attach = nv04_fifo_context_attach;
 	chan->ramfc = chan->base.chid * 32;
 
 	nvkm_kmap(imem->ramfc);
 	nvkm_wo32(imem->ramfc, chan->ramfc + 0x00, args->v0.offset);
 	nvkm_wo32(imem->ramfc, chan->ramfc + 0x04, args->v0.offset);
-	nvkm_wo32(imem->ramfc, chan->ramfc + 0x08, chan->base.pushgpu->addr >> 4);
+	nvkm_wo32(imem->ramfc, chan->ramfc + 0x08, chan->base.push->addr >> 4);
 	nvkm_wo32(imem->ramfc, chan->ramfc + 0x10,
 		  NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
 		  NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
@@ -232,51 +211,10 @@ nv04_fifo_chan_ctor(struct nvkm_object *parent,
 	return 0;
 }
 
-static struct nvkm_ofuncs
-nv04_fifo_ofuncs = {
-	.ctor = nv04_fifo_chan_ctor,
-	.dtor = nv04_fifo_chan_dtor,
-	.init = nv04_fifo_chan_init,
-	.fini = nv04_fifo_chan_fini,
-	.map  = _nvkm_fifo_channel_map,
-	.rd32 = _nvkm_fifo_channel_rd32,
-	.wr32 = _nvkm_fifo_channel_wr32,
-	.ntfy = _nvkm_fifo_channel_ntfy
-};
-
-struct nvkm_oclass
-nv04_fifo_sclass[] = {
-	{ NV03_CHANNEL_DMA, &nv04_fifo_ofuncs },
-	{}
-};
-
-int
-nv04_fifo_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		       struct nvkm_oclass *oclass, void *data, u32 size,
-		       struct nvkm_object **pobject)
-{
-	struct nv04_fifo_base *base;
-	int ret;
-
-	ret = nvkm_fifo_context_create(parent, engine, oclass, NULL, 0x1000,
-				       0x1000, NVOBJ_FLAG_HEAP, &base);
-	*pobject = nv_object(base);
-	if (ret)
-		return ret;
-
-	return 0;
-}
-
-struct nvkm_oclass
-nv04_fifo_cclass = {
-	.handle = NV_ENGCTX(FIFO, 0x04),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_fifo_context_ctor,
-		.dtor = _nvkm_fifo_context_dtor,
-		.init = _nvkm_fifo_context_init,
-		.fini = _nvkm_fifo_context_fini,
-		.rd32 = _nvkm_fifo_context_rd32,
-		.wr32 = _nvkm_fifo_context_wr32,
-	},
+const struct nvkm_fifo_chan_oclass
+nv04_fifo_dma_oclass = {
+	.base.oclass = NV03_CHANNEL_DMA,
+	.base.minver = 0,
+	.base.maxver = 0,
+	.ctor = nv04_fifo_dma_new,
 };
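The hunk above is representative of the whole commit: each chipset's channel constructor stops being an nvkm_ofuncs entry in an sclass[] table and becomes an nvkm_fifo_chan_oclass with an explicit ctor. For orientation, a minimal sketch of the new-style registration for a hypothetical "nvxx" chipset follows; NVXX_CHANNEL_DMA and nvxx_fifo_dma_new are placeholders, not symbols from this patch, and the body is elided.

/* Illustrative only: the shape of a new-style DMA channel class, following
 * the pattern of nv04_fifo_dma_oclass above.  Placeholder names throughout. */
#define NVXX_CHANNEL_DMA 0x0000ffff	/* placeholder class id */

static int
nvxx_fifo_dma_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
		  void *data, u32 size, struct nvkm_object **pobject)
{
	/* unpack args, kzalloc the channel, call nvkm_fifo_chan_ctor(), ... */
	return 0;
}

const struct nvkm_fifo_chan_oclass
nvxx_fifo_dma_oclass = {
	.base.oclass = NVXX_CHANNEL_DMA,
	.base.minver = 0,
	.base.maxver = 0,
	.ctor = nvxx_fifo_dma_new,
};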
drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv10.c
@@ -31,17 +31,17 @@
 #include <nvif/unpack.h>
 
 static int
-nv10_fifo_chan_ctor(struct nvkm_object *parent,
-		    struct nvkm_object *engine,
-		    struct nvkm_oclass *oclass, void *data, u32 size,
-		    struct nvkm_object **pobject)
+nv10_fifo_dma_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
+		  void *data, u32 size, struct nvkm_object **pobject)
 {
+	struct nvkm_object *parent = oclass->parent;
 	union {
 		struct nv03_channel_dma_v0 v0;
 	} *args = data;
-	struct nv04_fifo *fifo = (void *)engine;
-	struct nvkm_instmem *imem = fifo->base.engine.subdev.device->imem;
-	struct nv04_fifo_chan *chan = NULL;
+	struct nv04_fifo *fifo = nv04_fifo(base);
+	struct nv04_fifo_chan *chan;
+	struct nvkm_device *device = fifo->base.engine.subdev.device;
+	struct nvkm_instmem *imem = device->imem;
 	int ret;
 
 	nvif_ioctl(parent, "create channel dma size %d\n", size);
@@ -49,29 +49,32 @@ nv10_fifo_chan_ctor(struct nvkm_object *parent,
 		nvif_ioctl(parent, "create channel dma vers %d pushbuf %llx "
 				   "offset %08x\n", args->v0.version,
 			   args->v0.pushbuf, args->v0.offset);
+		if (!args->v0.pushbuf)
+			return -EINVAL;
 	} else
 		return ret;
 
-	ret = nvkm_fifo_channel_create(parent, engine, oclass, 0, 0x800000,
-				       0x10000, args->v0.pushbuf,
-				       (1ULL << NVDEV_ENGINE_DMAOBJ) |
-				       (1ULL << NVDEV_ENGINE_SW) |
-				       (1ULL << NVDEV_ENGINE_GR), &chan);
-	*pobject = nv_object(chan);
+	if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
+		return -ENOMEM;
+	*pobject = &chan->base.object;
+
+	ret = nvkm_fifo_chan_ctor(&nv04_fifo_dma_func, &fifo->base,
+				  0x1000, 0x1000, false, 0, args->v0.pushbuf,
+				  (1ULL << NVDEV_ENGINE_DMAOBJ) |
+				  (1ULL << NVDEV_ENGINE_GR) |
+				  (1ULL << NVDEV_ENGINE_SW),
+				  0, 0x800000, 0x10000, oclass, &chan->base);
+	chan->fifo = fifo;
 	if (ret)
 		return ret;
 
 	args->v0.chid = chan->base.chid;
 
-	nv_parent(chan)->object_attach = nv04_fifo_object_attach;
-	nv_parent(chan)->object_detach = nv04_fifo_object_detach;
-	nv_parent(chan)->context_attach = nv04_fifo_context_attach;
 	chan->ramfc = chan->base.chid * 32;
 
 	nvkm_kmap(imem->ramfc);
 	nvkm_wo32(imem->ramfc, chan->ramfc + 0x00, args->v0.offset);
 	nvkm_wo32(imem->ramfc, chan->ramfc + 0x04, args->v0.offset);
-	nvkm_wo32(imem->ramfc, chan->ramfc + 0x0c, chan->base.pushgpu->addr >> 4);
+	nvkm_wo32(imem->ramfc, chan->ramfc + 0x0c, chan->base.push->addr >> 4);
 	nvkm_wo32(imem->ramfc, chan->ramfc + 0x14,
 		  NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
 		  NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
@@ -83,20 +86,10 @@ nv10_fifo_chan_ctor(struct nvkm_object *parent,
 	return 0;
 }
 
-static struct nvkm_ofuncs
-nv10_fifo_ofuncs = {
-	.ctor = nv10_fifo_chan_ctor,
-	.dtor = nv04_fifo_chan_dtor,
-	.init = nv04_fifo_chan_init,
-	.fini = nv04_fifo_chan_fini,
-	.map  = _nvkm_fifo_channel_map,
-	.rd32 = _nvkm_fifo_channel_rd32,
-	.wr32 = _nvkm_fifo_channel_wr32,
-	.ntfy = _nvkm_fifo_channel_ntfy
-};
-
-struct nvkm_oclass
-nv10_fifo_sclass[] = {
-	{ NV10_CHANNEL_DMA, &nv10_fifo_ofuncs },
-	{}
+const struct nvkm_fifo_chan_oclass
+nv10_fifo_dma_oclass = {
+	.base.oclass = NV10_CHANNEL_DMA,
+	.base.minver = 0,
+	.base.maxver = 0,
+	.ctor = nv10_fifo_dma_new,
 };
drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv17.c
@@ -31,17 +31,17 @@
 #include <nvif/unpack.h>
 
 static int
-nv17_fifo_chan_ctor(struct nvkm_object *parent,
-		    struct nvkm_object *engine,
-		    struct nvkm_oclass *oclass, void *data, u32 size,
-		    struct nvkm_object **pobject)
+nv17_fifo_dma_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
+		  void *data, u32 size, struct nvkm_object **pobject)
 {
+	struct nvkm_object *parent = oclass->parent;
 	union {
 		struct nv03_channel_dma_v0 v0;
 	} *args = data;
-	struct nv04_fifo *fifo = (void *)engine;
-	struct nvkm_instmem *imem = fifo->base.engine.subdev.device->imem;
-	struct nv04_fifo_chan *chan = NULL;
+	struct nv04_fifo *fifo = nv04_fifo(base);
+	struct nv04_fifo_chan *chan;
+	struct nvkm_device *device = fifo->base.engine.subdev.device;
+	struct nvkm_instmem *imem = device->imem;
 	int ret;
 
 	nvif_ioctl(parent, "create channel dma size %d\n", size);
@@ -49,31 +49,33 @@ nv17_fifo_chan_ctor(struct nvkm_object *parent,
 		nvif_ioctl(parent, "create channel dma vers %d pushbuf %llx "
 				   "offset %08x\n", args->v0.version,
 			   args->v0.pushbuf, args->v0.offset);
+		if (!args->v0.pushbuf)
+			return -EINVAL;
 	} else
 		return ret;
 
-	ret = nvkm_fifo_channel_create(parent, engine, oclass, 0, 0x800000,
-				       0x10000, args->v0.pushbuf,
-				       (1ULL << NVDEV_ENGINE_DMAOBJ) |
-				       (1ULL << NVDEV_ENGINE_SW) |
-				       (1ULL << NVDEV_ENGINE_GR) |
-				       (1ULL << NVDEV_ENGINE_MPEG), /* NV31- */
-				       &chan);
-	*pobject = nv_object(chan);
+	if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
+		return -ENOMEM;
+	*pobject = &chan->base.object;
+
+	ret = nvkm_fifo_chan_ctor(&nv04_fifo_dma_func, &fifo->base,
+				  0x1000, 0x1000, false, 0, args->v0.pushbuf,
+				  (1ULL << NVDEV_ENGINE_DMAOBJ) |
+				  (1ULL << NVDEV_ENGINE_GR) |
+				  (1ULL << NVDEV_ENGINE_MPEG) | /* NV31- */
+				  (1ULL << NVDEV_ENGINE_SW),
+				  0, 0x800000, 0x10000, oclass, &chan->base);
+	chan->fifo = fifo;
 	if (ret)
 		return ret;
 
 	args->v0.chid = chan->base.chid;
 
-	nv_parent(chan)->object_attach = nv04_fifo_object_attach;
-	nv_parent(chan)->object_detach = nv04_fifo_object_detach;
-	nv_parent(chan)->context_attach = nv04_fifo_context_attach;
 	chan->ramfc = chan->base.chid * 64;
 
 	nvkm_kmap(imem->ramfc);
 	nvkm_wo32(imem->ramfc, chan->ramfc + 0x00, args->v0.offset);
 	nvkm_wo32(imem->ramfc, chan->ramfc + 0x04, args->v0.offset);
-	nvkm_wo32(imem->ramfc, chan->ramfc + 0x0c, chan->base.pushgpu->addr >> 4);
+	nvkm_wo32(imem->ramfc, chan->ramfc + 0x0c, chan->base.push->addr >> 4);
 	nvkm_wo32(imem->ramfc, chan->ramfc + 0x14,
 		  NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
 		  NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
@@ -85,20 +87,10 @@ nv17_fifo_chan_ctor(struct nvkm_object *parent,
 	return 0;
 }
 
-static struct nvkm_ofuncs
-nv17_fifo_ofuncs = {
-	.ctor = nv17_fifo_chan_ctor,
-	.dtor = nv04_fifo_chan_dtor,
-	.init = nv04_fifo_chan_init,
-	.fini = nv04_fifo_chan_fini,
-	.map  = _nvkm_fifo_channel_map,
-	.rd32 = _nvkm_fifo_channel_rd32,
-	.wr32 = _nvkm_fifo_channel_wr32,
-	.ntfy = _nvkm_fifo_channel_ntfy
-};
-
-struct nvkm_oclass
-nv17_fifo_sclass[] = {
-	{ NV17_CHANNEL_DMA, &nv17_fifo_ofuncs },
-	{}
+const struct nvkm_fifo_chan_oclass
+nv17_fifo_dma_oclass = {
+	.base.oclass = NV17_CHANNEL_DMA,
+	.base.minver = 0,
+	.base.maxver = 0,
+	.ctor = nv17_fifo_dma_new,
 };
drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv40.c
@@ -31,36 +31,47 @@
 #include <nvif/class.h>
 #include <nvif/unpack.h>
 
+static bool
+nv40_fifo_dma_engine(struct nvkm_engine *engine, u32 *reg, u32 *ctx)
+{
+	switch (engine->subdev.index) {
+	case NVDEV_ENGINE_DMAOBJ:
+	case NVDEV_ENGINE_SW:
+		return false;
+	case NVDEV_ENGINE_GR:
+		*reg = 0x0032e0;
+		*ctx = 0x38;
+		return true;
+	case NVDEV_ENGINE_MPEG:
+		*reg = 0x00330c;
+		*ctx = 0x54;
+		return true;
+	default:
+		WARN_ON(1);
+		return false;
+	}
+}
+
 static int
-nv40_fifo_context_detach(struct nvkm_object *parent, bool suspend,
-			 struct nvkm_object *engctx)
+nv40_fifo_dma_engine_fini(struct nvkm_fifo_chan *base,
+			  struct nvkm_engine *engine, bool suspend)
 {
-	struct nv04_fifo *fifo = (void *)parent->engine;
-	struct nv04_fifo_chan *chan = (void *)parent;
+	struct nv04_fifo_chan *chan = nv04_fifo_chan(base);
+	struct nv04_fifo *fifo = chan->fifo;
 	struct nvkm_device *device = fifo->base.engine.subdev.device;
 	struct nvkm_instmem *imem = device->imem;
 	unsigned long flags;
 	u32 reg, ctx;
+	int chid;
 
-	switch (nv_engidx(engctx->engine)) {
-	case NVDEV_ENGINE_SW:
-		return 0;
-	case NVDEV_ENGINE_GR:
-		reg = 0x32e0;
-		ctx = 0x38;
-		break;
-	case NVDEV_ENGINE_MPEG:
-		reg = 0x330c;
-		ctx = 0x54;
-		break;
-	default:
-		return -EINVAL;
-	}
+	if (!nv40_fifo_dma_engine(engine, &reg, &ctx))
+		return 0;
 
 	spin_lock_irqsave(&fifo->base.lock, flags);
 	nvkm_mask(device, 0x002500, 0x00000001, 0x00000000);
 
-	if ((nvkm_rd32(device, 0x003204) & fifo->base.max) == chan->base.chid)
+	chid = nvkm_rd32(device, 0x003204) & (fifo->base.nr - 1);
+	if (chid == chan->base.chid)
 		nvkm_wr32(device, reg, 0x00000000);
 	nvkm_kmap(imem->ramfc);
 	nvkm_wo32(imem->ramfc, chan->ramfc + ctx, 0x00000000);
@@ -72,38 +83,29 @@ nv40_fifo_context_detach(struct nvkm_object *parent, bool suspend,
 }
 
 static int
-nv40_fifo_context_attach(struct nvkm_object *parent, struct nvkm_object *engctx)
+nv40_fifo_dma_engine_init(struct nvkm_fifo_chan *base,
+			  struct nvkm_engine *engine)
 {
-	struct nv04_fifo *fifo = (void *)parent->engine;
-	struct nv04_fifo_chan *chan = (void *)parent;
+	struct nv04_fifo_chan *chan = nv04_fifo_chan(base);
+	struct nv04_fifo *fifo = chan->fifo;
 	struct nvkm_device *device = fifo->base.engine.subdev.device;
 	struct nvkm_instmem *imem = device->imem;
 	unsigned long flags;
-	u32 reg, ctx;
+	u32 inst, reg, ctx;
+	int chid;
 
-	switch (nv_engidx(engctx->engine)) {
-	case NVDEV_ENGINE_SW:
-		return 0;
-	case NVDEV_ENGINE_GR:
-		reg = 0x32e0;
-		ctx = 0x38;
-		break;
-	case NVDEV_ENGINE_MPEG:
-		reg = 0x330c;
-		ctx = 0x54;
-		break;
-	default:
-		return -EINVAL;
-	}
+	if (!nv40_fifo_dma_engine(engine, &reg, &ctx))
+		return 0;
+	inst = chan->engn[engine->subdev.index]->addr >> 4;
 
 	spin_lock_irqsave(&fifo->base.lock, flags);
-	nv_engctx(engctx)->addr = nv_gpuobj(engctx)->addr >> 4;
 	nvkm_mask(device, 0x002500, 0x00000001, 0x00000000);
 
-	if ((nvkm_rd32(device, 0x003204) & fifo->base.max) == chan->base.chid)
-		nvkm_wr32(device, reg, nv_engctx(engctx)->addr);
+	chid = nvkm_rd32(device, 0x003204) & (fifo->base.nr - 1);
+	if (chid == chan->base.chid)
+		nvkm_wr32(device, reg, inst);
 	nvkm_kmap(imem->ramfc);
-	nvkm_wo32(imem->ramfc, chan->ramfc + ctx, nv_engctx(engctx)->addr);
+	nvkm_wo32(imem->ramfc, chan->ramfc + ctx, inst);
 	nvkm_done(imem->ramfc);
 
 	nvkm_mask(device, 0x002500, 0x00000001, 0x00000001);
@@ -111,57 +113,91 @@ nv40_fifo_context_attach(struct nvkm_object *parent, struct nvkm_object *engctx)
 	return 0;
 }
 
+static void
+nv40_fifo_dma_engine_dtor(struct nvkm_fifo_chan *base,
+			  struct nvkm_engine *engine)
+{
+	struct nv04_fifo_chan *chan = nv04_fifo_chan(base);
+	if (!chan->engn[engine->subdev.index] ||
+	    chan->engn[engine->subdev.index]->object.oclass) {
+		chan->engn[engine->subdev.index] = NULL;
+		return;
+	}
+	nvkm_gpuobj_del(&chan->engn[engine->subdev.index]);
+}
+
 static int
-nv40_fifo_object_attach(struct nvkm_object *parent,
-			struct nvkm_object *object, u32 handle)
+nv40_fifo_dma_engine_ctor(struct nvkm_fifo_chan *base,
+			  struct nvkm_engine *engine,
+			  struct nvkm_object *object)
 {
-	struct nv04_fifo *fifo = (void *)parent->engine;
-	struct nv04_fifo_chan *chan = (void *)parent;
-	struct nvkm_instmem *imem = fifo->base.engine.subdev.device->imem;
-	u32 context, chid = chan->base.chid;
-	int ret;
+	struct nv04_fifo_chan *chan = nv04_fifo_chan(base);
+	const int engn = engine->subdev.index;
+	u32 reg, ctx;
 
-	if (nv_iclass(object, NV_GPUOBJ_CLASS))
-		context = nv_gpuobj(object)->addr >> 4;
-	else
-		context = 0x00000004; /* just non-zero */
+	if (!nv40_fifo_dma_engine(engine, &reg, &ctx))
+		return 0;
 
-	if (object->engine) {
-		switch (nv_engidx(object->engine)) {
-		case NVDEV_ENGINE_DMAOBJ:
-		case NVDEV_ENGINE_SW:
-			context |= 0x00000000;
-			break;
-		case NVDEV_ENGINE_GR:
-			context |= 0x00100000;
-			break;
-		case NVDEV_ENGINE_MPEG:
-			context |= 0x00200000;
-			break;
-		default:
-			return -EINVAL;
-		}
-	}
-
-	context |= chid << 23;
-
-	mutex_lock(&nv_subdev(fifo)->mutex);
-	ret = nvkm_ramht_insert(imem->ramht, NULL, chid, 0, handle, context);
-	mutex_unlock(&nv_subdev(fifo)->mutex);
-	return ret;
+	if (nv_iclass(object, NV_GPUOBJ_CLASS)) {
+		chan->engn[engn] = nv_gpuobj(object);
+		return 0;
+	}
+
+	return nvkm_object_bind(object, NULL, 0, &chan->engn[engn]);
+}
+
+static int
+nv40_fifo_dma_object_ctor(struct nvkm_fifo_chan *base,
+			  struct nvkm_object *object)
+{
+	struct nv04_fifo_chan *chan = nv04_fifo_chan(base);
+	struct nvkm_instmem *imem = chan->fifo->base.engine.subdev.device->imem;
+	u32 context = chan->base.chid << 23;
+	u32 handle  = object->handle;
+	int hash;
+
+	switch (object->engine->subdev.index) {
+	case NVDEV_ENGINE_DMAOBJ:
+	case NVDEV_ENGINE_SW    : context |= 0x00000000; break;
+	case NVDEV_ENGINE_GR    : context |= 0x00100000; break;
+	case NVDEV_ENGINE_MPEG  : context |= 0x00200000; break;
+	default:
+		WARN_ON(1);
+		return -EINVAL;
+	}
+
+	mutex_lock(&chan->fifo->base.engine.subdev.mutex);
+	hash = nvkm_ramht_insert(imem->ramht, object, chan->base.chid, 4,
+				 handle, context);
+	mutex_unlock(&chan->fifo->base.engine.subdev.mutex);
+	return hash;
 }
 
+static const struct nvkm_fifo_chan_func
+nv40_fifo_dma_func = {
+	.dtor = nv04_fifo_dma_dtor,
+	.init = nv04_fifo_dma_init,
+	.fini = nv04_fifo_dma_fini,
+	.engine_ctor = nv40_fifo_dma_engine_ctor,
+	.engine_dtor = nv40_fifo_dma_engine_dtor,
+	.engine_init = nv40_fifo_dma_engine_init,
+	.engine_fini = nv40_fifo_dma_engine_fini,
+	.object_ctor = nv40_fifo_dma_object_ctor,
+	.object_dtor = nv04_fifo_dma_object_dtor,
+};
+
 static int
-nv40_fifo_chan_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		    struct nvkm_oclass *oclass, void *data, u32 size,
-		    struct nvkm_object **pobject)
+nv40_fifo_dma_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
+		  void *data, u32 size, struct nvkm_object **pobject)
 {
+	struct nvkm_object *parent = oclass->parent;
 	union {
 		struct nv03_channel_dma_v0 v0;
 	} *args = data;
-	struct nv04_fifo *fifo = (void *)engine;
-	struct nvkm_instmem *imem = fifo->base.engine.subdev.device->imem;
-	struct nv04_fifo_chan *chan = NULL;
+	struct nv04_fifo *fifo = nv04_fifo(base);
+	struct nv04_fifo_chan *chan;
+	struct nvkm_device *device = fifo->base.engine.subdev.device;
+	struct nvkm_instmem *imem = device->imem;
 	int ret;
 
 	nvif_ioctl(parent, "create channel dma size %d\n", size);
@@ -169,31 +205,33 @@ nv40_fifo_chan_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 		nvif_ioctl(parent, "create channel dma vers %d pushbuf %llx "
 				   "offset %08x\n", args->v0.version,
 			   args->v0.pushbuf, args->v0.offset);
+		if (!args->v0.pushbuf)
+			return -EINVAL;
 	} else
 		return ret;
 
-	ret = nvkm_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
-				       0x1000, args->v0.pushbuf,
-				       (1ULL << NVDEV_ENGINE_DMAOBJ) |
-				       (1ULL << NVDEV_ENGINE_SW) |
-				       (1ULL << NVDEV_ENGINE_GR) |
-				       (1ULL << NVDEV_ENGINE_MPEG), &chan);
-	*pobject = nv_object(chan);
+	if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
+		return -ENOMEM;
+	*pobject = &chan->base.object;
+
+	ret = nvkm_fifo_chan_ctor(&nv40_fifo_dma_func, &fifo->base,
+				  0x1000, 0x1000, false, 0, args->v0.pushbuf,
+				  (1ULL << NVDEV_ENGINE_DMAOBJ) |
+				  (1ULL << NVDEV_ENGINE_GR) |
+				  (1ULL << NVDEV_ENGINE_MPEG) |
+				  (1ULL << NVDEV_ENGINE_SW),
+				  0, 0xc00000, 0x1000, oclass, &chan->base);
+	chan->fifo = fifo;
 	if (ret)
 		return ret;
 
 	args->v0.chid = chan->base.chid;
 
-	nv_parent(chan)->context_attach = nv40_fifo_context_attach;
-	nv_parent(chan)->context_detach = nv40_fifo_context_detach;
-	nv_parent(chan)->object_attach = nv40_fifo_object_attach;
-	nv_parent(chan)->object_detach = nv04_fifo_object_detach;
 	chan->ramfc = chan->base.chid * 128;
 
 	nvkm_kmap(imem->ramfc);
 	nvkm_wo32(imem->ramfc, chan->ramfc + 0x00, args->v0.offset);
 	nvkm_wo32(imem->ramfc, chan->ramfc + 0x04, args->v0.offset);
-	nvkm_wo32(imem->ramfc, chan->ramfc + 0x0c, chan->base.pushgpu->addr >> 4);
+	nvkm_wo32(imem->ramfc, chan->ramfc + 0x0c, chan->base.push->addr >> 4);
 	nvkm_wo32(imem->ramfc, chan->ramfc + 0x18, 0x30000000 |
 		  NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
 		  NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
@@ -206,20 +244,10 @@ nv40_fifo_chan_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 	return 0;
 }
 
-static struct nvkm_ofuncs
-nv40_fifo_ofuncs = {
-	.ctor = nv40_fifo_chan_ctor,
-	.dtor = nv04_fifo_chan_dtor,
-	.init = nv04_fifo_chan_init,
-	.fini = nv04_fifo_chan_fini,
-	.map  = _nvkm_fifo_channel_map,
-	.rd32 = _nvkm_fifo_channel_rd32,
-	.wr32 = _nvkm_fifo_channel_wr32,
-	.ntfy = _nvkm_fifo_channel_ntfy
-};
-
-struct nvkm_oclass
-nv40_fifo_sclass[] = {
-	{ NV40_CHANNEL_DMA, &nv40_fifo_ofuncs },
-	{}
+const struct nvkm_fifo_chan_oclass
+nv40_fifo_dma_oclass = {
+	.base.oclass = NV40_CHANNEL_DMA,
+	.base.minver = 0,
+	.base.maxver = 0,
+	.ctor = nv40_fifo_dma_new,
 };
drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv50.c
@@ -30,15 +30,14 @@
 #include <nvif/unpack.h>
 
 static int
-nv50_fifo_chan_ctor_dma(struct nvkm_object *parent, struct nvkm_object *engine,
-			struct nvkm_oclass *oclass, void *data, u32 size,
-			struct nvkm_object **pobject)
+nv50_fifo_dma_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
+		  void *data, u32 size, struct nvkm_object **pobject)
 {
+	struct nvkm_object *parent = oclass->parent;
 	union {
 		struct nv50_channel_dma_v0 v0;
 	} *args = data;
-	struct nvkm_device *device = parent->engine->subdev.device;
-	struct nv50_fifo_base *base = (void *)parent;
+	struct nv50_fifo *fifo = nv50_fifo(base);
 	struct nv50_fifo_chan *chan;
 	int ret;
 
@@ -48,68 +47,45 @@ nv50_fifo_chan_ctor_dma(struct nvkm_object *parent, struct nvkm_object *engine,
 			   "pushbuf %llx offset %016llx\n",
 			   args->v0.version, args->v0.vm, args->v0.pushbuf,
 			   args->v0.offset);
-		if (args->v0.vm)
-			return -ENOENT;
+		if (!args->v0.pushbuf)
+			return -EINVAL;
 	} else
 		return ret;
 
-	ret = nvkm_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
-				       0x2000, args->v0.pushbuf,
-				       (1ULL << NVDEV_ENGINE_DMAOBJ) |
-				       (1ULL << NVDEV_ENGINE_SW) |
-				       (1ULL << NVDEV_ENGINE_GR) |
-				       (1ULL << NVDEV_ENGINE_MPEG), &chan);
-	*pobject = nv_object(chan);
+	if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
+		return -ENOMEM;
+	*pobject = &chan->base.object;
+
+	ret = nv50_fifo_chan_ctor(fifo, args->v0.vm, args->v0.pushbuf,
+				  oclass, chan);
 	if (ret)
 		return ret;
 
-	chan->base.inst = base->base.gpuobj.addr;
 	args->v0.chid = chan->base.chid;
 
-	nv_parent(chan)->context_attach = nv50_fifo_context_attach;
-	nv_parent(chan)->context_detach = nv50_fifo_context_detach;
-	nv_parent(chan)->object_attach = nv50_fifo_object_attach;
-	nv_parent(chan)->object_detach = nv50_fifo_object_detach;
-
-	ret = nvkm_ramht_new(device, 0x8000, 16, &base->base.gpuobj,
-			     &chan->ramht);
-	if (ret)
-		return ret;
-
-	nvkm_kmap(base->ramfc);
-	nvkm_wo32(base->ramfc, 0x08, lower_32_bits(args->v0.offset));
-	nvkm_wo32(base->ramfc, 0x0c, upper_32_bits(args->v0.offset));
-	nvkm_wo32(base->ramfc, 0x10, lower_32_bits(args->v0.offset));
-	nvkm_wo32(base->ramfc, 0x14, upper_32_bits(args->v0.offset));
-	nvkm_wo32(base->ramfc, 0x3c, 0x003f6078);
-	nvkm_wo32(base->ramfc, 0x44, 0x01003fff);
-	nvkm_wo32(base->ramfc, 0x48, chan->base.pushgpu->node->offset >> 4);
-	nvkm_wo32(base->ramfc, 0x4c, 0xffffffff);
-	nvkm_wo32(base->ramfc, 0x60, 0x7fffffff);
-	nvkm_wo32(base->ramfc, 0x78, 0x00000000);
-	nvkm_wo32(base->ramfc, 0x7c, 0x30000001);
-	nvkm_wo32(base->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
+	nvkm_kmap(chan->ramfc);
+	nvkm_wo32(chan->ramfc, 0x08, lower_32_bits(args->v0.offset));
+	nvkm_wo32(chan->ramfc, 0x0c, upper_32_bits(args->v0.offset));
+	nvkm_wo32(chan->ramfc, 0x10, lower_32_bits(args->v0.offset));
+	nvkm_wo32(chan->ramfc, 0x14, upper_32_bits(args->v0.offset));
+	nvkm_wo32(chan->ramfc, 0x3c, 0x003f6078);
+	nvkm_wo32(chan->ramfc, 0x44, 0x01003fff);
+	nvkm_wo32(chan->ramfc, 0x48, chan->base.push->node->offset >> 4);
+	nvkm_wo32(chan->ramfc, 0x4c, 0xffffffff);
+	nvkm_wo32(chan->ramfc, 0x60, 0x7fffffff);
+	nvkm_wo32(chan->ramfc, 0x78, 0x00000000);
+	nvkm_wo32(chan->ramfc, 0x7c, 0x30000001);
+	nvkm_wo32(chan->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
 				     (4 << 24) /* SEARCH_FULL */ |
 				     (chan->ramht->gpuobj->node->offset >> 4));
-	nvkm_done(base->ramfc);
+	nvkm_done(chan->ramfc);
 	return 0;
 }
 
-static struct nvkm_ofuncs
-nv50_fifo_ofuncs_dma = {
-	.ctor = nv50_fifo_chan_ctor_dma,
-	.dtor = nv50_fifo_chan_dtor,
-	.init = nv50_fifo_chan_init,
-	.fini = nv50_fifo_chan_fini,
-	.map  = _nvkm_fifo_channel_map,
-	.rd32 = _nvkm_fifo_channel_rd32,
-	.wr32 = _nvkm_fifo_channel_wr32,
-	.ntfy = _nvkm_fifo_channel_ntfy
-};
-
-struct nvkm_oclass
-nv50_fifo_sclass[] = {
-	{ NV50_CHANNEL_DMA, &nv50_fifo_ofuncs_dma },
-	{ NV50_CHANNEL_GPFIFO, &nv50_fifo_ofuncs_ind },
-	{}
+const struct nvkm_fifo_chan_oclass
+nv50_fifo_dma_oclass = {
+	.base.oclass = NV50_CHANNEL_DMA,
+	.base.minver = 0,
+	.base.maxver = 0,
+	.ctor = nv50_fifo_dma_new,
 };
drivers/gpu/drm/nouveau/nvkm/engine/fifo/g84.c
@@ -47,6 +47,15 @@ g84_fifo_uevent_func = {
 	.fini = g84_fifo_uevent_fini,
 };
 
+static const struct nvkm_fifo_func
+g84_fifo_func = {
+	.chan = {
+		&g84_fifo_dma_oclass,
+		&g84_fifo_gpfifo_oclass,
+		NULL
+	},
+};
+
 static int
 g84_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 	      struct nvkm_oclass *oclass, void *data, u32 size,
@@ -61,6 +70,8 @@ g84_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 	if (ret)
 		return ret;
 
+	fifo->base.func = &g84_fifo_func;
+
 	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 128 * 4, 0x1000,
 			      false, &fifo->runlist[0]);
 	if (ret)
@@ -77,8 +88,6 @@ g84_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 	nv_subdev(fifo)->unit = 0x00000100;
 	nv_subdev(fifo)->intr = nv04_fifo_intr;
-	nv_engine(fifo)->cclass = &g84_fifo_cclass;
-	nv_engine(fifo)->sclass = g84_fifo_sclass;
 	fifo->base.pause = nv04_fifo_pause;
 	fifo->base.start = nv04_fifo_start;
 	return 0;
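With this commit, a chipset's user channel classes live in the NULL-terminated chan[] list on its nvkm_fifo_func (g84_fifo_func above). A minimal sketch of how such a list could be walked to find a class by handle; the lookup helper itself is hypothetical and not part of the patch, it only illustrates the shape of the data introduced here.

/* Illustrative only: walk the NULL-terminated chan[] list on nvkm_fifo_func.
 * Uses only fields visible in this diff (.chan[], .base.oclass). */
static const struct nvkm_fifo_chan_oclass *
fifo_chan_class_find(const struct nvkm_fifo_func *func, s32 oclass)
{
	int i;
	for (i = 0; func->chan[i]; i++) {
		if (func->chan[i]->base.oclass == oclass)
			return func->chan[i];
	}
	return NULL;
}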
drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c
@@ -58,28 +58,26 @@ gf100_fifo_uevent_func = {
 void
 gf100_fifo_runlist_update(struct gf100_fifo *fifo)
 {
+	struct gf100_fifo_chan *chan;
 	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
 	struct nvkm_device *device = subdev->device;
 	struct nvkm_memory *cur;
-	int i, p;
+	int nr = 0;
 
 	mutex_lock(&nv_subdev(fifo)->mutex);
 	cur = fifo->runlist.mem[fifo->runlist.active];
 	fifo->runlist.active = !fifo->runlist.active;
 
 	nvkm_kmap(cur);
-	for (i = 0, p = 0; i < 128; i++) {
-		struct gf100_fifo_chan *chan = (void *)fifo->base.channel[i];
-		if (chan && chan->state == RUNNING) {
-			nvkm_wo32(cur, p + 0, i);
-			nvkm_wo32(cur, p + 4, 0x00000004);
-			p += 8;
-		}
+	list_for_each_entry(chan, &fifo->chan, head) {
+		nvkm_wo32(cur, (nr * 8) + 0, chan->base.chid);
+		nvkm_wo32(cur, (nr * 8) + 4, 0x00000004);
+		nr++;
 	}
 	nvkm_done(cur);
 
 	nvkm_wr32(device, 0x002270, nvkm_memory_addr(cur) >> 12);
-	nvkm_wr32(device, 0x002274, 0x01f00000 | (p >> 3));
+	nvkm_wr32(device, 0x002274, 0x01f00000 | nr);
 
 	if (wait_event_timeout(fifo->runlist.wait,
 			       !(nvkm_rd32(device, 0x00227c) & 0x00100000),
@@ -166,7 +164,8 @@ gf100_fifo_recover(struct gf100_fifo *fifo, struct nvkm_engine *engine,
 	assert_spin_locked(&fifo->base.lock);
 	nvkm_mask(device, 0x003004 + (chid * 0x08), 0x00000001, 0x00000000);
-	chan->state = KILLED;
+	list_del_init(&chan->head);
+	chan->killed = true;
 
 	fifo->mask |= 1ULL << nv_engidx(engine);
 	schedule_work(&fifo->fault);
@@ -198,11 +197,15 @@ gf100_fifo_intr_sched_ctxsw(struct gf100_fifo *fifo)
 		(void)save;
 
 		if (busy && unk0 && unk1) {
-			if (!(chan = (void *)fifo->base.channel[chid]))
-				continue;
-			if (!(engine = gf100_fifo_engine(fifo, engn)))
-				continue;
-			gf100_fifo_recover(fifo, engine, chan);
+			list_for_each_entry(chan, &fifo->chan, head) {
+				if (chan->base.chid == chid) {
+					engine = gf100_fifo_engine(fifo, engn);
+					if (!engine)
+						break;
+					gf100_fifo_recover(fifo, engine, chan);
+					break;
+				}
+			}
 		}
 	}
 	spin_unlock_irqrestore(&fifo->base.lock, flags);
@@ -343,7 +346,8 @@ gf100_fifo_intr_fault(struct gf100_fifo *fifo, int unit)
 		   write ? "write" : "read", (u64)vahi << 32 | valo,
 		   unit, eu ? eu->name : "", client, gpcid, ec ? ec->name : "",
 		   reason, er ? er->name : "", chan ? chan->chid : -1,
-		   (u64)inst << 12, nvkm_client_name(chan));
+		   (u64)inst << 12,
+		   chan ? chan->object.client->name : "unknown");
 
 	if (engine && chan)
 		gf100_fifo_recover(fifo, engine, (void *)chan);
@@ -369,6 +373,8 @@ gf100_fifo_intr_pbdma(struct gf100_fifo *fifo, int unit)
 	u32 chid = nvkm_rd32(device, 0x040120 + (unit * 0x2000)) & 0x7f;
 	u32 subc = (addr & 0x00070000) >> 16;
 	u32 mthd = (addr & 0x00003ffc);
+	struct nvkm_fifo_chan *chan;
+	unsigned long flags;
 	u32 show = stat;
 	char msg[128];
@@ -381,11 +387,13 @@ gf100_fifo_intr_pbdma(struct gf100_fifo *fifo, int unit)
 	if (show) {
 		nvkm_snprintbf(msg, sizeof(msg), gf100_fifo_pbdma_intr, show);
-		nvkm_error(subdev, "PBDMA%d: %08x [%s] ch %d [%s] subc %d "
-				   "mthd %04x data %08x\n",
-			   unit, show, msg, chid,
-			   nvkm_client_name_for_fifo_chid(&fifo->base, chid),
-			   subc, mthd, data);
+		chan = nvkm_fifo_chan_chid(&fifo->base, chid, &flags);
+		nvkm_error(subdev, "PBDMA%d: %08x [%s] ch %d [%010llx %s] "
+				   "subc %d mthd %04x data %08x\n",
+			   unit, show, msg, chid,
+			   chan ? chan->inst->addr : 0,
+			   chan ? chan->object.client->name : "unknown",
+			   subc, mthd, data);
+		nvkm_fifo_chan_put(&fifo->base, flags, &chan);
 	}
 
 	nvkm_wr32(device, 0x0400c0 + (unit * 0x2000), 0x80600008);
@@ -579,6 +587,14 @@ gf100_fifo_dtor(struct nvkm_object *object)
 	nvkm_fifo_destroy(&fifo->base);
 }
 
+static const struct nvkm_fifo_func
+gf100_fifo_func = {
+	.chan = {
+		&gf100_fifo_gpfifo_oclass,
+		NULL
+	},
+};
+
 static int
 gf100_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 		struct nvkm_oclass *oclass, void *data, u32 size,
@@ -594,6 +610,9 @@ gf100_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 	if (ret)
 		return ret;
 
+	fifo->base.func = &gf100_fifo_func;
+
+	INIT_LIST_HEAD(&fifo->chan);
 	INIT_WORK(&fifo->fault, gf100_fifo_recover_work);
 
 	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 0x1000,
@@ -625,8 +644,6 @@ gf100_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 	nv_subdev(fifo)->unit = 0x00000100;
 	nv_subdev(fifo)->intr = gf100_fifo_intr;
-	nv_engine(fifo)->cclass = &gf100_fifo_cclass;
-	nv_engine(fifo)->sclass = gf100_fifo_sclass;
 	return 0;
 }
drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.h
 #ifndef __GF100_FIFO_H__
 #define __GF100_FIFO_H__
+#define gf100_fifo(p) container_of((p), struct gf100_fifo, base)
 #include "priv.h"
 
+#include <subdev/mmu.h>
+
 struct gf100_fifo {
 	struct nvkm_fifo base;
 
+	struct list_head chan;
+
 	struct work_struct fault;
 	u64 mask;
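The gf100_fifo() macro added above is the usual container_of downcast from the embedded nvkm_fifo base back to the chipset wrapper. A generic sketch of the idiom in plain C, independent of the nvkm types, for readers unfamiliar with the pattern:

#include <stddef.h>

/* Generic container_of: recover the outer struct from a pointer to one of
 * its members; gf100_fifo(p) applies the same idea to struct gf100_fifo. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct base_obj { int id; };
struct wrapper  { int extra; struct base_obj base; };

static struct wrapper *to_wrapper(struct base_obj *b)
{
	return container_of(b, struct wrapper, base);
}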
drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
@@ -32,23 +32,6 @@
 #include <nvif/class.h>
 
-#define _(a,b) { (a), ((1ULL << (a)) | (b)) }
-static const struct {
-	u64 subdev;
-	u64 mask;
-} fifo_engine[] = {
-	_(NVDEV_ENGINE_GR      , (1ULL << NVDEV_ENGINE_SW) |
-				 (1ULL << NVDEV_ENGINE_CE2)),
-	_(NVDEV_ENGINE_MSPDEC  , 0),
-	_(NVDEV_ENGINE_MSPPP   , 0),
-	_(NVDEV_ENGINE_MSVLD   , 0),
-	_(NVDEV_ENGINE_CE0     , 0),
-	_(NVDEV_ENGINE_CE1     , 0),
-	_(NVDEV_ENGINE_MSENC   , 0),
-};
-#undef _
-#define FIFO_ENGINE_NR ARRAY_SIZE(fifo_engine)
-
 static void
 gk104_fifo_uevent_fini(struct nvkm_event *event, int type, int index)
 {
@@ -76,28 +59,26 @@ void
 gk104_fifo_runlist_update(struct gk104_fifo *fifo, u32 engine)
 {
 	struct gk104_fifo_engn *engn = &fifo->engine[engine];
+	struct gk104_fifo_chan *chan;
 	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
 	struct nvkm_device *device = subdev->device;
 	struct nvkm_memory *cur;
-	int i, p;
+	int nr = 0;
 
 	mutex_lock(&nv_subdev(fifo)->mutex);
 	cur = engn->runlist[engn->cur_runlist];
 	engn->cur_runlist = !engn->cur_runlist;
 
 	nvkm_kmap(cur);
-	for (i = 0, p = 0; i < fifo->base.max; i++) {
-		struct gk104_fifo_chan *chan = (void *)fifo->base.channel[i];
-		if (chan && chan->state == RUNNING && chan->engine == engine) {
-			nvkm_wo32(cur, p + 0, i);
-			nvkm_wo32(cur, p + 4, 0x00000000);
-			p += 8;
-		}
+	list_for_each_entry(chan, &engn->chan, head) {
+		nvkm_wo32(cur, (nr * 8) + 0, chan->base.chid);
+		nvkm_wo32(cur, (nr * 8) + 4, 0x00000000);
+		nr++;
 	}
 	nvkm_done(cur);
 
 	nvkm_wr32(device, 0x002270, nvkm_memory_addr(cur) >> 12);
-	nvkm_wr32(device, 0x002274, (engine << 20) | (p >> 3));
+	nvkm_wr32(device, 0x002274, (engine << 20) | nr);
 
 	if (wait_event_timeout(engn->wait, !(nvkm_rd32(device, 0x002284 +
 			       (engine * 0x08)) & 0x00100000),
@@ -106,31 +87,13 @@ gk104_fifo_runlist_update(struct gk104_fifo *fifo, u32 engine)
 	mutex_unlock(&nv_subdev(fifo)->mutex);
 }
 
-static inline int
-gk104_fifo_engidx(struct gk104_fifo *fifo, u32 engn)
-{
-	switch (engn) {
-	case NVDEV_ENGINE_GR    :
-	case NVDEV_ENGINE_CE2   : engn = 0; break;
-	case NVDEV_ENGINE_MSVLD : engn = 1; break;
-	case NVDEV_ENGINE_MSPPP : engn = 2; break;
-	case NVDEV_ENGINE_MSPDEC: engn = 3; break;
-	case NVDEV_ENGINE_CE0   : engn = 4; break;
-	case NVDEV_ENGINE_CE1   : engn = 5; break;
-	case NVDEV_ENGINE_MSENC : engn = 6; break;
-	default:
-		return -1;
-	}
-
-	return engn;
-}
-
 static inline struct nvkm_engine *
 gk104_fifo_engine(struct gk104_fifo *fifo, u32 engn)
 {
-	if (engn >= ARRAY_SIZE(fifo_engine))
-		return NULL;
-	return nvkm_engine(fifo, fifo_engine[engn].subdev);
+	u64 subdevs = gk104_fifo_engine_subdev(engn);
+	if (subdevs)
+		return nvkm_engine(fifo, __ffs(subdevs));
+	return NULL;
 }
 
 static void
@@ -149,7 +112,7 @@ gk104_fifo_recover_work(struct work_struct *work)
 	spin_unlock_irqrestore(&fifo->base.lock, flags);
 
 	for (todo = mask; engn = __ffs64(todo), todo; todo &= ~(1 << engn))
-		engm |= 1 << gk104_fifo_engidx(fifo, engn);
+		engm |= 1 << gk104_fifo_subdev_engine(engn);
 	nvkm_mask(device, 0x002630, engm, engm);
 
 	for (todo = mask; engn = __ffs64(todo), todo; todo &= ~(1 << engn)) {
@@ -157,7 +120,7 @@ gk104_fifo_recover_work(struct work_struct *work)
 		nvkm_subdev_fini(&engine->subdev, false);
 		WARN_ON(nvkm_subdev_init(&engine->subdev));
-		gk104_fifo_runlist_update(fifo, gk104_fifo_engidx(fifo, engn));
+		gk104_fifo_runlist_update(fifo, gk104_fifo_subdev_engine(engn));
 	}
 
 	nvkm_wr32(device, 0x00262c, engm);
@@ -177,7 +140,8 @@ gk104_fifo_recover(struct gk104_fifo *fifo, struct nvkm_engine *engine,
 	assert_spin_locked(&fifo->base.lock);
 	nvkm_mask(device, 0x800004 + (chid * 0x08), 0x00000800, 0x00000800);
-	chan->state = KILLED;
+	list_del_init(&chan->head);
+	chan->killed = true;
 
 	fifo->mask |= 1ULL << nv_engidx(engine);
 	schedule_work(&fifo->fault);
@@ -223,7 +187,7 @@ gk104_fifo_intr_sched_ctxsw(struct gk104_fifo *fifo)
 	u32 engn;
 
 	spin_lock_irqsave(&fifo->base.lock, flags);
-	for (engn = 0; engn < ARRAY_SIZE(fifo_engine); engn++) {
+	for (engn = 0; engn < ARRAY_SIZE(fifo->engine); engn++) {
 		u32 stat = nvkm_rd32(device, 0x002640 + (engn * 0x04));
 		u32 busy = (stat & 0x80000000);
 		u32 next = (stat & 0x07ff0000) >> 16;
@@ -235,11 +199,15 @@ gk104_fifo_intr_sched_ctxsw(struct gk104_fifo *fifo)
 		(void)save;
 
 		if (busy && chsw) {
-			if (!(chan = (void *)fifo->base.channel[chid]))
-				continue;
-			if (!(engine = gk104_fifo_engine(fifo, engn)))
-				continue;
-			gk104_fifo_recover(fifo, engine, chan);
+			list_for_each_entry(chan, &fifo->engine[engn].chan, head) {
+				if (chan->base.chid == chid) {
+					engine = gk104_fifo_engine(fifo, engn);
+					if (!engine)
+						break;
+					gk104_fifo_recover(fifo, engine, chan);
+					break;
+				}
+			}
 		}
 	}
 	spin_unlock_irqrestore(&fifo->base.lock, flags);
@@ -444,7 +412,8 @@ gk104_fifo_intr_fault(struct gk104_fifo *fifo, int unit)
 		   write ? "write" : "read", (u64)vahi << 32 | valo,
 		   unit, eu ? eu->name : "", client, gpcid, ec ? ec->name : "",
 		   reason, er ? er->name : "", chan ? chan->chid : -1,
-		   (u64)inst << 12, nvkm_client_name(chan));
+		   (u64)inst << 12,
+		   chan ? chan->object.client->name : "unknown");
 
 	if (engine && chan)
 		gk104_fifo_recover(fifo, engine, (void *)chan);
@@ -498,6 +467,8 @@ gk104_fifo_intr_pbdma_0(struct gk104_fifo *fifo, int unit)
 	u32 subc = (addr & 0x00070000) >> 16;
 	u32 mthd = (addr & 0x00003ffc);
 	u32 show = stat;
+	struct nvkm_fifo_chan *chan;
+	unsigned long flags;
 	char msg[128];
 
 	if (stat & 0x00800000) {
@@ -510,11 +481,13 @@ gk104_fifo_intr_pbdma_0(struct gk104_fifo *fifo, int unit)
 	if (show) {
 		nvkm_snprintbf(msg, sizeof(msg), gk104_fifo_pbdma_intr_0, show);
-		nvkm_error(subdev, "PBDMA%d: %08x [%s] ch %d [%s] subc %d "
-				   "mthd %04x data %08x\n",
-			   unit, show, msg, chid,
-			   nvkm_client_name_for_fifo_chid(&fifo->base, chid),
-			   subc, mthd, data);
+		chan = nvkm_fifo_chan_chid(&fifo->base, chid, &flags);
+		nvkm_error(subdev, "PBDMA%d: %08x [%s] ch %d [%010llx %s] "
+				   "subc %d mthd %04x data %08x\n",
+			   unit, show, msg, chid,
+			   chan ? chan->inst->addr : 0,
+			   chan ? chan->object.client->name : "unknown",
+			   subc, mthd, data);
+		nvkm_fifo_chan_put(&fifo->base, flags, &chan);
 	}
 
 	nvkm_wr32(device, 0x040108 + (unit * 0x2000), stat);
@@ -722,7 +695,7 @@ gk104_fifo_dtor(struct nvkm_object *object)
 	nvkm_vm_put(&fifo->user.bar);
 	nvkm_memory_del(&fifo->user.mem);
 
-	for (i = 0; i < FIFO_ENGINE_NR; i++) {
+	for (i = 0; i < ARRAY_SIZE(fifo->engine); i++) {
 		nvkm_memory_del(&fifo->engine[i].runlist[1]);
 		nvkm_memory_del(&fifo->engine[i].runlist[0]);
 	}
@@ -730,6 +703,14 @@ gk104_fifo_dtor(struct nvkm_object *object)
 	nvkm_fifo_destroy(&fifo->base);
 }
 
+static const struct nvkm_fifo_func
+gk104_fifo_func = {
+	.chan = {
+		&gk104_fifo_gpfifo_oclass,
+		NULL
+	},
+};
+
 int
 gk104_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 		struct nvkm_oclass *oclass, void *data, u32 size,
@@ -747,9 +728,11 @@ gk104_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 	if (ret)
 		return ret;
 
+	fifo->base.func = &gk104_fifo_func;
+
 	INIT_WORK(&fifo->fault, gk104_fifo_recover_work);
 
-	for (i = 0; i < FIFO_ENGINE_NR; i++) {
+	for (i = 0; i < ARRAY_SIZE(fifo->engine); i++) {
 		ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
 				      0x8000, 0x1000, false,
 				      &fifo->engine[i].runlist[0]);
@@ -763,6 +746,7 @@ gk104_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 			return ret;
 
 		init_waitqueue_head(&fifo->engine[i].wait);
+		INIT_LIST_HEAD(&fifo->engine[i].chan);
 	}
 
 	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
@@ -783,8 +767,6 @@ gk104_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 	nv_subdev(fifo)->unit = 0x00000100;
 	nv_subdev(fifo)->intr = gk104_fifo_intr;
-	nv_engine(fifo)->cclass = &gk104_fifo_cclass;
-	nv_engine(fifo)->sclass = gk104_fifo_sclass;
 	return 0;
 }
drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h
 #ifndef __GK104_FIFO_H__
 #define __GK104_FIFO_H__
+#define gk104_fifo(p) container_of((p), struct gk104_fifo, base)
 #include "priv.h"
 
+#include <subdev/mmu.h>
+
 struct gk104_fifo_engn {
 	struct nvkm_memory *runlist[2];
 	int cur_runlist;
 	wait_queue_head_t wait;
+	struct list_head chan;
 };
 
 struct gk104_fifo {
@@ -38,4 +42,42 @@ void gk104_fifo_runlist_update(struct gk104_fifo *, u32 engine);
 int gm204_fifo_ctor(struct nvkm_object *, struct nvkm_object *,
 		    struct nvkm_oclass *, void *, u32,
 		    struct nvkm_object **);
+
+static inline u64
+gk104_fifo_engine_subdev(int engine)
+{
+	switch (engine) {
+	case 0: return (1ULL << NVDEV_ENGINE_GR) |
+		       (1ULL << NVDEV_ENGINE_SW) |
+		       (1ULL << NVDEV_ENGINE_CE2);
+	case 1: return (1ULL << NVDEV_ENGINE_MSPDEC);
+	case 2: return (1ULL << NVDEV_ENGINE_MSPPP);
+	case 3: return (1ULL << NVDEV_ENGINE_MSVLD);
+	case 4: return (1ULL << NVDEV_ENGINE_CE0);
+	case 5: return (1ULL << NVDEV_ENGINE_CE1);
+	case 6: return (1ULL << NVDEV_ENGINE_MSENC);
+	default:
+		WARN_ON(1);
+		return 0;
+	}
+}
+
+static inline int
+gk104_fifo_subdev_engine(int subdev)
+{
+	switch (subdev) {
+	case NVDEV_ENGINE_GR:
+	case NVDEV_ENGINE_SW:
+	case NVDEV_ENGINE_CE2   : return 0;
+	case NVDEV_ENGINE_MSPDEC: return 1;
+	case NVDEV_ENGINE_MSPPP : return 2;
+	case NVDEV_ENGINE_MSVLD : return 3;
+	case NVDEV_ENGINE_CE0   : return 4;
+	case NVDEV_ENGINE_CE1   : return 5;
+	case NVDEV_ENGINE_MSENC : return 6;
+	default:
+		WARN_ON(1);
+		return 0;
+	}
+}
 #endif
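The two inline helpers added above replace the removed fifo_engine[] table in gk104.c: one maps a runlist index to the mask of subdevs it serves, the other maps a subdev back to its runlist index. A small illustrative round-trip over the seven GK104 runlists, not code from this patch; it relies only on the helpers above and kernel primitives (__ffs64, WARN_ON) already used in this diff.

/* Illustrative only: check that every subdev reported for a runlist maps
 * back to that same runlist index. */
static void gk104_fifo_check_runlist_map(void)
{
	int runlist;

	for (runlist = 0; runlist < 7; runlist++) {
		u64 subdevs = gk104_fifo_engine_subdev(runlist);

		while (subdevs) {
			int subdev = __ffs64(subdevs);
			WARN_ON(gk104_fifo_subdev_engine(subdev) != runlist);
			subdevs &= ~(1ULL << subdev);
		}
	}
}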
drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm204.c
@@ -24,6 +24,14 @@
 #include "gk104.h"
 #include "changk104.h"
 
+static const struct nvkm_fifo_func
+gm204_fifo_func = {
+	.chan = {
+		&gm204_fifo_gpfifo_oclass,
+		NULL
+	},
+};
+
 int
 gm204_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 		struct nvkm_oclass *oclass, void *data, u32 size,
@@ -32,7 +40,7 @@ gm204_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 	int ret = gk104_fifo_ctor(parent, engine, oclass, data, size, pobject);
 	if (ret == 0) {
 		struct gk104_fifo *fifo = (void *)*pobject;
-		nv_engine(fifo)->sclass = gm204_fifo_sclass;
+		fifo->base.func = &gm204_fifo_func;
 	}
 	return ret;
 }
drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifog84.c
View file @
8f0649b5
...
@@ -30,15 +30,14 @@
...
@@ -30,15 +30,14 @@
#include <nvif/unpack.h>
#include <nvif/unpack.h>
static
int
static
int
g84_fifo_chan_ctor_ind
(
struct
nvkm_object
*
parent
,
struct
nvkm_object
*
engine
,
g84_fifo_gpfifo_new
(
struct
nvkm_fifo
*
base
,
const
struct
nvkm_oclass
*
oclass
,
struct
nvkm_oclass
*
oclass
,
void
*
data
,
u32
size
,
void
*
data
,
u32
size
,
struct
nvkm_object
**
pobject
)
struct
nvkm_object
**
pobject
)
{
{
struct
nvkm_object
*
parent
=
oclass
->
parent
;
union
{
union
{
struct
nv50_channel_gpfifo_v0
v0
;
struct
nv50_channel_gpfifo_v0
v0
;
}
*
args
=
data
;
}
*
args
=
data
;
struct
nvkm_device
*
device
=
parent
->
engine
->
subdev
.
device
;
struct
nv50_fifo
*
fifo
=
nv50_fifo
(
base
);
struct
nv50_fifo_base
*
base
=
(
void
*
)
parent
;
struct
 	struct nv50_fifo_chan *chan;
 	u64 ioffset, ilength;
 	int ret;
...
@@ -50,73 +49,46 @@ g84_fifo_chan_ctor_ind(struct nvkm_object *parent, struct nvkm_object *engine,
 			   "ilength %08x\n",
 			   args->v0.version, args->v0.vm, args->v0.pushbuf,
 			   args->v0.ioffset, args->v0.ilength);
-		if (args->v0.vm)
-			return -ENOENT;
+		if (!args->v0.pushbuf)
+			return -EINVAL;
 	} else
 		return ret;

-	ret = nvkm_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
-				       0x2000, args->v0.pushbuf,
-				       (1ULL << NVDEV_ENGINE_DMAOBJ) |
-				       (1ULL << NVDEV_ENGINE_SW) |
-				       (1ULL << NVDEV_ENGINE_GR) |
-				       (1ULL << NVDEV_ENGINE_MPEG) |
-				       (1ULL << NVDEV_ENGINE_ME) |
-				       (1ULL << NVDEV_ENGINE_VP) |
-				       (1ULL << NVDEV_ENGINE_CIPHER) |
-				       (1ULL << NVDEV_ENGINE_SEC) |
-				       (1ULL << NVDEV_ENGINE_BSP) |
-				       (1ULL << NVDEV_ENGINE_MSVLD) |
-				       (1ULL << NVDEV_ENGINE_MSPDEC) |
-				       (1ULL << NVDEV_ENGINE_MSPPP) |
-				       (1ULL << NVDEV_ENGINE_CE0) |
-				       (1ULL << NVDEV_ENGINE_VIC), &chan);
-	*pobject = nv_object(chan);
+	if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
+		return -ENOMEM;
+	*pobject = &chan->base.object;
+
+	ret = g84_fifo_chan_ctor(fifo, args->v0.vm, args->v0.pushbuf,
+				 oclass, chan);
 	if (ret)
 		return ret;

-	chan->base.inst = base->base.gpuobj.addr;
 	args->v0.chid = chan->base.chid;

-	ret = nvkm_ramht_new(device, 0x8000, 16, &base->base.gpuobj,
-			     &chan->ramht);
-	if (ret)
-		return ret;
-
-	nv_parent(chan)->context_attach = g84_fifo_context_attach;
-	nv_parent(chan)->context_detach = g84_fifo_context_detach;
-	nv_parent(chan)->object_attach = g84_fifo_object_attach;
-	nv_parent(chan)->object_detach = nv50_fifo_object_detach;
-
 	ioffset = args->v0.ioffset;
 	ilength = order_base_2(args->v0.ilength / 8);

-	nvkm_kmap(base->ramfc);
-	nvkm_wo32(base->ramfc, 0x3c, 0x403f6078);
-	nvkm_wo32(base->ramfc, 0x44, 0x01003fff);
-	nvkm_wo32(base->ramfc, 0x48, chan->base.pushgpu->node->offset >> 4);
-	nvkm_wo32(base->ramfc, 0x50, lower_32_bits(ioffset));
-	nvkm_wo32(base->ramfc, 0x54, upper_32_bits(ioffset) | (ilength << 16));
-	nvkm_wo32(base->ramfc, 0x60, 0x7fffffff);
-	nvkm_wo32(base->ramfc, 0x78, 0x00000000);
-	nvkm_wo32(base->ramfc, 0x7c, 0x30000001);
-	nvkm_wo32(base->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
-				     (4 << 24) /* SEARCH_FULL */ |
-				     (chan->ramht->gpuobj->node->offset >> 4));
-	nvkm_wo32(base->ramfc, 0x88, base->cache->addr >> 10);
-	nvkm_wo32(base->ramfc, 0x98, nv_gpuobj(base)->addr >> 12);
-	nvkm_done(base->ramfc);
+	nvkm_kmap(chan->ramfc);
+	nvkm_wo32(chan->ramfc, 0x3c, 0x403f6078);
+	nvkm_wo32(chan->ramfc, 0x44, 0x01003fff);
+	nvkm_wo32(chan->ramfc, 0x48, chan->base.push->node->offset >> 4);
+	nvkm_wo32(chan->ramfc, 0x50, lower_32_bits(ioffset));
+	nvkm_wo32(chan->ramfc, 0x54, upper_32_bits(ioffset) | (ilength << 16));
+	nvkm_wo32(chan->ramfc, 0x60, 0x7fffffff);
+	nvkm_wo32(chan->ramfc, 0x78, 0x00000000);
+	nvkm_wo32(chan->ramfc, 0x7c, 0x30000001);
+	nvkm_wo32(chan->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
+				     (4 << 24) /* SEARCH_FULL */ |
+				     (chan->ramht->gpuobj->node->offset >> 4));
+	nvkm_wo32(chan->ramfc, 0x88, chan->cache->addr >> 10);
+	nvkm_wo32(chan->ramfc, 0x98, chan->base.inst->addr >> 12);
+	nvkm_done(chan->ramfc);
 	return 0;
 }

-struct nvkm_ofuncs
-g84_fifo_ofuncs_ind = {
-	.ctor = g84_fifo_chan_ctor_ind,
-	.dtor = nv50_fifo_chan_dtor,
-	.init = g84_fifo_chan_init,
-	.fini = nv50_fifo_chan_fini,
-	.map  = _nvkm_fifo_channel_map,
-	.rd32 = _nvkm_fifo_channel_rd32,
-	.wr32 = _nvkm_fifo_channel_wr32,
-	.ntfy = _nvkm_fifo_channel_ntfy
+const struct nvkm_fifo_chan_oclass
+g84_fifo_gpfifo_oclass = {
+	.base.oclass = G82_CHANNEL_GPFIFO,
+	.base.minver = 0,
+	.base.maxver = 0,
+	.ctor = g84_fifo_gpfifo_new,
 };
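The g84_fifo_gpfifo_oclass table above is the whole of the new-style registration: a class id, the supported argument version range, and a constructor. A standalone C sketch of that pattern follows; every name in it is invented for illustration and it is not the nvkm API, only the shape of a class-descriptor table and the lookup that dispatches on it.

/* illustrative mock-up only: class id -> constructor dispatch */
#include <stdio.h>
#include <stdint.h>

struct mock_chan_oclass {
    uint32_t oclass;                 /* class id userspace asks for */
    int      minver, maxver;         /* supported argument versions */
    int    (*ctor)(uint32_t oclass); /* per-chipset constructor */
};

static int mock_g84_gpfifo_new(uint32_t oclass)
{
    printf("constructing gpfifo channel for class %#x\n", oclass);
    return 0;
}

static const struct mock_chan_oclass mock_classes[] = {
    { .oclass = 0x826f /* hypothetical G82-style gpfifo id */,
      .minver = 0, .maxver = 0, .ctor = mock_g84_gpfifo_new },
    {}                               /* NULL ctor terminates the table */
};

/* walk the table and call the matching constructor */
static int mock_chan_new(uint32_t oclass)
{
    const struct mock_chan_oclass *c;
    for (c = mock_classes; c->ctor; c++)
        if (c->oclass == oclass)
            return c->ctor(oclass);
    return -1;                       /* class not supported here */
}

int main(void) { return mock_chan_new(0x826f); }

The terminator entry is what lets chipset-independent code iterate the list without knowing its length, which is the same reason the driver's tables end in a NULL slot.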
drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogf100.c

@@ -30,29 +30,33 @@
 #include <nvif/class.h>
 #include <nvif/unpack.h>

-static int
-gf100_fifo_context_detach(struct nvkm_object *parent, bool suspend,
-			  struct nvkm_object *object)
+static u32
+gf100_fifo_gpfifo_engine_addr(struct nvkm_engine *engine)
 {
-	struct gf100_fifo *fifo = (void *)parent->engine;
-	struct gf100_fifo_base *base = (void *)parent->parent;
-	struct gf100_fifo_chan *chan = (void *)parent;
-	struct nvkm_gpuobj *engn = &base->base.gpuobj;
-	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
-	struct nvkm_device *device = subdev->device;
-	u32 addr;
-
-	switch (nv_engidx(object->engine)) {
-	case NVDEV_ENGINE_SW    : return 0;
-	case NVDEV_ENGINE_GR    : addr = 0x0210; break;
-	case NVDEV_ENGINE_CE0   : addr = 0x0230; break;
-	case NVDEV_ENGINE_CE1   : addr = 0x0240; break;
-	case NVDEV_ENGINE_MSVLD : addr = 0x0270; break;
-	case NVDEV_ENGINE_MSPDEC: addr = 0x0250; break;
-	case NVDEV_ENGINE_MSPPP : addr = 0x0260; break;
+	switch (engine->subdev.index) {
+	case NVDEV_ENGINE_SW    : return 0;
+	case NVDEV_ENGINE_GR    : return 0x0210;
+	case NVDEV_ENGINE_CE0   : return 0x0230;
+	case NVDEV_ENGINE_CE1   : return 0x0240;
+	case NVDEV_ENGINE_MSPDEC: return 0x0250;
+	case NVDEV_ENGINE_MSPPP : return 0x0260;
+	case NVDEV_ENGINE_MSVLD : return 0x0270;
 	default:
-		return -EINVAL;
+		WARN_ON(1);
+		return 0;
 	}
+}
+
+static int
+gf100_fifo_gpfifo_engine_fini(struct nvkm_fifo_chan *base,
+			      struct nvkm_engine *engine, bool suspend)
+{
+	const u32 offset = gf100_fifo_gpfifo_engine_addr(engine);
+	struct gf100_fifo_chan *chan = gf100_fifo_chan(base);
+	struct nvkm_subdev *subdev = &chan->fifo->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	struct nvkm_gpuobj *inst = chan->base.inst;
+	int ret = 0;

 	nvkm_wr32(device, 0x002634, chan->base.chid);
 	if (nvkm_msec(device, 2000,
@@ -60,143 +64,197 @@ gf100_fifo_context_detach(struct nvkm_object *parent, bool suspend,
 			break;
 	) < 0) {
 		nvkm_error(subdev, "channel %d [%s] kick timeout\n",
-			   chan->base.chid, nvkm_client_name(chan));
-		if (suspend)
-			return -EBUSY;
+			   chan->base.chid, chan->base.object.client->name);
+		ret = -EBUSY;
+		if (suspend)
+			return ret;
 	}

-	nvkm_kmap(engn);
-	nvkm_wo32(engn, addr + 0x00, 0x00000000);
-	nvkm_wo32(engn, addr + 0x04, 0x00000000);
-	nvkm_done(engn);
-	return 0;
+	if (offset) {
+		nvkm_kmap(inst);
+		nvkm_wo32(inst, offset + 0x00, 0x00000000);
+		nvkm_wo32(inst, offset + 0x04, 0x00000000);
+		nvkm_done(inst);
+	}
+	return ret;
+}
+
+static int
+gf100_fifo_gpfifo_engine_init(struct nvkm_fifo_chan *base,
+			      struct nvkm_engine *engine)
+{
+	const u32 offset = gf100_fifo_gpfifo_engine_addr(engine);
+	struct gf100_fifo_chan *chan = gf100_fifo_chan(base);
+	struct nvkm_gpuobj *inst = chan->base.inst;
+
+	if (offset) {
+		u64 addr = chan->engn[engine->subdev.index].vma.offset;
+		nvkm_kmap(inst);
+		nvkm_wo32(inst, offset + 0x00, lower_32_bits(addr) | 4);
+		nvkm_wo32(inst, offset + 0x04, upper_32_bits(addr));
+		nvkm_done(inst);
+	}
+	return 0;
+}
+
+static void
+gf100_fifo_gpfifo_engine_dtor(struct nvkm_fifo_chan *base,
+			      struct nvkm_engine *engine)
+{
+	struct gf100_fifo_chan *chan = gf100_fifo_chan(base);
+	nvkm_gpuobj_unmap(&chan->engn[engine->subdev.index].vma);
+	nvkm_gpuobj_del(&chan->engn[engine->subdev.index].inst);
 }

 static int
-gf100_fifo_context_attach(struct nvkm_object *parent,
-			  struct nvkm_object *object)
+gf100_fifo_gpfifo_engine_ctor(struct nvkm_fifo_chan *base,
+			      struct nvkm_engine *engine,
+			      struct nvkm_object *object)
 {
-	struct gf100_fifo_base *base = (void *)parent->parent;
-	struct nvkm_gpuobj *engn = &base->base.gpuobj;
-	struct nvkm_engctx *ectx = (void *)object;
-	u32 addr;
+	struct gf100_fifo_chan *chan = gf100_fifo_chan(base);
+	int engn = engine->subdev.index;
 	int ret;

-	switch (nv_engidx(object->engine)) {
-	case NVDEV_ENGINE_SW    : return 0;
-	case NVDEV_ENGINE_GR    : addr = 0x0210; break;
-	case NVDEV_ENGINE_CE0   : addr = 0x0230; break;
-	case NVDEV_ENGINE_CE1   : addr = 0x0240; break;
-	case NVDEV_ENGINE_MSVLD : addr = 0x0270; break;
-	case NVDEV_ENGINE_MSPDEC: addr = 0x0250; break;
-	case NVDEV_ENGINE_MSPPP : addr = 0x0260; break;
-	default:
-		return -EINVAL;
-	}
+	if (!gf100_fifo_gpfifo_engine_addr(engine))
+		return 0;

-	if (!ectx->vma.node) {
-		ret = nvkm_gpuobj_map(nv_gpuobj(ectx), base->vm,
-				      NV_MEM_ACCESS_RW, &ectx->vma);
-		if (ret)
-			return ret;
-
-		nv_engctx(ectx)->addr = nv_gpuobj(base)->addr >> 12;
+	if (object->oclass) {
+		return nvkm_gpuobj_map(nv_gpuobj(object), chan->vm,
+				       NV_MEM_ACCESS_RW,
+				       &chan->engn[engn].vma);
 	}

-	nvkm_kmap(engn);
-	nvkm_wo32(engn, addr + 0x00, lower_32_bits(ectx->vma.offset) | 4);
-	nvkm_wo32(engn, addr + 0x04, upper_32_bits(ectx->vma.offset));
-	nvkm_done(engn);
-	return 0;
+	ret = nvkm_object_bind(object, NULL, 0, &chan->engn[engn].inst);
+	if (ret)
+		return ret;
+
+	return nvkm_gpuobj_map(chan->engn[engn].inst, chan->vm,
+			       NV_MEM_ACCESS_RW, &chan->engn[engn].vma);
 }

-static int
-gf100_fifo_chan_fini(struct nvkm_object *object, bool suspend)
+static void
+gf100_fifo_gpfifo_fini(struct nvkm_fifo_chan *base)
 {
-	struct gf100_fifo *fifo = (void *)object->engine;
-	struct gf100_fifo_chan *chan = (void *)object;
+	struct gf100_fifo_chan *chan = gf100_fifo_chan(base);
+	struct gf100_fifo *fifo = chan->fifo;
 	struct nvkm_device *device = fifo->base.engine.subdev.device;
-	u32 chid = chan->base.chid;
+	u32 coff = chan->base.chid * 8;

-	if (chan->state == RUNNING && (chan->state = STOPPED) == STOPPED) {
-		nvkm_mask(device, 0x003004 + (chid * 8), 0x00000001, 0x00000000);
+	if (!list_empty(&chan->head) && !chan->killed) {
+		list_del_init(&chan->head);
+		nvkm_mask(device, 0x003004 + coff, 0x00000001, 0x00000000);
 		gf100_fifo_runlist_update(fifo);
 	}

 	gf100_fifo_intr_engine(fifo);
-	nvkm_wr32(device, 0x003000 + (chid * 8), 0x00000000);
-	return nvkm_fifo_channel_fini(&chan->base, suspend);
+	nvkm_wr32(device, 0x003000 + coff, 0x00000000);
 }

-static int
-gf100_fifo_chan_init(struct nvkm_object *object)
+static void
+gf100_fifo_gpfifo_init(struct nvkm_fifo_chan *base)
 {
-	struct nvkm_gpuobj *base = nv_gpuobj(object->parent);
-	struct gf100_fifo *fifo = (void *)object->engine;
-	struct gf100_fifo_chan *chan = (void *)object;
+	struct gf100_fifo_chan *chan = gf100_fifo_chan(base);
+	struct gf100_fifo *fifo = chan->fifo;
 	struct nvkm_device *device = fifo->base.engine.subdev.device;
-	u32 chid = chan->base.chid;
-	int ret;
+	u32 addr = chan->base.inst->addr >> 12;
+	u32 coff = chan->base.chid * 8;

-	ret = nvkm_fifo_channel_init(&chan->base);
-	if (ret)
-		return ret;
-
-	nvkm_wr32(device, 0x003000 + (chid * 8), 0xc0000000 | base->addr >> 12);
+	nvkm_wr32(device, 0x003000 + coff, 0xc0000000 | addr);

-	if (chan->state == STOPPED && (chan->state = RUNNING) == RUNNING) {
-		nvkm_wr32(device, 0x003004 + (chid * 8), 0x001f0001);
+	if (list_empty(&chan->head) && !chan->killed) {
+		list_add_tail(&chan->head, &fifo->chan);
+		nvkm_wr32(device, 0x003004 + coff, 0x001f0001);
 		gf100_fifo_runlist_update(fifo);
 	}
-	return 0;
 }

+static void *
+gf100_fifo_gpfifo_dtor(struct nvkm_fifo_chan *base)
+{
+	struct gf100_fifo_chan *chan = gf100_fifo_chan(base);
+	nvkm_vm_ref(NULL, &chan->vm, chan->pgd);
+	nvkm_gpuobj_del(&chan->pgd);
+	return chan;
+}
+
+static const struct nvkm_fifo_chan_func
+gf100_fifo_gpfifo_func = {
+	.dtor = gf100_fifo_gpfifo_dtor,
+	.init = gf100_fifo_gpfifo_init,
+	.fini = gf100_fifo_gpfifo_fini,
+	.ntfy = g84_fifo_chan_ntfy,
+	.engine_ctor = gf100_fifo_gpfifo_engine_ctor,
+	.engine_dtor = gf100_fifo_gpfifo_engine_dtor,
+	.engine_init = gf100_fifo_gpfifo_engine_init,
+	.engine_fini = gf100_fifo_gpfifo_engine_fini,
+};
+
 static int
-gf100_fifo_chan_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		     struct nvkm_oclass *oclass, void *data, u32 size,
-		     struct nvkm_object **pobject)
+gf100_fifo_gpfifo_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
+		      void *data, u32 size, struct nvkm_object **pobject)
 {
 	union {
 		struct fermi_channel_gpfifo_v0 v0;
 	} *args = data;
-	struct gf100_fifo *fifo = (void *)engine;
-	struct gf100_fifo_base *base = (void *)parent;
+	struct gf100_fifo *fifo = gf100_fifo(base);
+	struct nvkm_device *device = fifo->base.engine.subdev.device;
+	struct nvkm_object *parent = oclass->parent;
 	struct gf100_fifo_chan *chan;
-	struct nvkm_gpuobj *ramfc = &base->base.gpuobj;
 	u64 usermem, ioffset, ilength;
 	int ret, i;

 	nvif_ioctl(parent, "create channel gpfifo size %d\n", size);
 	if (nvif_unpack(args->v0, 0, 0, false)) {
 		nvif_ioctl(parent, "create channel gpfifo vers %d vm %llx "
 				   "ioffset %016llx ilength %08x\n",
 			   args->v0.version, args->v0.vm, args->v0.ioffset,
 			   args->v0.ilength);
-		if (args->v0.vm)
-			return -ENOENT;
 	} else
 		return ret;

-	ret = nvkm_fifo_channel_create(parent, engine, oclass, 1,
-				       fifo->user.bar.offset, 0x1000, 0,
-				       (1ULL << NVDEV_ENGINE_SW) |
-				       (1ULL << NVDEV_ENGINE_GR) |
-				       (1ULL << NVDEV_ENGINE_CE0) |
-				       (1ULL << NVDEV_ENGINE_CE1) |
-				       (1ULL << NVDEV_ENGINE_MSVLD) |
-				       (1ULL << NVDEV_ENGINE_MSPDEC) |
-				       (1ULL << NVDEV_ENGINE_MSPPP), &chan);
-	*pobject = nv_object(chan);
+	/* allocate channel */
+	if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
+		return -ENOMEM;
+	*pobject = &chan->base.object;
+	chan->fifo = fifo;
+	INIT_LIST_HEAD(&chan->head);
+
+	ret = nvkm_fifo_chan_ctor(&gf100_fifo_gpfifo_func, &fifo->base,
+				  0x1000, 0x1000, true, args->v0.vm, 0,
+				  (1ULL << NVDEV_ENGINE_CE0) |
+				  (1ULL << NVDEV_ENGINE_CE1) |
+				  (1ULL << NVDEV_ENGINE_GR) |
+				  (1ULL << NVDEV_ENGINE_MSPDEC) |
+				  (1ULL << NVDEV_ENGINE_MSPPP) |
+				  (1ULL << NVDEV_ENGINE_MSVLD) |
+				  (1ULL << NVDEV_ENGINE_SW),
+				  1, fifo->user.bar.offset, 0x1000,
+				  oclass, &chan->base);
 	if (ret)
 		return ret;

-	chan->base.inst = base->base.gpuobj.addr;
 	args->v0.chid = chan->base.chid;

-	nv_parent(chan)->context_attach = gf100_fifo_context_attach;
-	nv_parent(chan)->context_detach = gf100_fifo_context_detach;
+	/* page directory */
+	ret = nvkm_gpuobj_new(device, 0x10000, 0x1000, false, NULL, &chan->pgd);
+	if (ret)
+		return ret;
+
+	nvkm_kmap(chan->base.inst);
+	nvkm_wo32(chan->base.inst, 0x0200, lower_32_bits(chan->pgd->addr));
+	nvkm_wo32(chan->base.inst, 0x0204, upper_32_bits(chan->pgd->addr));
+	nvkm_wo32(chan->base.inst, 0x0208, 0xffffffff);
+	nvkm_wo32(chan->base.inst, 0x020c, 0x000000ff);
+	nvkm_done(chan->base.inst);
+
+	ret = nvkm_vm_ref(chan->base.vm, &chan->vm, chan->pgd);
+	if (ret)
+		return ret;
+
+	/* clear channel control registers */
 	usermem = chan->base.chid * 0x1000;
 	ioffset = args->v0.ioffset;
...
@@ -208,97 +266,33 @@ gf100_fifo_chan_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 	nvkm_done(fifo->user.mem);
 	usermem = nvkm_memory_addr(fifo->user.mem) + usermem;

-	nvkm_kmap(ramfc);
-	nvkm_wo32(ramfc, 0x08, lower_32_bits(usermem));
-	nvkm_wo32(ramfc, 0x0c, upper_32_bits(usermem));
-	nvkm_wo32(ramfc, 0x10, 0x0000face);
-	nvkm_wo32(ramfc, 0x30, 0xfffff902);
-	nvkm_wo32(ramfc, 0x48, lower_32_bits(ioffset));
-	nvkm_wo32(ramfc, 0x4c, upper_32_bits(ioffset) | (ilength << 16));
-	nvkm_wo32(ramfc, 0x54, 0x00000002);
-	nvkm_wo32(ramfc, 0x84, 0x20400000);
-	nvkm_wo32(ramfc, 0x94, 0x30000001);
-	nvkm_wo32(ramfc, 0x9c, 0x00000100);
-	nvkm_wo32(ramfc, 0xa4, 0x1f1f1f1f);
-	nvkm_wo32(ramfc, 0xa8, 0x1f1f1f1f);
-	nvkm_wo32(ramfc, 0xac, 0x0000001f);
-	nvkm_wo32(ramfc, 0xb8, 0xf8000000);
-	nvkm_wo32(ramfc, 0xf8, 0x10003080); /* 0x002310 */
-	nvkm_wo32(ramfc, 0xfc, 0x10000010); /* 0x002350 */
-	nvkm_done(ramfc);
-	return 0;
-}
-
-static struct nvkm_ofuncs
-gf100_fifo_ofuncs = {
-	.ctor = gf100_fifo_chan_ctor,
-	.dtor = _nvkm_fifo_channel_dtor,
-	.init = gf100_fifo_chan_init,
-	.fini = gf100_fifo_chan_fini,
-	.map  = _nvkm_fifo_channel_map,
-	.rd32 = _nvkm_fifo_channel_rd32,
-	.wr32 = _nvkm_fifo_channel_wr32,
-	.ntfy = _nvkm_fifo_channel_ntfy
-};
-
-struct nvkm_oclass
-gf100_fifo_sclass[] = {
-	{ FERMI_CHANNEL_GPFIFO, &gf100_fifo_ofuncs },
-	{}
-};
-
-static int
-gf100_fifo_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-			struct nvkm_oclass *oclass, void *data, u32 size,
-			struct nvkm_object **pobject)
-{
-	struct nvkm_device *device = nv_engine(engine)->subdev.device;
-	struct gf100_fifo_base *base;
-	int ret;
-
-	ret = nvkm_fifo_context_create(parent, engine, oclass, NULL, 0x1000,
-				       0x1000, NVOBJ_FLAG_ZERO_ALLOC |
-				       NVOBJ_FLAG_HEAP, &base);
-	*pobject = nv_object(base);
-	if (ret)
-		return ret;
-
-	ret = nvkm_gpuobj_new(device, 0x10000, 0x1000, false, NULL, &base->pgd);
-	if (ret)
-		return ret;
-
-	nvkm_kmap(&base->base.gpuobj);
-	nvkm_wo32(&base->base.gpuobj, 0x0200, lower_32_bits(base->pgd->addr));
-	nvkm_wo32(&base->base.gpuobj, 0x0204, upper_32_bits(base->pgd->addr));
-	nvkm_wo32(&base->base.gpuobj, 0x0208, 0xffffffff);
-	nvkm_wo32(&base->base.gpuobj, 0x020c, 0x000000ff);
-	nvkm_done(&base->base.gpuobj);
-
-	ret = nvkm_vm_ref(nvkm_client(parent)->vm, &base->vm, base->pgd);
-	if (ret)
-		return ret;
-
-	return 0;
-}
-
-static void
-gf100_fifo_context_dtor(struct nvkm_object *object)
-{
-	struct gf100_fifo_base *base = (void *)object;
-	nvkm_vm_ref(NULL, &base->vm, base->pgd);
-	nvkm_gpuobj_del(&base->pgd);
-	nvkm_fifo_context_destroy(&base->base);
-}
-
-struct nvkm_oclass
-gf100_fifo_cclass = {
-	.handle = NV_ENGCTX(FIFO, 0xc0),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gf100_fifo_context_ctor,
-		.dtor = gf100_fifo_context_dtor,
-		.init = _nvkm_fifo_context_init,
-		.fini = _nvkm_fifo_context_fini,
-		.rd32 = _nvkm_fifo_context_rd32,
-		.wr32 = _nvkm_fifo_context_wr32,
-	},
-};
+	/* RAMFC */
+	nvkm_kmap(chan->base.inst);
+	nvkm_wo32(chan->base.inst, 0x08, lower_32_bits(usermem));
+	nvkm_wo32(chan->base.inst, 0x0c, upper_32_bits(usermem));
+	nvkm_wo32(chan->base.inst, 0x10, 0x0000face);
+	nvkm_wo32(chan->base.inst, 0x30, 0xfffff902);
+	nvkm_wo32(chan->base.inst, 0x48, lower_32_bits(ioffset));
+	nvkm_wo32(chan->base.inst, 0x4c, upper_32_bits(ioffset) |
+					 (ilength << 16));
+	nvkm_wo32(chan->base.inst, 0x54, 0x00000002);
+	nvkm_wo32(chan->base.inst, 0x84, 0x20400000);
+	nvkm_wo32(chan->base.inst, 0x94, 0x30000001);
+	nvkm_wo32(chan->base.inst, 0x9c, 0x00000100);
+	nvkm_wo32(chan->base.inst, 0xa4, 0x1f1f1f1f);
+	nvkm_wo32(chan->base.inst, 0xa8, 0x1f1f1f1f);
+	nvkm_wo32(chan->base.inst, 0xac, 0x0000001f);
+	nvkm_wo32(chan->base.inst, 0xb8, 0xf8000000);
+	nvkm_wo32(chan->base.inst, 0xf8, 0x10003080); /* 0x002310 */
+	nvkm_wo32(chan->base.inst, 0xfc, 0x10000010); /* 0x002350 */
+	nvkm_done(chan->base.inst);
+	return 0;
+}
+
+const struct nvkm_fifo_chan_oclass
+gf100_fifo_gpfifo_oclass = {
+	.base.oclass = FERMI_CHANNEL_GPFIFO,
+	.base.minver = 0,
+	.base.maxver = 0,
+	.ctor = gf100_fifo_gpfifo_new,
+};
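The engine_addr/engine_init pair above is the core of per-engine context binding on Fermi: each engine owns a fixed slot in the channel's instance block, and engine_init patches the context address (with a valid/target nibble in the low word) into that slot. The standalone C mock-up below illustrates only that idea; the slot offsets mirror the ones in the diff (GR at 0x210, CE0 at 0x230), but the instance block is a plain array and all names are invented.

/* standalone sketch: patch an engine context pointer into a mock instance block */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

enum mock_engine { MOCK_SW, MOCK_GR, MOCK_CE0 };

static uint32_t mock_engine_addr(enum mock_engine e)
{
    switch (e) {
    case MOCK_SW:  return 0x0000;   /* software "engine": nothing to patch */
    case MOCK_GR:  return 0x0210;
    case MOCK_CE0: return 0x0230;
    default:       return 0x0000;
    }
}

static uint32_t inst[0x1000 / 4];   /* pretend 4 KiB instance block */

static void mock_wo32(uint32_t off, uint32_t val) { inst[off / 4] = val; }

/* like engine_init: low dword gets the address ORed with a valid nibble (4),
 * high dword gets the upper bits of the context address */
static void mock_engine_init(enum mock_engine e, uint64_t ctx_addr)
{
    uint32_t off = mock_engine_addr(e);
    if (!off)
        return;
    mock_wo32(off + 0x00, (uint32_t)ctx_addr | 4);
    mock_wo32(off + 0x04, (uint32_t)(ctx_addr >> 32));
}

int main(void)
{
    memset(inst, 0, sizeof(inst));
    mock_engine_init(MOCK_GR, 0x1234000ULL);
    printf("GR slot: %08x %08x\n",
           (unsigned)inst[0x210 / 4], (unsigned)inst[0x214 / 4]);
    return 0;
}

engine_fini is the mirror image: the same slot is written back to zero once the channel has been kicked off the engine.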
drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c

@@ -31,30 +31,13 @@
 #include <nvif/class.h>
 #include <nvif/unpack.h>

-#define _(a,b) { (a), ((1ULL << (a)) | (b)) }
-static const struct {
-	u64 subdev;
-	u64 mask;
-} fifo_engine[] = {
-	_(NVDEV_ENGINE_GR      , (1ULL << NVDEV_ENGINE_SW) |
-				 (1ULL << NVDEV_ENGINE_CE2)),
-	_(NVDEV_ENGINE_MSPDEC  , 0),
-	_(NVDEV_ENGINE_MSPPP   , 0),
-	_(NVDEV_ENGINE_MSVLD   , 0),
-	_(NVDEV_ENGINE_CE0     , 0),
-	_(NVDEV_ENGINE_CE1     , 0),
-	_(NVDEV_ENGINE_MSENC   , 0),
-};
-#undef _
-#define FIFO_ENGINE_NR ARRAY_SIZE(fifo_engine)
-
 static int
-gk104_fifo_chan_kick(struct gk104_fifo_chan *chan)
+gk104_fifo_gpfifo_kick(struct gk104_fifo_chan *chan)
 {
-	struct nvkm_object *obj = (void *)chan;
-	struct gk104_fifo *fifo = (void *)obj->engine;
+	struct gk104_fifo *fifo = chan->fifo;
 	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
 	struct nvkm_device *device = subdev->device;
+	struct nvkm_client *client = chan->base.object.client;

 	nvkm_wr32(device, 0x002634, chan->base.chid);
 	if (nvkm_msec(device, 2000,
@@ -62,198 +45,249 @@ gk104_fifo_chan_kick(struct gk104_fifo_chan *chan)
 			break;
 	) < 0) {
 		nvkm_error(subdev, "channel %d [%s] kick timeout\n",
-			   chan->base.chid, nvkm_client_name(chan));
+			   chan->base.chid, client->name);
 		return -EBUSY;
 	}

 	return 0;
 }

-static int
-gk104_fifo_context_detach(struct nvkm_object *parent, bool suspend,
-			  struct nvkm_object *object)
+static u32
+gk104_fifo_gpfifo_engine_addr(struct nvkm_engine *engine)
 {
-	struct gk104_fifo_base *base = (void *)parent->parent;
-	struct gk104_fifo_chan *chan = (void *)parent;
-	struct nvkm_gpuobj *engn = &base->base.gpuobj;
-	u32 addr;
-	int ret;
-
-	switch (nv_engidx(object->engine)) {
-	case NVDEV_ENGINE_SW    : return 0;
-	case NVDEV_ENGINE_CE0   :
-	case NVDEV_ENGINE_CE1   :
-	case NVDEV_ENGINE_CE2   : addr = 0x0000; break;
-	case NVDEV_ENGINE_GR    : addr = 0x0210; break;
-	case NVDEV_ENGINE_MSVLD : addr = 0x0270; break;
-	case NVDEV_ENGINE_MSPDEC: addr = 0x0250; break;
-	case NVDEV_ENGINE_MSPPP : addr = 0x0260; break;
+	switch (engine->subdev.index) {
+	case NVDEV_ENGINE_SW    :
+	case NVDEV_ENGINE_CE0   :
+	case NVDEV_ENGINE_CE1   :
+	case NVDEV_ENGINE_CE2   : return 0x0000;
+	case NVDEV_ENGINE_GR    : return 0x0210;
+	case NVDEV_ENGINE_MSPDEC: return 0x0250;
+	case NVDEV_ENGINE_MSPPP : return 0x0260;
+	case NVDEV_ENGINE_MSVLD : return 0x0270;
 	default:
-		return -EINVAL;
+		WARN_ON(1);
+		return 0;
 	}
+}

-	ret = gk104_fifo_chan_kick(chan);
+static int
+gk104_fifo_gpfifo_engine_fini(struct nvkm_fifo_chan *base,
+			      struct nvkm_engine *engine, bool suspend)
+{
+	const u32 offset = gk104_fifo_gpfifo_engine_addr(engine);
+	struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
+	struct nvkm_gpuobj *inst = chan->base.inst;
+	int ret;
+
+	ret = gk104_fifo_gpfifo_kick(chan);
 	if (ret && suspend)
 		return ret;

-	if (addr) {
-		nvkm_kmap(engn);
-		nvkm_wo32(engn, addr + 0x00, 0x00000000);
-		nvkm_wo32(engn, addr + 0x04, 0x00000000);
-		nvkm_done(engn);
+	if (offset) {
+		nvkm_kmap(inst);
+		nvkm_wo32(inst, offset + 0x00, 0x00000000);
+		nvkm_wo32(inst, offset + 0x04, 0x00000000);
+		nvkm_done(inst);
 	}
+	return ret;
+}

+static int
+gk104_fifo_gpfifo_engine_init(struct nvkm_fifo_chan *base,
+			      struct nvkm_engine *engine)
+{
+	const u32 offset = gk104_fifo_gpfifo_engine_addr(engine);
+	struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
+	struct nvkm_gpuobj *inst = chan->base.inst;
+
+	if (offset) {
+		u64 addr = chan->engn[engine->subdev.index].vma.offset;
+		nvkm_kmap(inst);
+		nvkm_wo32(inst, offset + 0x00, lower_32_bits(addr) | 4);
+		nvkm_wo32(inst, offset + 0x04, upper_32_bits(addr));
+		nvkm_done(inst);
+	}
 	return 0;
 }

+static void
+gk104_fifo_gpfifo_engine_dtor(struct nvkm_fifo_chan *base,
+			      struct nvkm_engine *engine)
+{
+	struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
+	nvkm_gpuobj_unmap(&chan->engn[engine->subdev.index].vma);
+	nvkm_gpuobj_del(&chan->engn[engine->subdev.index].inst);
+}
+
 static int
-gk104_fifo_context_attach(struct nvkm_object *parent,
-			  struct nvkm_object *object)
+gk104_fifo_gpfifo_engine_ctor(struct nvkm_fifo_chan *base,
+			      struct nvkm_engine *engine,
+			      struct nvkm_object *object)
 {
-	struct gk104_fifo_base *base = (void *)parent->parent;
-	struct nvkm_gpuobj *engn = &base->base.gpuobj;
-	struct nvkm_engctx *ectx = (void *)object;
-	u32 addr;
+	struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
+	int engn = engine->subdev.index;
 	int ret;

-	switch (nv_engidx(object->engine)) {
-	case NVDEV_ENGINE_SW    :
-		return 0;
-	case NVDEV_ENGINE_CE0   :
-	case NVDEV_ENGINE_CE1   :
-	case NVDEV_ENGINE_CE2   :
-		nv_engctx(ectx)->addr = nv_gpuobj(base)->addr >> 12;
-		return 0;
-	case NVDEV_ENGINE_GR    : addr = 0x0210; break;
-	case NVDEV_ENGINE_MSVLD : addr = 0x0270; break;
-	case NVDEV_ENGINE_MSPDEC: addr = 0x0250; break;
-	case NVDEV_ENGINE_MSPPP : addr = 0x0260; break;
-	default:
-		return -EINVAL;
-	}
+	if (!gk104_fifo_gpfifo_engine_addr(engine))
+		return 0;

-	if (!ectx->vma.node) {
-		ret = nvkm_gpuobj_map(nv_gpuobj(ectx), base->vm,
-				      NV_MEM_ACCESS_RW, &ectx->vma);
-		if (ret)
-			return ret;
-
-		nv_engctx(ectx)->addr = nv_gpuobj(base)->addr >> 12;
+	if (object->oclass) {
+		return nvkm_gpuobj_map(nv_gpuobj(object), chan->vm,
+				       NV_MEM_ACCESS_RW,
+				       &chan->engn[engn].vma);
 	}

-	nvkm_kmap(engn);
-	nvkm_wo32(engn, addr + 0x00, lower_32_bits(ectx->vma.offset) | 4);
-	nvkm_wo32(engn, addr + 0x04, upper_32_bits(ectx->vma.offset));
-	nvkm_done(engn);
-	return 0;
+	ret = nvkm_object_bind(object, NULL, 0, &chan->engn[engn].inst);
+	if (ret)
+		return ret;
+
+	return nvkm_gpuobj_map(chan->engn[engn].inst, chan->vm,
+			       NV_MEM_ACCESS_RW, &chan->engn[engn].vma);
 }

-static int
-gk104_fifo_chan_fini(struct nvkm_object *object, bool suspend)
+static void
+gk104_fifo_gpfifo_fini(struct nvkm_fifo_chan *base)
 {
-	struct gk104_fifo *fifo = (void *)object->engine;
-	struct gk104_fifo_chan *chan = (void *)object;
+	struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
+	struct gk104_fifo *fifo = chan->fifo;
 	struct nvkm_device *device = fifo->base.engine.subdev.device;
-	u32 chid = chan->base.chid;
+	u32 coff = chan->base.chid * 8;

-	if (chan->state == RUNNING && (chan->state = STOPPED) == STOPPED) {
-		nvkm_mask(device, 0x800004 + (chid * 8), 0x00000800, 0x00000800);
+	if (!list_empty(&chan->head)) {
+		list_del_init(&chan->head);
+		nvkm_mask(device, 0x800004 + coff, 0x00000800, 0x00000800);
 		gk104_fifo_runlist_update(fifo, chan->engine);
 	}

-	nvkm_wr32(device, 0x800000 + (chid * 8), 0x00000000);
-	return nvkm_fifo_channel_fini(&chan->base, suspend);
+	nvkm_wr32(device, 0x800000 + coff, 0x00000000);
 }

-static int
-gk104_fifo_chan_init(struct nvkm_object *object)
+static void
+gk104_fifo_gpfifo_init(struct nvkm_fifo_chan *base)
 {
-	struct nvkm_gpuobj *base = nv_gpuobj(object->parent);
-	struct gk104_fifo *fifo = (void *)object->engine;
-	struct gk104_fifo_chan *chan = (void *)object;
+	struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
+	struct gk104_fifo *fifo = chan->fifo;
 	struct nvkm_device *device = fifo->base.engine.subdev.device;
-	u32 chid = chan->base.chid;
-	int ret;
+	u32 addr = chan->base.inst->addr >> 12;
+	u32 coff = chan->base.chid * 8;

-	ret = nvkm_fifo_channel_init(&chan->base);
-	if (ret)
-		return ret;
-
-	nvkm_mask(device, 0x800004 + (chid * 8), 0x000f0000, chan->engine << 16);
-	nvkm_wr32(device, 0x800000 + (chid * 8), 0x80000000 | base->addr >> 12);
+	nvkm_mask(device, 0x800004 + coff, 0x000f0000, chan->engine << 16);
+	nvkm_wr32(device, 0x800000 + coff, 0x80000000 | addr);

-	if (chan->state == STOPPED && (chan->state = RUNNING) == RUNNING) {
-		nvkm_mask(device, 0x800004 + (chid * 8), 0x00000400, 0x00000400);
+	if (list_empty(&chan->head) && !chan->killed) {
+		list_add_tail(&chan->head, &fifo->engine[chan->engine].chan);
+		nvkm_mask(device, 0x800004 + coff, 0x00000400, 0x00000400);
 		gk104_fifo_runlist_update(fifo, chan->engine);
-		nvkm_mask(device, 0x800004 + (chid * 8), 0x00000400, 0x00000400);
+		nvkm_mask(device, 0x800004 + coff, 0x00000400, 0x00000400);
 	}
+}

-	return 0;
+static void *
+gk104_fifo_gpfifo_dtor(struct nvkm_fifo_chan *base)
+{
+	struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
+	nvkm_vm_ref(NULL, &chan->vm, chan->pgd);
+	nvkm_gpuobj_del(&chan->pgd);
+	return chan;
 }

-static int
-gk104_fifo_chan_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		     struct nvkm_oclass *oclass, void *data, u32 size,
-		     struct nvkm_object **pobject)
+static const struct nvkm_fifo_chan_func
+gk104_fifo_gpfifo_func = {
+	.dtor = gk104_fifo_gpfifo_dtor,
+	.init = gk104_fifo_gpfifo_init,
+	.fini = gk104_fifo_gpfifo_fini,
+	.ntfy = g84_fifo_chan_ntfy,
+	.engine_ctor = gk104_fifo_gpfifo_engine_ctor,
+	.engine_dtor = gk104_fifo_gpfifo_engine_dtor,
+	.engine_init = gk104_fifo_gpfifo_engine_init,
+	.engine_fini = gk104_fifo_gpfifo_engine_fini,
+};
+
+int
+gk104_fifo_gpfifo_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
+		      void *data, u32 size, struct nvkm_object **pobject)
 {
 	union {
 		struct kepler_channel_gpfifo_a_v0 v0;
 	} *args = data;
-	struct gk104_fifo *fifo = (void *)engine;
-	struct gk104_fifo_base *base = (void *)parent;
+	struct gk104_fifo *fifo = gk104_fifo(base);
+	struct nvkm_device *device = fifo->base.engine.subdev.device;
+	struct nvkm_object *parent = oclass->parent;
 	struct gk104_fifo_chan *chan;
-	struct nvkm_gpuobj *ramfc = &base->base.gpuobj;
 	u64 usermem, ioffset, ilength;
 	u32 engines;
 	int ret, i;

 	nvif_ioctl(parent, "create channel gpfifo size %d\n", size);
 	if (nvif_unpack(args->v0, 0, 0, false)) {
 		nvif_ioctl(parent, "create channel gpfifo vers %d vm %llx "
 				   "ioffset %016llx ilength %08x engine %08x\n",
 			   args->v0.version, args->v0.vm, args->v0.ioffset,
 			   args->v0.ilength, args->v0.engine);
-		if (args->v0.vm)
-			return -ENOENT;
 	} else
 		return ret;

-	for (i = 0, engines = 0; i < FIFO_ENGINE_NR; i++) {
-		if (!nvkm_engine(parent, fifo_engine[i].subdev))
+	/* determine which downstream engines are present */
+	for (i = 0, engines = 0; i < ARRAY_SIZE(fifo->engine); i++) {
+		u64 subdevs = gk104_fifo_engine_subdev(i);
+		if (!nvkm_device_engine(device, __ffs64(subdevs)))
 			continue;
 		engines |= (1 << i);
 	}

+	/* if this is an engine mask query, we're done */
 	if (!args->v0.engine) {
-		static struct nvkm_oclass oclass = {
-			.ofuncs = &nvkm_object_ofuncs,
-		};
 		args->v0.engine = engines;
-		return nvkm_object_old(parent, engine, &oclass, NULL, 0, pobject);
+		return nvkm_object_new(oclass, NULL, 0, pobject);
 	}

-	engines &= args->v0.engine;
-	if (!engines) {
-		nvif_ioctl(parent, "unsupported engines %08x\n",
-			   args->v0.engine);
+	/* check that we support a requested engine - note that the user
+	 * argument is a mask in order to allow the user to request (for
+	 * example) *any* copy engine, but doesn't matter which.
+	 */
+	args->v0.engine &= engines;
+	if (!args->v0.engine) {
+		nvif_ioctl(parent, "no supported engine\n");
 		return -ENODEV;
 	}
-	i = __ffs(engines);

-	ret = nvkm_fifo_channel_create(parent, engine, oclass, 1,
-				       fifo->user.bar.offset, 0x200, 0,
-				       fifo_engine[i].mask, &chan);
-	*pobject = nv_object(chan);
+	/* allocate the channel */
+	if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
+		return -ENOMEM;
+	*pobject = &chan->base.object;
+	chan->fifo = fifo;
+	chan->engine = __ffs(args->v0.engine);
+	INIT_LIST_HEAD(&chan->head);
+
+	ret = nvkm_fifo_chan_ctor(&gk104_fifo_gpfifo_func, &fifo->base,
+				  0x1000, 0x1000, true, args->v0.vm, 0,
+				  gk104_fifo_engine_subdev(chan->engine),
+				  1, fifo->user.bar.offset, 0x200,
+				  oclass, &chan->base);
 	if (ret)
 		return ret;

-	chan->base.inst = base->base.gpuobj.addr;
 	args->v0.chid = chan->base.chid;

-	nv_parent(chan)->context_attach = gk104_fifo_context_attach;
-	nv_parent(chan)->context_detach = gk104_fifo_context_detach;
-	chan->engine = i;
+	/* page directory */
+	ret = nvkm_gpuobj_new(device, 0x10000, 0x1000, false, NULL, &chan->pgd);
+	if (ret)
+		return ret;
+
+	nvkm_kmap(chan->base.inst);
+	nvkm_wo32(chan->base.inst, 0x0200, lower_32_bits(chan->pgd->addr));
+	nvkm_wo32(chan->base.inst, 0x0204, upper_32_bits(chan->pgd->addr));
+	nvkm_wo32(chan->base.inst, 0x0208, 0xffffffff);
+	nvkm_wo32(chan->base.inst, 0x020c, 0x000000ff);
+	nvkm_done(chan->base.inst);

+	ret = nvkm_vm_ref(chan->base.vm, &chan->vm, chan->pgd);
+	if (ret)
+		return ret;
+
+	/* clear channel control registers */
 	usermem = chan->base.chid * 0x200;
 	ioffset = args->v0.ioffset;
 	ilength = order_base_2(args->v0.ilength / 8);
...
@@ -264,94 +298,31 @@ gk104_fifo_chan_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 	nvkm_done(fifo->user.mem);
 	usermem = nvkm_memory_addr(fifo->user.mem) + usermem;

-	nvkm_kmap(ramfc);
-	nvkm_wo32(ramfc, 0x08, lower_32_bits(usermem));
-	nvkm_wo32(ramfc, 0x0c, upper_32_bits(usermem));
-	nvkm_wo32(ramfc, 0x10, 0x0000face);
-	nvkm_wo32(ramfc, 0x30, 0xfffff902);
-	nvkm_wo32(ramfc, 0x48, lower_32_bits(ioffset));
-	nvkm_wo32(ramfc, 0x4c, upper_32_bits(ioffset) | (ilength << 16));
-	nvkm_wo32(ramfc, 0x84, 0x20400000);
-	nvkm_wo32(ramfc, 0x94, 0x30000001);
-	nvkm_wo32(ramfc, 0x9c, 0x00000100);
-	nvkm_wo32(ramfc, 0xac, 0x0000001f);
-	nvkm_wo32(ramfc, 0xe8, chan->base.chid);
-	nvkm_wo32(ramfc, 0xb8, 0xf8000000);
-	nvkm_wo32(ramfc, 0xf8, 0x10003080); /* 0x002310 */
-	nvkm_wo32(ramfc, 0xfc, 0x10000010); /* 0x002350 */
-	nvkm_done(ramfc);
-	return 0;
-}
-
-struct nvkm_ofuncs
-gk104_fifo_chan_ofuncs = {
-	.ctor = gk104_fifo_chan_ctor,
-	.dtor = _nvkm_fifo_channel_dtor,
-	.init = gk104_fifo_chan_init,
-	.fini = gk104_fifo_chan_fini,
-	.map  = _nvkm_fifo_channel_map,
-	.rd32 = _nvkm_fifo_channel_rd32,
-	.wr32 = _nvkm_fifo_channel_wr32,
-	.ntfy = _nvkm_fifo_channel_ntfy
-};
-
-struct nvkm_oclass
-gk104_fifo_sclass[] = {
-	{ KEPLER_CHANNEL_GPFIFO_A, &gk104_fifo_chan_ofuncs },
-	{}
-};
-
-static int
-gk104_fifo_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-			struct nvkm_oclass *oclass, void *data, u32 size,
-			struct nvkm_object **pobject)
-{
-	struct nvkm_device *device = nv_engine(engine)->subdev.device;
-	struct gk104_fifo_base *base;
-	int ret;
-
-	ret = nvkm_fifo_context_create(parent, engine, oclass, NULL, 0x1000,
-				       0x1000, NVOBJ_FLAG_ZERO_ALLOC, &base);
-	*pobject = nv_object(base);
-	if (ret)
-		return ret;
-
-	ret = nvkm_gpuobj_new(device, 0x10000, 0x1000, false, NULL, &base->pgd);
-	if (ret)
-		return ret;
-
-	nvkm_kmap(&base->base.gpuobj);
-	nvkm_wo32(&base->base.gpuobj, 0x0200, lower_32_bits(base->pgd->addr));
-	nvkm_wo32(&base->base.gpuobj, 0x0204, upper_32_bits(base->pgd->addr));
-	nvkm_wo32(&base->base.gpuobj, 0x0208, 0xffffffff);
-	nvkm_wo32(&base->base.gpuobj, 0x020c, 0x000000ff);
-	nvkm_done(&base->base.gpuobj);
-
-	ret = nvkm_vm_ref(nvkm_client(parent)->vm, &base->vm, base->pgd);
-	if (ret)
-		return ret;
-
-	return 0;
-}
-
-static void
-gk104_fifo_context_dtor(struct nvkm_object *object)
-{
-	struct gk104_fifo_base *base = (void *)object;
-	nvkm_vm_ref(NULL, &base->vm, base->pgd);
-	nvkm_gpuobj_del(&base->pgd);
-	nvkm_fifo_context_destroy(&base->base);
-}
-
-struct nvkm_oclass
-gk104_fifo_cclass = {
-	.handle = NV_ENGCTX(FIFO, 0xe0),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gk104_fifo_context_ctor,
-		.dtor = gk104_fifo_context_dtor,
-		.init = _nvkm_fifo_context_init,
-		.fini = _nvkm_fifo_context_fini,
-		.rd32 = _nvkm_fifo_context_rd32,
-		.wr32 = _nvkm_fifo_context_wr32,
-	},
-};
+	/* RAMFC */
+	nvkm_kmap(chan->base.inst);
+	nvkm_wo32(chan->base.inst, 0x08, lower_32_bits(usermem));
+	nvkm_wo32(chan->base.inst, 0x0c, upper_32_bits(usermem));
+	nvkm_wo32(chan->base.inst, 0x10, 0x0000face);
+	nvkm_wo32(chan->base.inst, 0x30, 0xfffff902);
+	nvkm_wo32(chan->base.inst, 0x48, lower_32_bits(ioffset));
+	nvkm_wo32(chan->base.inst, 0x4c, upper_32_bits(ioffset) |
+					 (ilength << 16));
+	nvkm_wo32(chan->base.inst, 0x84, 0x20400000);
+	nvkm_wo32(chan->base.inst, 0x94, 0x30000001);
+	nvkm_wo32(chan->base.inst, 0x9c, 0x00000100);
+	nvkm_wo32(chan->base.inst, 0xac, 0x0000001f);
+	nvkm_wo32(chan->base.inst, 0xe8, chan->base.chid);
+	nvkm_wo32(chan->base.inst, 0xb8, 0xf8000000);
+	nvkm_wo32(chan->base.inst, 0xf8, 0x10003080); /* 0x002310 */
+	nvkm_wo32(chan->base.inst, 0xfc, 0x10000010); /* 0x002350 */
+	nvkm_done(chan->base.inst);
+	return 0;
+}
+
+const struct nvkm_fifo_chan_oclass
+gk104_fifo_gpfifo_oclass = {
+	.base.oclass = KEPLER_CHANNEL_GPFIFO_A,
+	.base.minver = 0,
+	.base.maxver = 0,
+	.ctor = gk104_fifo_gpfifo_new,
+};
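gk104_fifo_gpfifo_new treats args->v0.engine as a mask: userspace can ask for "any" suitable engine (for example any copy engine), the driver intersects the request with what the board actually exposes, and picks the lowest set bit as the runlist/engine index. The standalone sketch below shows only that selection step, with an arbitrary bit layout and invented names; __builtin_ctz stands in for the kernel's __ffs().

/* standalone sketch of the engine-mask intersection and pick */
#include <stdio.h>
#include <stdint.h>

#define MOCK_ENGINES_PRESENT 0x0000002dU   /* whatever this mock board has */

static int pick_engine(uint32_t requested)
{
    uint32_t usable;

    if (!requested)                 /* mask query: caller only wants the list */
        return -1;
    usable = requested & MOCK_ENGINES_PRESENT;
    if (!usable)                    /* nothing the caller asked for exists */
        return -2;
    return __builtin_ctz(usable);   /* lowest set bit, like __ffs() */
}

int main(void)
{
    printf("picked runlist/engine index %d\n", pick_engine(0x0c));
    return 0;
}

Passing a zero mask corresponds to the "engine mask query" branch above, where the constructor just reports the supported engines and returns a bare object.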
drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogm204.c

@@ -25,8 +25,10 @@
 #include <nvif/class.h>

-struct nvkm_oclass
-gm204_fifo_sclass[] = {
-	{ MAXWELL_CHANNEL_GPFIFO_A, &gk104_fifo_chan_ofuncs },
-	{}
+const struct nvkm_fifo_chan_oclass
+gm204_fifo_gpfifo_oclass = {
+	.base.oclass = MAXWELL_CHANNEL_GPFIFO_A,
+	.base.minver = 0,
+	.base.maxver = 0,
+	.ctor = gk104_fifo_gpfifo_new,
 };
drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifonv50.c

@@ -30,15 +30,14 @@
 #include <nvif/unpack.h>

 static int
-nv50_fifo_chan_ctor_ind(struct nvkm_object *parent, struct nvkm_object *engine,
-			struct nvkm_oclass *oclass, void *data, u32 size,
-			struct nvkm_object **pobject)
+nv50_fifo_gpfifo_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
+		     void *data, u32 size, struct nvkm_object **pobject)
 {
+	struct nvkm_object *parent = oclass->parent;
 	union {
 		struct nv50_channel_gpfifo_v0 v0;
 	} *args = data;
-	struct nvkm_device *device = parent->engine->subdev.device;
-	struct nv50_fifo_base *base = (void *)parent;
+	struct nv50_fifo *fifo = nv50_fifo(base);
 	struct nv50_fifo_chan *chan;
 	u64 ioffset, ilength;
 	int ret;
...
@@ -50,61 +49,44 @@ nv50_fifo_chan_ctor_ind(struct nvkm_object *parent, struct nvkm_object *engine,
 			   "ilength %08x\n",
 			   args->v0.version, args->v0.vm, args->v0.pushbuf,
 			   args->v0.ioffset, args->v0.ilength);
-		if (args->v0.vm)
-			return -ENOENT;
+		if (!args->v0.pushbuf)
+			return -EINVAL;
 	} else
 		return ret;

-	ret = nvkm_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
-				       0x2000, args->v0.pushbuf,
-				       (1ULL << NVDEV_ENGINE_DMAOBJ) |
-				       (1ULL << NVDEV_ENGINE_SW) |
-				       (1ULL << NVDEV_ENGINE_GR) |
-				       (1ULL << NVDEV_ENGINE_MPEG), &chan);
-	*pobject = nv_object(chan);
+	if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
+		return -ENOMEM;
+	*pobject = &chan->base.object;
+
+	ret = nv50_fifo_chan_ctor(fifo, args->v0.vm, args->v0.pushbuf,
+				  oclass, chan);
 	if (ret)
 		return ret;

-	chan->base.inst = base->base.gpuobj.addr;
 	args->v0.chid = chan->base.chid;

-	nv_parent(chan)->context_attach = nv50_fifo_context_attach;
-	nv_parent(chan)->context_detach = nv50_fifo_context_detach;
-	nv_parent(chan)->object_attach = nv50_fifo_object_attach;
-	nv_parent(chan)->object_detach = nv50_fifo_object_detach;
-
-	ret = nvkm_ramht_new(device, 0x8000, 16, &base->base.gpuobj,
-			     &chan->ramht);
-	if (ret)
-		return ret;
-
 	ioffset = args->v0.ioffset;
 	ilength = order_base_2(args->v0.ilength / 8);

-	nvkm_kmap(base->ramfc);
-	nvkm_wo32(base->ramfc, 0x3c, 0x403f6078);
-	nvkm_wo32(base->ramfc, 0x44, 0x01003fff);
-	nvkm_wo32(base->ramfc, 0x48, chan->base.pushgpu->node->offset >> 4);
-	nvkm_wo32(base->ramfc, 0x50, lower_32_bits(ioffset));
-	nvkm_wo32(base->ramfc, 0x54, upper_32_bits(ioffset) | (ilength << 16));
-	nvkm_wo32(base->ramfc, 0x60, 0x7fffffff);
-	nvkm_wo32(base->ramfc, 0x78, 0x00000000);
-	nvkm_wo32(base->ramfc, 0x7c, 0x30000001);
-	nvkm_wo32(base->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
-				     (4 << 24) /* SEARCH_FULL */ |
-				     (chan->ramht->gpuobj->node->offset >> 4));
-	nvkm_done(base->ramfc);
+	nvkm_kmap(chan->ramfc);
+	nvkm_wo32(chan->ramfc, 0x3c, 0x403f6078);
+	nvkm_wo32(chan->ramfc, 0x44, 0x01003fff);
+	nvkm_wo32(chan->ramfc, 0x48, chan->base.push->node->offset >> 4);
+	nvkm_wo32(chan->ramfc, 0x50, lower_32_bits(ioffset));
+	nvkm_wo32(chan->ramfc, 0x54, upper_32_bits(ioffset) | (ilength << 16));
+	nvkm_wo32(chan->ramfc, 0x60, 0x7fffffff);
+	nvkm_wo32(chan->ramfc, 0x78, 0x00000000);
+	nvkm_wo32(chan->ramfc, 0x7c, 0x30000001);
+	nvkm_wo32(chan->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
+				     (4 << 24) /* SEARCH_FULL */ |
+				     (chan->ramht->gpuobj->node->offset >> 4));
+	nvkm_done(chan->ramfc);
 	return 0;
 }

-struct nvkm_ofuncs
-nv50_fifo_ofuncs_ind = {
-	.ctor = nv50_fifo_chan_ctor_ind,
-	.dtor = nv50_fifo_chan_dtor,
-	.init = nv50_fifo_chan_init,
-	.fini = nv50_fifo_chan_fini,
-	.map  = _nvkm_fifo_channel_map,
-	.rd32 = _nvkm_fifo_channel_rd32,
-	.wr32 = _nvkm_fifo_channel_wr32,
-	.ntfy = _nvkm_fifo_channel_ntfy
+const struct nvkm_fifo_chan_oclass
+nv50_fifo_gpfifo_oclass = {
+	.base.oclass = NV50_CHANNEL_GPFIFO,
+	.base.minver = 0,
+	.base.maxver = 0,
+	.ctor = nv50_fifo_gpfifo_new,
 };
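All of the GPFIFO constructors above encode the ring size the same way: ilength is order_base_2(bytes / 8), that is, the log2 of the number of 8-byte GP entries, and it is later ORed into the high word together with upper_32_bits(ioffset). A small worked example follows, as a standalone sketch assuming a 64 KiB ring passed in from userspace; the helper is a plain reimplementation of order_base_2 for illustration, not the kernel macro.

/* worked example of the ilength encoding */
#include <stdio.h>
#include <stdint.h>

static unsigned order_base_2_sketch(uint64_t n)   /* smallest o with 2^o >= n */
{
    unsigned o = 0;
    while ((1ULL << o) < n)
        o++;
    return o;
}

int main(void)
{
    uint64_t bytes = 0x10000;            /* 64 KiB GPFIFO ring */
    uint64_t entries = bytes / 8;         /* 8-byte GP entries */
    unsigned ilength = order_base_2_sketch(entries);

    /* the field written as: upper_32_bits(ioffset) | (ilength << 16) */
    printf("%llu entries -> ilength field %u\n",
           (unsigned long long)entries, ilength);
    return 0;
}

For the 64 KiB ring this prints 8192 entries and an ilength of 13, which is why the ring size handed in by userspace has to be a power of two times eight bytes.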
drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c

@@ -25,6 +25,7 @@
 #include "channv04.h"
 #include "regsnv04.h"

+#include <core/client.h>
 #include <core/handle.h>
 #include <core/ramht.h>
 #include <subdev/instmem.h>
...
@@ -136,6 +137,8 @@ nv04_fifo_cache_error(struct nv04_fifo *fifo, u32 chid, u32 get)
 {
 	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
 	struct nvkm_device *device = subdev->device;
+	struct nvkm_fifo_chan *chan;
+	unsigned long flags;
 	u32 pull0 = nvkm_rd32(device, 0x003250);
 	u32 mthd, data;
 	int ptr;
...
@@ -157,12 +160,12 @@ nv04_fifo_cache_error(struct nv04_fifo *fifo, u32 chid, u32 get)
 	if (!(pull0 & 0x00000100) ||
 	    !nv04_fifo_swmthd(device, chid, mthd, data)) {
-		const char *client_name =
-			nvkm_client_name_for_fifo_chid(&fifo->base, chid);
+		chan = nvkm_fifo_chan_chid(&fifo->base, chid, &flags);
 		nvkm_error(subdev, "CACHE_ERROR - "
 			   "ch %d [%s] subc %d mthd %04x data %08x\n",
-			   chid, client_name, (mthd >> 13) & 7, mthd & 0x1ffc,
-			   data);
+			   chid, chan ? chan->object.client->name : "unknown",
+			   (mthd >> 13) & 7, mthd & 0x1ffc, data);
+		nvkm_fifo_chan_put(&fifo->base, flags, &chan);
 	}

 	nvkm_wr32(device, NV04_PFIFO_CACHE1_DMA_PUSH, 0);
...
@@ -189,10 +192,12 @@ nv04_fifo_dma_pusher(struct nv04_fifo *fifo, u32 chid)
 	u32 dma_put = nvkm_rd32(device, 0x003240);
 	u32 push = nvkm_rd32(device, 0x003220);
 	u32 state = nvkm_rd32(device, 0x003228);
-	const char *client_name;
+	struct nvkm_fifo_chan *chan;
+	unsigned long flags;
+	const char *name;

-	client_name = nvkm_client_name_for_fifo_chid(&fifo->base, chid);
+	chan = nvkm_fifo_chan_chid(&fifo->base, chid, &flags);
+	name = chan ? chan->object.client->name : "unknown";
 	if (device->card_type == NV_50) {
 		u32 ho_get = nvkm_rd32(device, 0x003328);
 		u32 ho_put = nvkm_rd32(device, 0x003320);
...
@@ -202,7 +207,7 @@ nv04_fifo_dma_pusher(struct nv04_fifo *fifo, u32 chid)
 		nvkm_error(subdev, "DMA_PUSHER - "
 			   "ch %d [%s] get %02x%08x put %02x%08x ib_get %08x "
 			   "ib_put %08x state %08x (err: %s) push %08x\n",
-			   chid, client_name, ho_get, dma_get, ho_put, dma_put,
+			   chid, name, ho_get, dma_get, ho_put, dma_put,
 			   ib_get, ib_put, state, nv_dma_state_err(state),
 			   push);
...
@@ -217,12 +222,13 @@ nv04_fifo_dma_pusher(struct nv04_fifo *fifo, u32 chid)
 	} else {
 		nvkm_error(subdev, "DMA_PUSHER - ch %d [%s] get %08x put %08x "
 			   "state %08x (err: %s) push %08x\n",
-			   chid, client_name, dma_get, dma_put, state,
+			   chid, name, dma_get, dma_put, state,
 			   nv_dma_state_err(state), push);

 		if (dma_get != dma_put)
 			nvkm_wr32(device, 0x003244, dma_put);
 	}
+	nvkm_fifo_chan_put(&fifo->base, flags, &chan);

 	nvkm_wr32(device, 0x003228, 0x00000000);
 	nvkm_wr32(device, 0x003220, 0x00000001);
...
@@ -241,7 +247,7 @@ nv04_fifo_intr(struct nvkm_subdev *subdev)
 	reassign = nvkm_rd32(device, NV03_PFIFO_CACHES) & 1;
 	nvkm_wr32(device, NV03_PFIFO_CACHES, 0);

-	chid = nvkm_rd32(device, NV03_PFIFO_CACHE1_PUSH1) & fifo->base.max;
+	chid = nvkm_rd32(device, NV03_PFIFO_CACHE1_PUSH1) & (fifo->base.nr - 1);
 	get  = nvkm_rd32(device, NV03_PFIFO_CACHE1_GET);

 	if (stat & NV_PFIFO_INTR_CACHE_ERROR) {
...
@@ -311,7 +317,7 @@ nv04_fifo_init(struct nvkm_object *object)
 	nvkm_wr32(device, NV03_PFIFO_RAMRO, nvkm_memory_addr(ramro) >> 8);
 	nvkm_wr32(device, NV03_PFIFO_RAMFC, nvkm_memory_addr(ramfc) >> 8);

-	nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH1, fifo->base.max);
+	nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH1, fifo->base.nr - 1);

 	nvkm_wr32(device, NV03_PFIFO_INTR_0, 0xffffffff);
 	nvkm_wr32(device, NV03_PFIFO_INTR_EN_0, 0xffffffff);
...
@@ -329,6 +335,14 @@ nv04_fifo_dtor(struct nvkm_object *object)
 	nvkm_fifo_destroy(&fifo->base);
 }

+static const struct nvkm_fifo_func
+nv04_fifo_func = {
+	.chan = {
+		&nv04_fifo_dma_oclass,
+		NULL
+	},
+};
+
 static int
 nv04_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 	       struct nvkm_oclass *oclass, void *data, u32 size,
...
@@ -342,10 +356,10 @@ nv04_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 	if (ret)
 		return ret;

+	fifo->base.func = &nv04_fifo_func;
+
 	nv_subdev(fifo)->unit = 0x00000100;
 	nv_subdev(fifo)->intr = nv04_fifo_intr;
-	nv_engine(fifo)->cclass = &nv04_fifo_cclass;
-	nv_engine(fifo)->sclass = nv04_fifo_sclass;
 	fifo->base.pause = nv04_fifo_pause;
 	fifo->base.start = nv04_fifo_start;
 	fifo->ramfc_desc = nv04_ramfc;
...
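The interrupt paths above now resolve a channel id to a channel object (under the fifo lock), read the owning client's name for the error message, and drop the reference with nvkm_fifo_chan_put(). The standalone mock below shows only that lookup-and-release shape; locking is omitted and every name is invented, the real helpers being nvkm_fifo_chan_chid() and nvkm_fifo_chan_put().

/* standalone sketch: chid -> channel lookup for error reporting */
#include <stdio.h>
#include <stddef.h>

struct mock_chan { int chid; const char *client_name; };

static struct mock_chan mock_channels[] = {
    { 0, "Xorg" }, { 3, "glxgears" },
};

static struct mock_chan *mock_chan_chid(int chid)
{
    for (size_t i = 0; i < sizeof(mock_channels) / sizeof(mock_channels[0]); i++)
        if (mock_channels[i].chid == chid)
            return &mock_channels[i];
    return NULL;                           /* channel already torn down */
}

static void mock_chan_put(struct mock_chan **chan) { *chan = NULL; }

int main(void)
{
    int chid = 3;
    struct mock_chan *chan = mock_chan_chid(chid);

    printf("CACHE_ERROR - ch %d [%s]\n",
           chid, chan ? chan->client_name : "unknown");
    mock_chan_put(&chan);
    return 0;
}

The "unknown" fallback matters because the faulting channel can disappear between the interrupt firing and the handler looking it up.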
drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.h

 #ifndef __NV04_FIFO_H__
 #define __NV04_FIFO_H__
+#define nv04_fifo(p) container_of((p), struct nv04_fifo, base)
 #include "priv.h"

 struct ramfc_desc {
...
@@ -15,14 +16,6 @@ struct nv04_fifo {
 	struct ramfc_desc *ramfc_desc;
 };

-struct nv04_fifo_base {
-	struct nvkm_fifo_base base;
-};
-
-int  nv04_fifo_context_ctor(struct nvkm_object *, struct nvkm_object *,
-			    struct nvkm_oclass *, void *, u32,
-			    struct nvkm_object **);
-
 void nv04_fifo_dtor(struct nvkm_object *);
 int  nv04_fifo_init(struct nvkm_object *);
 #endif
drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv10.c

@@ -39,16 +39,11 @@ nv10_ramfc[] = {
 	{}
 };

-static struct nvkm_oclass
-nv10_fifo_cclass = {
-	.handle = NV_ENGCTX(FIFO, 0x10),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_fifo_context_ctor,
-		.dtor = _nvkm_fifo_context_dtor,
-		.init = _nvkm_fifo_context_init,
-		.fini = _nvkm_fifo_context_fini,
-		.rd32 = _nvkm_fifo_context_rd32,
-		.wr32 = _nvkm_fifo_context_wr32,
-	},
+static const struct nvkm_fifo_func
+nv10_fifo_func = {
+	.chan = {
+		&nv10_fifo_dma_oclass,
+		NULL
+	},
 };
...
@@ -65,10 +60,10 @@ nv10_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 	if (ret)
 		return ret;

+	fifo->base.func = &nv10_fifo_func;
+
 	nv_subdev(fifo)->unit = 0x00000100;
 	nv_subdev(fifo)->intr = nv04_fifo_intr;
-	nv_engine(fifo)->cclass = &nv10_fifo_cclass;
-	nv_engine(fifo)->sclass = nv10_fifo_sclass;
 	fifo->base.pause = nv04_fifo_pause;
 	fifo->base.start = nv04_fifo_start;
 	fifo->ramfc_desc = nv10_ramfc;
...
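The per-chipset nvkm_fifo_func.chan[] list that replaces the old cclass/sclass wiring here is just a NULL-terminated array of class descriptors; chipset-independent code walks it without caring which board built it. A toy standalone version of the same shape, with all names invented:

/* standalone sketch of a NULL-terminated per-chipset class list */
#include <stdio.h>

struct mock_class { const char *name; };

static const struct mock_class mock_dma = { "dma channel" };

struct mock_fifo_func {
    const struct mock_class *chan[4];   /* NULL-terminated */
};

static const struct mock_fifo_func mock_nv10_func = {
    .chan = { &mock_dma, NULL },
};

int main(void)
{
    for (int i = 0; mock_nv10_func.chan[i]; i++)
        printf("class %d: %s\n", i, mock_nv10_func.chan[i]->name);
    return 0;
}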
drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv17.c

@@ -47,19 +47,6 @@ nv17_ramfc[] = {
 	{}
 };

-static struct nvkm_oclass
-nv17_fifo_cclass = {
-	.handle = NV_ENGCTX(FIFO, 0x17),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_fifo_context_ctor,
-		.dtor = _nvkm_fifo_context_dtor,
-		.init = _nvkm_fifo_context_init,
-		.fini = _nvkm_fifo_context_fini,
-		.rd32 = _nvkm_fifo_context_rd32,
-		.wr32 = _nvkm_fifo_context_wr32,
-	},
-};
-
 static int
 nv17_fifo_init(struct nvkm_object *object)
 {
...
@@ -85,7 +72,7 @@ nv17_fifo_init(struct nvkm_object *object)
 	nvkm_wr32(device, NV03_PFIFO_RAMFC, nvkm_memory_addr(ramfc) >> 8 |
 					    0x00010000);

-	nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH1, fifo->base.max);
+	nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH1, fifo->base.nr - 1);

 	nvkm_wr32(device, NV03_PFIFO_INTR_0, 0xffffffff);
 	nvkm_wr32(device, NV03_PFIFO_INTR_EN_0, 0xffffffff);
...
@@ -96,6 +83,14 @@ nv17_fifo_init(struct nvkm_object *object)
 	return 0;
 }

+static const struct nvkm_fifo_func
+nv17_fifo_func = {
+	.chan = {
+		&nv17_fifo_dma_oclass,
+		NULL
+	},
+};
+
 static int
 nv17_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 	       struct nvkm_oclass *oclass, void *data, u32 size,
...
@@ -109,10 +104,10 @@ nv17_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 	if (ret)
 		return ret;

+	fifo->base.func = &nv17_fifo_func;
+
 	nv_subdev(fifo)->unit = 0x00000100;
 	nv_subdev(fifo)->intr = nv04_fifo_intr;
-	nv_engine(fifo)->cclass = &nv17_fifo_cclass;
-	nv_engine(fifo)->sclass = nv17_fifo_sclass;
 	fifo->base.pause = nv04_fifo_pause;
 	fifo->base.start = nv04_fifo_start;
 	fifo->ramfc_desc = nv17_ramfc;
...
drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv40.c

@@ -56,19 +56,6 @@ nv40_ramfc[] = {
 	{}
 };

-static struct nvkm_oclass
-nv40_fifo_cclass = {
-	.handle = NV_ENGCTX(FIFO, 0x40),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_fifo_context_ctor,
-		.dtor = _nvkm_fifo_context_dtor,
-		.init = _nvkm_fifo_context_init,
-		.fini = _nvkm_fifo_context_fini,
-		.rd32 = _nvkm_fifo_context_rd32,
-		.wr32 = _nvkm_fifo_context_wr32,
-	},
-};
-
 static int
 nv40_fifo_init(struct nvkm_object *object)
 {
...
@@ -115,7 +102,7 @@ nv40_fifo_init(struct nvkm_object *object)
 		break;
 	}

-	nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH1, fifo->base.max);
+	nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH1, fifo->base.nr - 1);

 	nvkm_wr32(device, NV03_PFIFO_INTR_0, 0xffffffff);
 	nvkm_wr32(device, NV03_PFIFO_INTR_EN_0, 0xffffffff);
...
@@ -126,6 +113,14 @@ nv40_fifo_init(struct nvkm_object *object)
 	return 0;
 }

+static const struct nvkm_fifo_func
+nv40_fifo_func = {
+	.chan = {
+		&nv40_fifo_dma_oclass,
+		NULL
+	},
+};
+
 static int
 nv40_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 	       struct nvkm_oclass *oclass, void *data, u32 size,
...
@@ -139,10 +134,10 @@ nv40_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 	if (ret)
 		return ret;

+	fifo->base.func = &nv40_fifo_func;
+
 	nv_subdev(fifo)->unit = 0x00000100;
 	nv_subdev(fifo)->intr = nv04_fifo_intr;
-	nv_engine(fifo)->cclass = &nv40_fifo_cclass;
-	nv_engine(fifo)->sclass = nv40_fifo_sclass;
 	fifo->base.pause = nv04_fifo_pause;
 	fifo->base.start = nv04_fifo_start;
 	fifo->ramfc_desc = nv40_ramfc;
...
drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.c
...
@@ -35,7 +35,7 @@ nv50_fifo_runlist_update_locked(struct nv50_fifo *fifo)
 	fifo->cur_runlist = !fifo->cur_runlist;
 
 	nvkm_kmap(cur);
-	for (i = fifo->base.min, p = 0; i < fifo->base.max; i++) {
+	for (i = 0, p = 0; i < fifo->base.nr; i++) {
 		if (nvkm_rd32(device, 0x002600 + (i * 4)) & 0x80000000)
 			nvkm_wo32(cur, p++ * 4, i);
 	}
...
@@ -94,6 +94,15 @@ nv50_fifo_dtor(struct nvkm_object *object)
 	nvkm_fifo_destroy(&fifo->base);
 }
 
+static const struct nvkm_fifo_func
+nv50_fifo_func = {
+	.chan = {
+		&nv50_fifo_dma_oclass,
+		&nv50_fifo_gpfifo_oclass,
+		NULL
+	},
+};
+
 static int
 nv50_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 	       struct nvkm_oclass *oclass, void *data, u32 size,
...
@@ -108,6 +117,8 @@ nv50_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 	if (ret)
 		return ret;
 
+	fifo->base.func = &nv50_fifo_func;
+
 	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 128 * 4, 0x1000,
 			      false, &fifo->runlist[0]);
 	if (ret)
...
@@ -120,8 +131,6 @@ nv50_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 	nv_subdev(fifo)->unit = 0x00000100;
 	nv_subdev(fifo)->intr = nv04_fifo_intr;
-	nv_engine(fifo)->cclass = &nv50_fifo_cclass;
-	nv_engine(fifo)->sclass = nv50_fifo_sclass;
 	fifo->base.pause = nv04_fifo_pause;
 	fifo->base.start = nv04_fifo_start;
 
 	return 0;
...
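Note: the nv17/nv40/nv50 hunks above all make the same change: the old nv_engine(fifo)->cclass / ->sclass hooks go away, and each chipset instead installs a const nvkm_fifo_func table (via fifo->base.func) whose .chan member is a NULL-terminated list of channel-class pointers. As a rough illustration of that NULL-terminated class-list idiom only, here is a small stand-alone C sketch; every identifier in it (chan_oclass, fifo_func, dma_oclass and so on) is a made-up stand-in, not the real nvkm API.

/* Stand-alone sketch of a NULL-terminated channel-class list; all names
 * here are illustrative stand-ins, not the real nvkm definitions. */
#include <stdio.h>

struct chan_oclass {
	const char *name;			/* e.g. a channel class label */
};

struct fifo_func {
	const struct chan_oclass *chan[4];	/* NULL-terminated class list */
};

static const struct chan_oclass dma_oclass    = { "CHANNEL_DMA" };
static const struct chan_oclass gpfifo_oclass = { "CHANNEL_GPFIFO" };

/* per-chipset table, mirroring the nvXX_fifo_func structs in the diff */
static const struct fifo_func fifo_func = {
	.chan = { &dma_oclass, &gpfifo_oclass, NULL },
};

int main(void)
{
	/* enumerating the user classes becomes a walk over func->chan[] */
	for (int i = 0; fifo_func.chan[i]; i++)
		printf("class %d: %s\n", i, fifo_func.chan[i]->name);
	return 0;
}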
drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.h
 #ifndef __NV50_FIFO_H__
 #define __NV50_FIFO_H__
+#define nv50_fifo(p) container_of((p), struct nv50_fifo, base)
 #include "priv.h"
 
 struct nv50_fifo {
...
drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h
 #ifndef __NVKM_FIFO_PRIV_H__
 #define __NVKM_FIFO_PRIV_H__
 #include <engine/fifo.h>
+#include <core/engctx.h>
 
 void nv04_fifo_pause(struct nvkm_fifo *, unsigned long *);
 void nv04_fifo_start(struct nvkm_fifo *, unsigned long *);
...
drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
...
@@ -1168,10 +1168,14 @@ gf100_gr_intr(struct nvkm_subdev *subdev)
 	u32 data = nvkm_rd32(device, 0x400708);
 	u32 code = nvkm_rd32(device, 0x400110);
 	u32 class;
-	int chid;
+	const char *name = "unknown";
+	int chid = -1;
 
 	chan = nvkm_fifo_chan_inst(device->fifo, (u64)inst << 12, &flags);
-	chid = chan ? chan->chid : -1;
+	if (chan) {
+		name = chan->object.client->name;
+		chid = chan->chid;
+	}
 
 	if (nv_device(gr)->card_type < NV_E0 || subc < 4)
 		class = nvkm_rd32(device, 0x404200 + (subc * 4));
...
@@ -1191,8 +1195,8 @@ gf100_gr_intr(struct nvkm_subdev *subdev)
 		if (!gf100_gr_mthd_sw(device, class, mthd, data)) {
 			nvkm_error(subdev, "ILLEGAL_MTHD ch %d [%010llx %s] "
 				   "subc %d class %04x mthd %04x data %08x\n",
-				   chid, inst << 12, nvkm_client_name(chan),
-				   subc, class, mthd, data);
+				   chid, inst << 12, name, subc,
+				   class, mthd, data);
 		}
 		nvkm_wr32(device, 0x400100, 0x00000010);
 		stat &= ~0x00000010;
...
@@ -1201,8 +1205,7 @@ gf100_gr_intr(struct nvkm_subdev *subdev)
 	if (stat & 0x00000020) {
 		nvkm_error(subdev, "ILLEGAL_CLASS ch %d [%010llx %s] "
 			   "subc %d class %04x mthd %04x data %08x\n",
-			   chid, inst << 12, nvkm_client_name(chan), subc,
-			   class, mthd, data);
+			   chid, inst << 12, name, subc, class, mthd, data);
 		nvkm_wr32(device, 0x400100, 0x00000020);
 		stat &= ~0x00000020;
 	}
...
@@ -1213,14 +1216,14 @@ gf100_gr_intr(struct nvkm_subdev *subdev)
 		nvkm_error(subdev, "DATA_ERROR %08x [%s] ch %d [%010llx %s] "
 			   "subc %d class %04x mthd %04x data %08x\n",
 			   code, en ? en->name : "", chid, inst << 12,
-			   nvkm_client_name(chan), subc, class, mthd, data);
+			   name, subc, class, mthd, data);
 		nvkm_wr32(device, 0x400100, 0x00100000);
 		stat &= ~0x00100000;
 	}
 
 	if (stat & 0x00200000) {
 		nvkm_error(subdev, "TRAP ch %d [%010llx %s]\n",
-			   chid, inst << 12, nvkm_client_name(chan));
+			   chid, inst << 12, name);
 		gf100_gr_trap_intr(gr);
 		nvkm_wr32(device, 0x400100, 0x00200000);
 		stat &= ~0x00200000;
...
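Note: this gf100 change, and the nv20/nv40/nv50 graphics, mpeg, sec and fb hunks that follow, converge on one error-reporting shape: look the channel up once with nvkm_fifo_chan_inst(), read the channel id and the owning client's name directly from the returned channel (falling back to -1 and "unknown" when no channel matches), and drop the reference with nvkm_fifo_chan_put(), instead of calling the old nvkm_client_name() helper. The stand-alone C sketch below only mirrors that shape; its types and helpers (client, object, fifo_chan, chan_lookup, report_trap) are simplified stand-ins, not the real nvkm structures.

/* Stand-alone sketch of the lookup / fallback / report pattern; the types
 * and helpers are simplified stand-ins, not the real nvkm API. */
#include <stdio.h>
#include <stddef.h>

struct client    { const char *name; };
struct object    { struct client *client; };
struct fifo_chan { struct object object; int chid; };

/* stand-in for nvkm_fifo_chan_inst(): find a channel by instance address */
static struct fifo_chan *chan_lookup(unsigned long long inst)
{
	(void)inst;
	return NULL;		/* pretend no channel matched */
}

static void report_trap(unsigned long long inst)
{
	struct fifo_chan *chan = chan_lookup(inst);
	const char *name = "unknown";
	int chid = -1;

	if (chan) {		/* fall back gracefully when lookup fails */
		name = chan->object.client->name;
		chid = chan->chid;
	}
	printf("TRAP ch %d [%010llx %s]\n", chid, inst << 12, name);
	/* the real handlers end with nvkm_fifo_chan_put() here */
}

int main(void)
{
	report_trap(0x1234);
	return 0;
}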
drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.c
...
@@ -230,7 +230,8 @@ nv20_gr_intr(struct nvkm_subdev *subdev)
 			   "nstatus %08x [%s] ch %d [%s] subc %d "
 			   "class %04x mthd %04x data %08x\n",
 			   show, msg, nsource, src, nstatus, sta, chid,
-			   nvkm_client_name(chan), subc, class, mthd, data);
+			   chan ? chan->object.client->name : "unknown",
+			   subc, class, mthd, data);
 	}
 
 	nvkm_fifo_chan_put(device->fifo, flags, &chan);
...
drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.c
...
@@ -353,7 +353,8 @@ nv40_gr_intr(struct nvkm_subdev *subdev)
 			   "class %04x mthd %04x data %08x\n",
 			   show, msg, nsource, src, nstatus, sta,
 			   chan ? chan->fifo->chid : -1, inst << 4,
-			   nvkm_client_name(chan), subc, class, mthd, data);
+			   chan ? chan->fifo->object.client->name : "unknown",
+			   subc, class, mthd, data);
 	}
 
 	spin_unlock_irqrestore(&gr->base.engine.lock, flags);
...
drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.c
...
@@ -608,7 +608,7 @@ nv50_gr_tp_trap(struct nv50_gr *gr, int type, u32 ustatus_old,
 static int
 nv50_gr_trap_handler(struct nv50_gr *gr, u32 display,
-		     int chid, u64 inst, struct nvkm_fifo_chan *chan)
+		     int chid, u64 inst, const char *name)
 {
 	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
 	struct nvkm_device *device = subdev->device;
...
@@ -648,8 +648,7 @@ nv50_gr_trap_handler(struct nv50_gr *gr, u32 display,
 				   "ch %d [%010llx %s] subc %d "
 				   "class %04x mthd %04x data %08x%08x "
 				   "400808 %08x 400848 %08x\n",
-				   chid, inst, nvkm_client_name(chan),
-				   subc, class, mthd,
+				   chid, inst, name, subc, class, mthd,
 				   datah, datal, addr, r848);
 		} else
 		if (display) {
...
@@ -674,9 +673,8 @@ nv50_gr_trap_handler(struct nv50_gr *gr, u32 display,
 			nvkm_error(subdev,
 				   "ch %d [%010llx %s] subc %d "
 				   "class %04x mthd %04x data %08x "
-				   "40084c %08x\n", chid, inst,
-				   nvkm_client_name(chan), subc,
-				   class, mthd, data, addr);
+				   "40084c %08x\n", chid, inst, name,
+				   subc, class, mthd, data, addr);
 		} else
 		if (display) {
 			nvkm_error(subdev, "no stuck command?\n");
...
@@ -849,11 +847,15 @@ nv50_gr_intr(struct nvkm_subdev *subdev)
 	u32 show = stat, show_bitfield = stat;
 	const struct nvkm_enum *en;
 	unsigned long flags;
+	const char *name = "unknown";
 	char msg[128];
-	int chid;
+	int chid = -1;
 
 	chan = nvkm_fifo_chan_inst(device->fifo, (u64)inst << 12, &flags);
-	chid = chan ? chan->chid : -1;
+	if (chan) {
+		name = chan->object.client->name;
+		chid = chan->chid;
+	}
 
 	if (show & 0x00100000) {
 		u32 ecode = nvkm_rd32(device, 0x400110);
...
@@ -864,7 +866,7 @@ nv50_gr_intr(struct nvkm_subdev *subdev)
 	}
 
 	if (stat & 0x00200000) {
-		if (!nv50_gr_trap_handler(gr, show, chid, (u64)inst << 12, chan))
+		if (!nv50_gr_trap_handler(gr, show, chid, (u64)inst << 12, name))
 			show &= ~0x00200000;
 		show_bitfield &= ~0x00200000;
 	}
...
@@ -877,8 +879,8 @@ nv50_gr_intr(struct nvkm_subdev *subdev)
 		nvkm_snprintbf(msg, sizeof(msg), nv50_gr_intr_name, show);
 		nvkm_error(subdev, "%08x [%s] ch %d [%010llx %s] subc %d "
 			   "class %04x mthd %04x data %08x\n",
-			   stat, msg, chid, (u64)inst << 12,
-			   nvkm_client_name(chan), subc, class, mthd, data);
+			   stat, msg, chid, (u64)inst << 12, name,
+			   subc, class, mthd, data);
 	}
 
 	if (nvkm_rd32(device, 0x400824) & (1 << 31))
...
View file @
8f0649b5
...
@@ -231,8 +231,8 @@ nv31_mpeg_intr(struct nvkm_subdev *subdev)
...
@@ -231,8 +231,8 @@ nv31_mpeg_intr(struct nvkm_subdev *subdev)
if
(
show
)
{
if
(
show
)
{
nvkm_error
(
subdev
,
"ch %d [%s] %08x %08x %08x %08x
\n
"
,
nvkm_error
(
subdev
,
"ch %d [%s] %08x %08x %08x %08x
\n
"
,
mpeg
->
chan
?
mpeg
->
chan
->
fifo
->
chid
:
-
1
,
mpeg
->
chan
?
mpeg
->
chan
->
fifo
->
chid
:
-
1
,
nvkm_client_name
(
mpeg
->
chan
),
mpeg
->
chan
?
mpeg
->
chan
->
fifo
->
object
.
client
->
name
:
stat
,
type
,
mthd
,
data
);
"unknown"
,
stat
,
type
,
mthd
,
data
);
}
}
spin_unlock_irqrestore
(
&
mpeg
->
base
.
engine
.
lock
,
flags
);
spin_unlock_irqrestore
(
&
mpeg
->
base
.
engine
.
lock
,
flags
);
...
...
drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c
...
@@ -145,13 +145,11 @@ nv44_mpeg_intr(struct nvkm_subdev *subdev)
 	u32 mthd = nvkm_rd32(device, 0x00b234);
 	u32 data = nvkm_rd32(device, 0x00b238);
 	u32 show = stat;
-	int chid = -1;
 
 	spin_lock_irqsave(&mpeg->base.engine.lock, flags);
 	list_for_each_entry(temp, &mpeg->chan, head) {
 		if (temp->inst >> 4 == inst) {
 			chan = temp;
-			chid = chan->fifo->chid;
 			list_del(&chan->head);
 			list_add(&chan->head, &mpeg->chan);
 			break;
...
@@ -176,7 +174,8 @@ nv44_mpeg_intr(struct nvkm_subdev *subdev)
 	if (show) {
 		nvkm_error(subdev, "ch %d [%08x %s] %08x %08x %08x %08x\n",
-			   chid, inst << 4, nvkm_client_name(chan),
+			   chan ? chan->fifo->chid : -1, inst << 4,
+			   chan ? chan->fifo->object.client->name : "unknown",
 			   stat, type, mthd, data);
 	}
...
View file @
8f0649b5
...
@@ -84,7 +84,8 @@ g98_sec_intr(struct nvkm_falcon *sec, struct nvkm_fifo_chan *chan)
...
@@ -84,7 +84,8 @@ g98_sec_intr(struct nvkm_falcon *sec, struct nvkm_fifo_chan *chan)
nvkm_error
(
subdev
,
"DISPATCH_ERROR %04x [%s] ch %d [%010llx %s] "
nvkm_error
(
subdev
,
"DISPATCH_ERROR %04x [%s] ch %d [%010llx %s] "
"subc %d mthd %04x data %08x
\n
"
,
ssta
,
"subc %d mthd %04x data %08x
\n
"
,
ssta
,
en
?
en
->
name
:
"UNKNOWN"
,
chan
?
chan
->
chid
:
-
1
,
en
?
en
->
name
:
"UNKNOWN"
,
chan
?
chan
->
chid
:
-
1
,
chan
?
chan
->
inst
:
0
,
nvkm_client_name
(
chan
),
chan
?
chan
->
inst
->
addr
:
0
,
chan
?
chan
->
object
.
client
->
name
:
"unknown"
,
subc
,
mthd
,
data
);
subc
,
mthd
,
data
);
}
}
...
...
drivers/gpu/drm/nouveau/nvkm/engine/sw/nv50.c
...
@@ -24,8 +24,8 @@
 #include "nv50.h"
 
 #include <core/handle.h>
-#include <core/namedb.h>
 #include <engine/disp.h>
+#include <engine/fifo/chan.h>
 #include <subdev/bar.h>
 
 #include <nvif/event.h>
...
@@ -136,7 +136,7 @@ nv50_sw_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 			return ret;
 	}
 
-	chan->vblank.channel = nv_gpuobj(parent->parent)->addr >> 12;
+	chan->vblank.channel = nvkm_fifo_chan(parent)->inst->addr >> 12;
 	return 0;
 }
...
View file @
8f0649b5
...
@@ -338,7 +338,7 @@ void
...
@@ -338,7 +338,7 @@ void
gt215_clk_post
(
struct
nvkm_clk
*
clk
,
unsigned
long
*
flags
)
gt215_clk_post
(
struct
nvkm_clk
*
clk
,
unsigned
long
*
flags
)
{
{
struct
nvkm_device
*
device
=
clk
->
subdev
.
device
;
struct
nvkm_device
*
device
=
clk
->
subdev
.
device
;
struct
nvkm_fifo
*
fifo
=
nvkm_fifo
(
clk
)
;
struct
nvkm_fifo
*
fifo
=
device
->
fifo
;
if
(
fifo
&&
flags
)
if
(
fifo
&&
flags
)
fifo
->
start
(
fifo
,
flags
);
fifo
->
start
(
fifo
,
flags
);
...
...
drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.c
...
@@ -189,12 +189,14 @@ nv50_fb_intr(struct nvkm_subdev *subdev)
 	else
 		sc = NULL;
 	chan = nvkm_fifo_chan_inst(fifo, inst, &flags);
-	nvkm_error(subdev, "trapped %s at %02x%04x%04x on channel "
-		   "%08x [%s] engine %02x [%s] client %02x [%s] "
+	nvkm_error(subdev, "trapped %s at %02x%04x%04x on channel %d [%08x %s] "
+		   "engine %02x [%s] client %02x [%s] "
 		   "subclient %02x [%s] reason %08x [%s]\n",
 		   (trap[5] & 0x00000100) ? "read" : "write",
-		   trap[5] & 0xff, trap[4] & 0xffff, trap[3] & 0xffff, inst,
-		   nvkm_client_name(chan), st0, en ? en->name : "",
+		   trap[5] & 0xff, trap[4] & 0xffff, trap[3] & 0xffff,
+		   chan ? chan->chid : -1, inst,
+		   chan ? chan->object.client->name : "unknown",
+		   st0, en ? en->name : "",
 		   st2, cl ? cl->name : "", st3, sc ? sc->name : "",
 		   st1, re ? re->name : "");
 	nvkm_fifo_chan_put(fifo, flags, &chan);
...