Commit d8e83994 authored Aug 20, 2015 by Ben Skeggs
drm/nouveau/imem: improve management of instance memory

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>

parent 1de68568
Showing 34 changed files with 968 additions and 656 deletions
drivers/gpu/drm/nouveau/include/nvkm/core/gpuobj.h          +12  -21
drivers/gpu/drm/nouveau/include/nvkm/core/memory.h          +53  -0
drivers/gpu/drm/nouveau/include/nvkm/subdev/bar.h           +2   -7
drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h       +8   -19
drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h           +1   -0
drivers/gpu/drm/nouveau/nouveau_bo.c                        +5   -1
drivers/gpu/drm/nouveau/nvkm/core/Kbuild                    +1   -0
drivers/gpu/drm/nouveau/nvkm/core/gpuobj.c                  +81  -52
drivers/gpu/drm/nouveau/nvkm/core/memory.c                  +64  -0
drivers/gpu/drm/nouveau/nvkm/core/ramht.c                   +0   -8
drivers/gpu/drm/nouveau/nvkm/engine/fifo/g84.c              +0   -9
drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c            +0   -9
drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c            +0   -9
drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c             +3   -2
drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv10.c             +3   -2
drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv17.c             +3   -2
drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv40.c             +3   -2
drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.c             +0   -11
drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c           +0   -4
drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv50.c             +0   -3
drivers/gpu/drm/nouveau/nvkm/subdev/bar/base.c              +0   -90
drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c             +11  -32
drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c              +6   -26
drivers/gpu/drm/nouveau/nvkm/subdev/bar/priv.h              +0   -3
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c          +192 -52
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c         +120 -94
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c          +99  -61
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.h          +0   -36
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv40.c          +135 -11
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c          +143 -55
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/priv.h          +4   -27
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c              +19  -0
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gf100.c             +0   -4
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv50.c              +0   -4
drivers/gpu/drm/nouveau/include/nvkm/core/gpuobj.h

 #ifndef __NVKM_GPUOBJ_H__
 #define __NVKM_GPUOBJ_H__
 #include <core/object.h>
+#include <core/memory.h>
 #include <core/mm.h>
 struct nvkm_vma;
 struct nvkm_vm;
...
@@ -11,13 +12,23 @@ struct nvkm_vm;
 struct nvkm_gpuobj {
 	struct nvkm_object object;
-	struct nvkm_object *parent;
+	struct nvkm_memory *memory;
+	struct nvkm_gpuobj *parent;
 	struct nvkm_mm_node *node;
 	struct nvkm_mm heap;

 	u32 flags;
 	u64 addr;
 	u32 size;
+
+	const struct nvkm_gpuobj_func *func;
 };
+
+struct nvkm_gpuobj_func {
+	void (*acquire)(struct nvkm_gpuobj *);
+	void (*release)(struct nvkm_gpuobj *);
+	u32 (*rd32)(struct nvkm_gpuobj *, u32 offset);
+	void (*wr32)(struct nvkm_gpuobj *, u32 offset, u32 data);
+};

 static inline struct nvkm_gpuobj *
...
@@ -60,24 +71,4 @@ int _nvkm_gpuobj_init(struct nvkm_object *);
 int  _nvkm_gpuobj_fini(struct nvkm_object *, bool);
 u32  _nvkm_gpuobj_rd32(struct nvkm_object *, u64);
 void _nvkm_gpuobj_wr32(struct nvkm_object *, u64, u32);
/* accessor macros - kmap()/done() must bracket use of the other accessor
* macros to guarantee correct behaviour across all chipsets
*/
#define nvkm_kmap(o) do { \
struct nvkm_gpuobj *_gpuobj = (o); \
(void)_gpuobj; \
} while(0)
#define nvkm_ro32(o,a) ({ \
u32 _data; \
nvkm_object_rd32(&(o)->object, (a), &_data); \
_data; \
})
#define nvkm_wo32(o,a,d) nvkm_object_wr32(&(o)->object, (a), (d))
#define nvkm_mo32(o,a,m,d) ({ \
u32 _addr = (a), _data = nvkm_ro32((o), _addr); \
nvkm_wo32((o), _addr, (_data & ~(m)) | (d)); \
_data; \
})
#define nvkm_done(o) nvkm_kmap(o)
#endif
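The comment above states the usage contract the rest of this commit relies on: every run of nvkm_ro32()/nvkm_wo32() accesses has to be bracketed by nvkm_kmap() and nvkm_done(), so that backends needing an explicit mapping or a post-write BAR flush get a hook on either side. A minimal caller sketch, not taken from the commit (the offsets and values are invented for illustration):

/* Illustrative only: write a two-entry table into an existing nvkm_gpuobj.
 * Offsets 0x00/0x04 and the values are placeholders, not from this patch.
 */
static void
example_fill_table(struct nvkm_gpuobj *gpuobj)
{
	nvkm_kmap(gpuobj);                  /* acquire mapping; may be a no-op on fast paths */
	nvkm_wo32(gpuobj, 0x00, 0x00000001);
	nvkm_wo32(gpuobj, 0x04, 0x00000002);
	nvkm_done(gpuobj);                  /* release; backends flush BAR writes here */
}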
drivers/gpu/drm/nouveau/include/nvkm/core/memory.h (new file, mode 100644)

#ifndef __NVKM_MEMORY_H__
#define __NVKM_MEMORY_H__
#include <core/os.h>
struct nvkm_device;
struct nvkm_vma;
struct nvkm_vm;

enum nvkm_memory_target {
	NVKM_MEM_TARGET_INST,
	NVKM_MEM_TARGET_VRAM,
	NVKM_MEM_TARGET_HOST,
};

struct nvkm_memory {
	const struct nvkm_memory_func *func;
};

struct nvkm_memory_func {
	void *(*dtor)(struct nvkm_memory *);
	enum nvkm_memory_target (*target)(struct nvkm_memory *);
	u64 (*addr)(struct nvkm_memory *);
	u64 (*size)(struct nvkm_memory *);
	void (*boot)(struct nvkm_memory *, struct nvkm_vm *);
	void __iomem *(*acquire)(struct nvkm_memory *);
	void (*release)(struct nvkm_memory *);
	u32 (*rd32)(struct nvkm_memory *, u64 offset);
	void (*wr32)(struct nvkm_memory *, u64 offset, u32 data);
	void (*map)(struct nvkm_memory *, struct nvkm_vma *, u64 offset);
};

void nvkm_memory_ctor(const struct nvkm_memory_func *, struct nvkm_memory *);
int nvkm_memory_new(struct nvkm_device *, enum nvkm_memory_target,
		    u64 size, u32 align, bool zero, struct nvkm_memory **);
void nvkm_memory_del(struct nvkm_memory **);
#define nvkm_memory_target(p) (p)->func->target(p)
#define nvkm_memory_addr(p) (p)->func->addr(p)
#define nvkm_memory_size(p) (p)->func->size(p)
#define nvkm_memory_boot(p,v) (p)->func->boot((p),(v))
#define nvkm_memory_map(p,v,o) (p)->func->map((p),(v),(o))
/* accessor macros - kmap()/done() must bracket use of the other accessor
* macros to guarantee correct behaviour across all chipsets
*/
#define nvkm_kmap(o) (o)->func->acquire(o)
#define nvkm_ro32(o,a) (o)->func->rd32((o), (a))
#define nvkm_wo32(o,a,d) (o)->func->wr32((o), (a), (d))
#define nvkm_mo32(o,a,m,d) ({ \
u32 _addr = (a), _data = nvkm_ro32((o), _addr); \
nvkm_wo32((o), _addr, (_data & ~(m)) | (d)); \
_data; \
})
#define nvkm_done(o) (o)->func->release(o)
#endif
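The nvkm_memory_func table above is the polymorphic interface the rest of this commit is built around: instmem, gpuobj and the BAR code all talk to an allocation through these hooks rather than through nvkm_object. As a rough sketch (not from the commit; all names below are invented), a trivial backend only fills in the hooks it supports and constructs the object with nvkm_memory_ctor():

/* Hypothetical backend for illustration only; not part of nouveau. */
struct example_memory {
	struct nvkm_memory memory;
	u64 addr;
	u64 size;
};
#define example_memory(p) container_of((p), struct example_memory, memory)

static enum nvkm_memory_target
example_target(struct nvkm_memory *memory)
{
	return NVKM_MEM_TARGET_INST;
}

static u64 example_addr(struct nvkm_memory *memory) { return example_memory(memory)->addr; }
static u64 example_size(struct nvkm_memory *memory) { return example_memory(memory)->size; }

static void *
example_dtor(struct nvkm_memory *memory)
{
	/* whatever pointer is returned here is what nvkm_memory_del() kfree()s */
	return example_memory(memory);
}

static const struct nvkm_memory_func
example_func = {
	.dtor   = example_dtor,
	.target = example_target,
	.addr   = example_addr,
	.size   = example_size,
};

A backend would then call nvkm_memory_ctor(&example_func, &obj->memory) on a kzalloc'd example_memory before handing the embedded nvkm_memory pointer back to callers.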
drivers/gpu/drm/nouveau/include/nvkm/subdev/bar.h

...
@@ -7,13 +7,8 @@ struct nvkm_vma;
 struct nvkm_bar {
 	struct nvkm_subdev subdev;

-	int (*alloc)(struct nvkm_bar *, struct nvkm_object *, struct nvkm_mem *,
-		     struct nvkm_object **);
-	int (*kmap)(struct nvkm_bar *, struct nvkm_mem *, u32 flags,
-		    struct nvkm_vma *);
-	int (*umap)(struct nvkm_bar *, struct nvkm_mem *, u32 flags,
-		    struct nvkm_vma *);
+	struct nvkm_vm *(*kmap)(struct nvkm_bar *);
+	int (*umap)(struct nvkm_bar *, u64 size, int type, struct nvkm_vma *);
 	void (*unmap)(struct nvkm_bar *, struct nvkm_vma *);
 	void (*flush)(struct nvkm_bar *);
...
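The signature change matters for callers: umap() no longer takes a struct nvkm_mem, only a size and page type, so the BAR code just reserves virtual address space and the caller maps its own backing storage into the returned vma (this is what the nouveau_bo.c and gpuobj.c hunks further down do). A hedged before/after sketch of the calling convention, where "node", "vma", "page_shift" and "ret" stand in for the caller's own variables rather than anything defined in this hunk:

/* old: the BAR mapped the nvkm_mem itself */
ret = bar->umap(bar, node, NV_MEM_ACCESS_RW, &vma);

/* new: the BAR only allocates VA space; the caller maps its pages afterwards */
ret = bar->umap(bar, node->size << 12, page_shift, &vma);
if (ret == 0)
	nvkm_vm_map(&vma, node);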
drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h

 #ifndef __NVKM_INSTMEM_H__
 #define __NVKM_INSTMEM_H__
 #include <core/subdev.h>
-
-struct nvkm_instobj {
-	struct nvkm_object object;
-	struct list_head head;
-	u32 *suspend;
-	u64 addr;
-	u32 size;
-};
-
-static inline struct nvkm_instobj *
-nv_memobj(void *obj)
-{
-#if CONFIG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA
-	BUG_ON(!nv_iclass(obj, NV_MEMOBJ_CLASS));
-#endif
-	return obj;
-}
+struct nvkm_memory;

 struct nvkm_instmem {
 	struct nvkm_subdev subdev;
 	struct list_head list;

 	u32 reserved;
-	int (*alloc)(struct nvkm_instmem *, struct nvkm_object *,
-		     u32 size, u32 align, struct nvkm_object **);
+	int (*alloc)(struct nvkm_instmem *, u32 size, u32 align, bool zero,
+		     struct nvkm_memory **);

 	const struct nvkm_instmem_func *func;
+
+	struct nvkm_gpuobj *vbios;
+	struct nvkm_ramht  *ramht;
+	struct nvkm_gpuobj *ramro;
+	struct nvkm_gpuobj *ramfc;
 };

 struct nvkm_instmem_func {
...
drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h

...
@@ -97,6 +97,7 @@ int nvkm_vm_create(struct nvkm_mmu *, u64 offset, u64 length, u64 mm_offset,
 int  nvkm_vm_new(struct nvkm_device *, u64 offset, u64 length, u64 mm_offset,
 		 struct lock_class_key *, struct nvkm_vm **);
 int  nvkm_vm_ref(struct nvkm_vm *, struct nvkm_vm **, struct nvkm_gpuobj *pgd);
+int  nvkm_vm_boot(struct nvkm_vm *, u64 size);
 int  nvkm_vm_get(struct nvkm_vm *, u64 size, u32 page_shift, u32 access,
 		 struct nvkm_vma *);
 void nvkm_vm_put(struct nvkm_vma *);
...
drivers/gpu/drm/nouveau/nouveau_bo.c

...
@@ -1388,12 +1388,16 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
 		mem->bus.is_iomem = true;
 		if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
 			struct nvkm_bar *bar = nvxx_bar(&drm->device);
+			int page_shift = 12;
+			if (drm->device.info.family >= NV_DEVICE_INFO_V0_FERMI)
+				page_shift = node->page_shift;

-			ret = bar->umap(bar, node, NV_MEM_ACCESS_RW,
+			ret = bar->umap(bar, node->size << 12, page_shift,
 					&node->bar_vma);
 			if (ret)
 				return ret;

+			nvkm_vm_map(&node->bar_vma, node);
 			mem->bus.offset = node->bar_vma.offset;
 		}
 		break;
...
drivers/gpu/drm/nouveau/nvkm/core/Kbuild
...
...
@@ -6,6 +6,7 @@ nvkm-y += nvkm/core/event.o
nvkm-y += nvkm/core/gpuobj.o
nvkm-y += nvkm/core/handle.o
nvkm-y += nvkm/core/ioctl.o
nvkm-y += nvkm/core/memory.o
nvkm-y += nvkm/core/mm.o
nvkm-y += nvkm/core/namedb.o
nvkm-y += nvkm/core/notify.o
...
...
drivers/gpu/drm/nouveau/nvkm/core/gpuobj.c

...
@@ -28,6 +28,44 @@
 #include <subdev/bar.h>
 #include <subdev/mmu.h>

+static void
+nvkm_gpuobj_release(struct nvkm_gpuobj *gpuobj)
+{
+	if (gpuobj->node) {
+		nvkm_done(gpuobj->parent);
+		return;
+	}
+	nvkm_done(gpuobj->memory);
+}
+
+static void
+nvkm_gpuobj_acquire(struct nvkm_gpuobj *gpuobj)
+{
+	if (gpuobj->node) {
+		nvkm_kmap(gpuobj->parent);
+		return;
+	}
+	nvkm_kmap(gpuobj->memory);
+}
+
+static u32
+nvkm_gpuobj_rd32(struct nvkm_gpuobj *gpuobj, u32 offset)
+{
+	if (gpuobj->node)
+		return nvkm_ro32(gpuobj->parent, gpuobj->node->offset + offset);
+	return nvkm_ro32(gpuobj->memory, offset);
+}
+
+static void
+nvkm_gpuobj_wr32(struct nvkm_gpuobj *gpuobj, u32 offset, u32 data)
+{
+	if (gpuobj->node) {
+		nvkm_wo32(gpuobj->parent, gpuobj->node->offset + offset, data);
+		return;
+	}
+	nvkm_wo32(gpuobj->memory, offset, data);
+}
+
 void
 nvkm_gpuobj_destroy(struct nvkm_gpuobj *gpuobj)
 {
...
@@ -46,17 +84,27 @@ nvkm_gpuobj_destroy(struct nvkm_gpuobj *gpuobj)
 	if (gpuobj->heap.block_size)
 		nvkm_mm_fini(&gpuobj->heap);

+	nvkm_memory_del(&gpuobj->memory);
 	nvkm_object_destroy(&gpuobj->object);
 }

+static const struct nvkm_gpuobj_func nvkm_gpuobj_func = {
+	.acquire = nvkm_gpuobj_acquire,
+	.release = nvkm_gpuobj_release,
+	.rd32 = nvkm_gpuobj_rd32,
+	.wr32 = nvkm_gpuobj_wr32,
+};
+
 int
 nvkm_gpuobj_create_(struct nvkm_object *parent, struct nvkm_object *engine,
 		    struct nvkm_oclass *oclass, u32 pclass,
-		    struct nvkm_object *pargpu, u32 size, u32 align, u32 flags,
+		    struct nvkm_object *objgpu, u32 size, u32 align, u32 flags,
 		    int length, void **pobject)
 {
-	struct nvkm_instmem *imem = nvkm_instmem(parent);
-	struct nvkm_bar *bar = nvkm_bar(parent);
+	struct nvkm_device *device = nv_device(parent);
+	struct nvkm_memory *memory = NULL;
+	struct nvkm_gpuobj *pargpu = NULL;
 	struct nvkm_gpuobj *gpuobj;
 	struct nvkm_mm *heap = NULL;
 	int ret, i;
...
@@ -64,46 +112,39 @@ nvkm_gpuobj_create_(struct nvkm_object *parent, struct nvkm_object *engine,
 	*pobject = NULL;

-	if (pargpu) {
-		while ((pargpu = nv_pclass(pargpu, NV_GPUOBJ_CLASS))) {
-			if (nv_gpuobj(pargpu)->heap.block_size)
+	if (objgpu) {
+		while ((objgpu = nv_pclass(objgpu, NV_GPUOBJ_CLASS))) {
+			if (nv_gpuobj(objgpu)->heap.block_size)
 				break;
-			pargpu = pargpu->parent;
+			objgpu = objgpu->parent;
 		}

-		if (WARN_ON(pargpu == NULL))
+		if (WARN_ON(objgpu == NULL))
 			return -EINVAL;

-		addr = nv_gpuobj(pargpu)->addr;
-		heap = &nv_gpuobj(pargpu)->heap;
-		atomic_inc(&parent->refcount);
+		pargpu = nv_gpuobj(objgpu);
+
+		addr =  pargpu->addr;
+		heap = &pargpu->heap;
 	} else {
-		ret = imem->alloc(imem, parent, size, align, &parent);
-		pargpu = parent;
+		ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
+				      size, align, false, &memory);
 		if (ret)
 			return ret;

-		addr = nv_memobj(pargpu)->addr;
-		size = nv_memobj(pargpu)->size;
-
-		if (bar && bar->alloc) {
-			struct nvkm_instobj *iobj = (void *)parent;
-			struct nvkm_mem **mem = (void *)(iobj + 1);
-			struct nvkm_mem *node = *mem;
-			if (!bar->alloc(bar, parent, node, &pargpu)) {
-				nvkm_object_ref(NULL, &parent);
-				parent = pargpu;
-			}
-		}
+		addr = nvkm_memory_addr(memory);
+		size = nvkm_memory_size(memory);
 	}

 	ret = nvkm_object_create_(parent, engine, oclass, pclass |
 				  NV_GPUOBJ_CLASS, length, pobject);
-	nvkm_object_ref(NULL, &parent);
 	gpuobj = *pobject;
-	if (ret)
+	if (ret) {
+		nvkm_memory_del(&memory);
 		return ret;
+	}

+	gpuobj->func = &nvkm_gpuobj_func;
+	gpuobj->memory = memory;
 	gpuobj->parent = pargpu;
 	gpuobj->flags = flags;
 	gpuobj->addr = addr;
...
@@ -182,20 +223,14 @@ u32
 _nvkm_gpuobj_rd32(struct nvkm_object *object, u64 addr)
 {
 	struct nvkm_gpuobj *gpuobj = nv_gpuobj(object);
-	u32 data;
-	if (gpuobj->node)
-		addr += gpuobj->node->offset;
-	nvkm_object_rd32(gpuobj->parent, addr, &data);
-	return data;
+	return nvkm_ro32(gpuobj, addr);
 }

 void
 _nvkm_gpuobj_wr32(struct nvkm_object *object, u64 addr, u32 data)
 {
 	struct nvkm_gpuobj *gpuobj = nv_gpuobj(object);
-	if (gpuobj->node)
-		addr += gpuobj->node->offset;
-	nvkm_object_wr32(gpuobj->parent, addr, data);
+	nvkm_wo32(gpuobj, addr, data);
 }

 static struct nvkm_oclass
...
@@ -231,14 +266,14 @@ nvkm_gpuobj_new(struct nvkm_object *parent, struct nvkm_object *pargpu,
 int
 nvkm_gpuobj_map(struct nvkm_gpuobj *gpuobj, u32 access, struct nvkm_vma *vma)
 {
+	struct nvkm_memory *memory = gpuobj->memory;
 	struct nvkm_bar *bar = nvkm_bar(gpuobj);
 	int ret = -EINVAL;

 	if (bar && bar->umap) {
-		struct nvkm_instobj *iobj = (void *)
-			nv_pclass(nv_object(gpuobj), NV_MEMOBJ_CLASS);
-		struct nvkm_mem **mem = (void *)(iobj + 1);
-		ret = bar->umap(bar, *mem, access, vma);
+		ret = bar->umap(bar, gpuobj->size, 12, vma);
+		if (ret == 0)
+			nvkm_memory_map(memory, vma, 0);
 	}

 	return ret;
...
@@ -248,17 +283,11 @@ int
 nvkm_gpuobj_map_vm(struct nvkm_gpuobj *gpuobj, struct nvkm_vm *vm,
 		   u32 access, struct nvkm_vma *vma)
 {
-	struct nvkm_instobj *iobj = (void *)
-		nv_pclass(nv_object(gpuobj), NV_MEMOBJ_CLASS);
-	struct nvkm_mem **mem = (void *)(iobj + 1);
-	int ret;
-
-	ret = nvkm_vm_get(vm, gpuobj->size, 12, access, vma);
-	if (ret)
-		return ret;
-
-	nvkm_vm_map(vma, *mem);
-	return 0;
+	struct nvkm_memory *memory = gpuobj->memory;
+	int ret = nvkm_vm_get(vm, gpuobj->size, 12, access, vma);
+	if (ret == 0)
+		nvkm_memory_map(memory, vma, 0);
+	return ret;
 }

 void
...
@@ -279,7 +308,7 @@ static void
 nvkm_gpudup_dtor(struct nvkm_object *object)
 {
 	struct nvkm_gpuobj *gpuobj = (void *)object;
-	nvkm_object_ref(NULL, &gpuobj->parent);
+	nvkm_object_ref(NULL, (struct nvkm_object **)&gpuobj->parent);
 	nvkm_object_destroy(&gpuobj->object);
 }
...
@@ -306,7 +335,7 @@ nvkm_gpuobj_dup(struct nvkm_object *parent, struct nvkm_gpuobj *base,
 	if (ret)
 		return ret;

-	nvkm_object_ref(nv_object(base), &gpuobj->parent);
+	nvkm_object_ref(nv_object(base), (struct nvkm_object **)&gpuobj->parent);
 	gpuobj->addr = base->addr;
 	gpuobj->size = base->size;
 	return 0;
...
drivers/gpu/drm/nouveau/nvkm/core/memory.c (new file, mode 100644)
/*
* Copyright 2015 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs <bskeggs@redhat.com>
*/
#include <core/memory.h>
#include <subdev/instmem.h>
void
nvkm_memory_ctor(const struct nvkm_memory_func *func,
		 struct nvkm_memory *memory)
{
	memory->func = func;
}

void
nvkm_memory_del(struct nvkm_memory **pmemory)
{
	struct nvkm_memory *memory = *pmemory;
	if (memory && !WARN_ON(!memory->func)) {
		if (memory->func->dtor)
			*pmemory = memory->func->dtor(memory);
		kfree(*pmemory);
		*pmemory = NULL;
	}
}

int
nvkm_memory_new(struct nvkm_device *device, enum nvkm_memory_target target,
		u64 size, u32 align, bool zero,
		struct nvkm_memory **pmemory)
{
	struct nvkm_instmem *imem = device->imem;
	struct nvkm_memory *memory;
	int ret = -ENOSYS;

	if (unlikely(target != NVKM_MEM_TARGET_INST || !imem))
		return -ENOSYS;

	ret = imem->alloc(imem, size, align, zero, &memory);
	if (ret)
		return ret;

	*pmemory = memory;
	return 0;
}
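nvkm_memory_new() and nvkm_memory_del() are the whole lifecycle for an anonymous instance-memory allocation. A short usage sketch under assumptions (error handling trimmed; the size, alignment and written value are arbitrary, and "device" is assumed to be a valid struct nvkm_device with an instmem subdev):

/* Sketch only: allocate, initialise and free a small instmem buffer. */
static int
example_instmem_alloc(struct nvkm_device *device)
{
	struct nvkm_memory *memory;
	int ret;

	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 0x100,
			      true, &memory);
	if (ret)
		return ret;

	nvkm_kmap(memory);
	nvkm_wo32(memory, 0x000, 0x00000001);   /* arbitrary example write */
	nvkm_done(memory);

	nvkm_memory_del(&memory);
	return 0;
}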
drivers/gpu/drm/nouveau/nvkm/core/ramht.c

...
@@ -22,8 +22,6 @@
 #include <core/ramht.h>
 #include <core/engine.h>

-#include <subdev/bar.h>
-
 static u32
 nvkm_ramht_hash(struct nvkm_ramht *ramht, int chid, u32 handle)
 {
...
@@ -43,7 +41,6 @@ int
 nvkm_ramht_insert(struct nvkm_ramht *ramht, int chid, u32 handle, u32 context)
 {
 	struct nvkm_gpuobj *gpuobj = &ramht->gpuobj;
-	struct nvkm_bar *bar = nvkm_bar(ramht);
 	int ret = -ENOSPC;
 	u32 co, ho;
...
@@ -53,8 +50,6 @@ nvkm_ramht_insert(struct nvkm_ramht *ramht, int chid, u32 handle, u32 context)
 		if (!nvkm_ro32(gpuobj, co + 4)) {
 			nvkm_wo32(gpuobj, co + 0, handle);
 			nvkm_wo32(gpuobj, co + 4, context);
-			if (bar)
-				bar->flush(bar);
 			ret = co;
 			break;
 		}
...
@@ -72,12 +67,9 @@ void
 nvkm_ramht_remove(struct nvkm_ramht *ramht, int cookie)
 {
 	struct nvkm_gpuobj *gpuobj = &ramht->gpuobj;
-	struct nvkm_bar *bar = nvkm_bar(ramht);
 	nvkm_kmap(gpuobj);
 	nvkm_wo32(gpuobj, cookie + 0, 0x00000000);
 	nvkm_wo32(gpuobj, cookie + 4, 0x00000000);
-	if (bar)
-		bar->flush(bar);
 	nvkm_done(gpuobj);
 }
...
drivers/gpu/drm/nouveau/nvkm/engine/fifo/g84.c

...
@@ -27,7 +27,6 @@
 #include <core/client.h>
 #include <core/engctx.h>
 #include <core/ramht.h>
-#include <subdev/bar.h>
 #include <subdev/mmu.h>
 #include <subdev/timer.h>
...
@@ -41,7 +40,6 @@
 static int
 g84_fifo_context_attach(struct nvkm_object *parent, struct nvkm_object *object)
 {
-	struct nvkm_bar *bar = nvkm_bar(parent);
 	struct nv50_fifo_base *base = (void *)parent->parent;
 	struct nvkm_gpuobj *ectx = (void *)object;
 	u64 limit = ectx->addr + ectx->size - 1;
...
@@ -73,7 +71,6 @@ g84_fifo_context_attach(struct nvkm_object *parent, struct nvkm_object *object)
 		  upper_32_bits(start));
 	nvkm_wo32(base->eng, addr + 0x10, 0x00000000);
 	nvkm_wo32(base->eng, addr + 0x14, 0x00000000);
-	bar->flush(bar);
 	nvkm_done(base->eng);
 	return 0;
 }
...
@@ -87,7 +84,6 @@ g84_fifo_context_detach(struct nvkm_object *parent, bool suspend,
 	struct nv50_fifo_chan *chan = (void *)parent;
 	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
 	struct nvkm_device *device = subdev->device;
-	struct nvkm_bar *bar = device->bar;
 	u32 addr, save, engn;
 	bool done;
...
@@ -128,7 +124,6 @@ g84_fifo_context_detach(struct nvkm_object *parent, bool suspend,
 	nvkm_wo32(base->eng, addr + 0x0c, 0x00000000);
 	nvkm_wo32(base->eng, addr + 0x10, 0x00000000);
 	nvkm_wo32(base->eng, addr + 0x14, 0x00000000);
-	bar->flush(bar);
 	nvkm_done(base->eng);
 	return 0;
 }
...
@@ -175,7 +170,6 @@ g84_fifo_chan_ctor_dma(struct nvkm_object *parent, struct nvkm_object *engine,
 	union {
 		struct nv50_channel_dma_v0 v0;
 	} *args = data;
-	struct nvkm_bar *bar = nvkm_bar(parent);
 	struct nv50_fifo_base *base = (void *)parent;
 	struct nv50_fifo_chan *chan;
 	int ret;
...
@@ -239,7 +233,6 @@ g84_fifo_chan_ctor_dma(struct nvkm_object *parent, struct nvkm_object *engine,
 		  (chan->ramht->gpuobj.node->offset >> 4));
 	nvkm_wo32(base->ramfc, 0x88, base->cache->addr >> 10);
 	nvkm_wo32(base->ramfc, 0x98, nv_gpuobj(base)->addr >> 12);
-	bar->flush(bar);
 	nvkm_done(base->ramfc);
 	return 0;
 }
...
@@ -252,7 +245,6 @@ g84_fifo_chan_ctor_ind(struct nvkm_object *parent, struct nvkm_object *engine,
 	union {
 		struct nv50_channel_gpfifo_v0 v0;
 	} *args = data;
-	struct nvkm_bar *bar = nvkm_bar(parent);
 	struct nv50_fifo_base *base = (void *)parent;
 	struct nv50_fifo_chan *chan;
 	u64 ioffset, ilength;
...
@@ -318,7 +310,6 @@ g84_fifo_chan_ctor_ind(struct nvkm_object *parent, struct nvkm_object *engine,
 		  (chan->ramht->gpuobj.node->offset >> 4));
 	nvkm_wo32(base->ramfc, 0x88, base->cache->addr >> 10);
 	nvkm_wo32(base->ramfc, 0x98, nv_gpuobj(base)->addr >> 12);
-	bar->flush(bar);
 	nvkm_done(base->ramfc);
 	return 0;
 }
...
drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c

...
@@ -27,7 +27,6 @@
 #include <core/engctx.h>
 #include <core/enum.h>
 #include <core/handle.h>
-#include <subdev/bar.h>
 #include <subdev/fb.h>
 #include <subdev/mmu.h>
 #include <subdev/timer.h>
...
@@ -79,7 +78,6 @@ gf100_fifo_runlist_update(struct gf100_fifo *fifo)
 {
 	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
 	struct nvkm_device *device = subdev->device;
-	struct nvkm_bar *bar = device->bar;
 	struct nvkm_gpuobj *cur;
 	int i, p;
...
@@ -96,7 +94,6 @@ gf100_fifo_runlist_update(struct gf100_fifo *fifo)
 			p += 8;
 		}
 	}
-	bar->flush(bar);
 	nvkm_done(cur);

 	nvkm_wr32(device, 0x002270, cur->addr >> 12);
...
@@ -113,7 +110,6 @@ static int
 gf100_fifo_context_attach(struct nvkm_object *parent, struct nvkm_object *object)
 {
-	struct nvkm_bar *bar = nvkm_bar(parent);
 	struct gf100_fifo_base *base = (void *)parent->parent;
 	struct nvkm_gpuobj *engn = &base->base.gpuobj;
 	struct nvkm_engctx *ectx = (void *)object;
...
@@ -144,7 +140,6 @@ gf100_fifo_context_attach(struct nvkm_object *parent,
 	nvkm_kmap(engn);
 	nvkm_wo32(engn, addr + 0x00, lower_32_bits(ectx->vma.offset) | 4);
 	nvkm_wo32(engn, addr + 0x04, upper_32_bits(ectx->vma.offset));
-	bar->flush(bar);
 	nvkm_done(engn);
 	return 0;
 }
...
@@ -159,7 +154,6 @@ gf100_fifo_context_detach(struct nvkm_object *parent, bool suspend,
 	struct nvkm_gpuobj *engn = &base->base.gpuobj;
 	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
 	struct nvkm_device *device = subdev->device;
-	struct nvkm_bar *bar = device->bar;
 	u32 addr;

 	switch (nv_engidx(object->engine)) {
...
@@ -188,7 +182,6 @@ gf100_fifo_context_detach(struct nvkm_object *parent, bool suspend,
 	nvkm_kmap(engn);
 	nvkm_wo32(engn, addr + 0x00, 0x00000000);
 	nvkm_wo32(engn, addr + 0x04, 0x00000000);
-	bar->flush(bar);
 	nvkm_done(engn);
 	return 0;
 }
...
@@ -201,7 +194,6 @@ gf100_fifo_chan_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 	union {
 		struct fermi_channel_gpfifo_v0 v0;
 	} *args = data;
-	struct nvkm_bar *bar = nvkm_bar(parent);
 	struct gf100_fifo *fifo = (void *)engine;
 	struct gf100_fifo_base *base = (void *)parent;
 	struct gf100_fifo_chan *chan;
...
@@ -264,7 +256,6 @@ gf100_fifo_chan_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 	nvkm_wo32(ramfc, 0xb8, 0xf8000000);
 	nvkm_wo32(ramfc, 0xf8, 0x10003080); /* 0x002310 */
 	nvkm_wo32(ramfc, 0xfc, 0x10000010); /* 0x002350 */
-	bar->flush(bar);
 	nvkm_done(ramfc);
 	return 0;
 }
...
drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c

...
@@ -27,7 +27,6 @@
 #include <core/engctx.h>
 #include <core/enum.h>
 #include <core/handle.h>
-#include <subdev/bar.h>
 #include <subdev/fb.h>
 #include <subdev/mmu.h>
 #include <subdev/timer.h>
...
@@ -99,7 +98,6 @@ gk104_fifo_runlist_update(struct gk104_fifo *fifo, u32 engine)
 	struct gk104_fifo_engn *engn = &fifo->engine[engine];
 	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
 	struct nvkm_device *device = subdev->device;
-	struct nvkm_bar *bar = device->bar;
 	struct nvkm_gpuobj *cur;
 	int i, p;
...
@@ -116,7 +114,6 @@ gk104_fifo_runlist_update(struct gk104_fifo *fifo, u32 engine)
 			p += 8;
 		}
 	}
-	bar->flush(bar);
 	nvkm_done(cur);

 	nvkm_wr32(device, 0x002270, cur->addr >> 12);
...
@@ -133,7 +130,6 @@ static int
 gk104_fifo_context_attach(struct nvkm_object *parent, struct nvkm_object *object)
 {
-	struct nvkm_bar *bar = nvkm_bar(parent);
 	struct gk104_fifo_base *base = (void *)parent->parent;
 	struct nvkm_gpuobj *engn = &base->base.gpuobj;
 	struct nvkm_engctx *ectx = (void *)object;
...
@@ -168,7 +164,6 @@ gk104_fifo_context_attach(struct nvkm_object *parent,
 	nvkm_kmap(engn);
 	nvkm_wo32(engn, addr + 0x00, lower_32_bits(ectx->vma.offset) | 4);
 	nvkm_wo32(engn, addr + 0x04, upper_32_bits(ectx->vma.offset));
-	bar->flush(bar);
 	nvkm_done(engn);
 	return 0;
 }
...
@@ -198,7 +193,6 @@ static int
 gk104_fifo_context_detach(struct nvkm_object *parent, bool suspend,
 			  struct nvkm_object *object)
 {
-	struct nvkm_bar *bar = nvkm_bar(parent);
 	struct gk104_fifo_base *base = (void *)parent->parent;
 	struct gk104_fifo_chan *chan = (void *)parent;
 	struct nvkm_gpuobj *engn = &base->base.gpuobj;
...
@@ -226,7 +220,6 @@ gk104_fifo_context_detach(struct nvkm_object *parent, bool suspend,
 	nvkm_kmap(engn);
 	nvkm_wo32(engn, addr + 0x00, 0x00000000);
 	nvkm_wo32(engn, addr + 0x04, 0x00000000);
-	bar->flush(bar);
 	nvkm_done(engn);
 }
...
@@ -241,7 +234,6 @@ gk104_fifo_chan_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 	union {
 		struct kepler_channel_gpfifo_a_v0 v0;
 	} *args = data;
-	struct nvkm_bar *bar = nvkm_bar(parent);
 	struct gk104_fifo *fifo = (void *)engine;
 	struct gk104_fifo_base *base = (void *)parent;
 	struct gk104_fifo_chan *chan;
...
@@ -320,7 +312,6 @@ gk104_fifo_chan_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 	nvkm_wo32(ramfc, 0xb8, 0xf8000000);
 	nvkm_wo32(ramfc, 0xf8, 0x10003080); /* 0x002310 */
 	nvkm_wo32(ramfc, 0xfc, 0x10000010); /* 0x002350 */
-	bar->flush(bar);
 	nvkm_done(ramfc);
 	return 0;
 }
...
drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c

...
@@ -27,7 +27,7 @@
 #include <core/engctx.h>
 #include <core/handle.h>
 #include <core/ramht.h>
-#include <subdev/instmem/nv04.h>
+#include <subdev/instmem.h>
 #include <subdev/timer.h>

 #include <nvif/class.h>
...
@@ -574,7 +574,8 @@ nv04_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 	       struct nvkm_oclass *oclass, void *data, u32 size,
 	       struct nvkm_object **pobject)
 {
-	struct nv04_instmem *imem = nv04_instmem(parent);
+	struct nvkm_device *device = (void *)parent;
+	struct nvkm_instmem *imem = device->imem;
 	struct nv04_fifo *fifo;
 	int ret;
...
drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv10.c

...
@@ -26,7 +26,7 @@
 #include <core/client.h>
 #include <core/engctx.h>
 #include <core/ramht.h>
-#include <subdev/instmem/nv04.h>
+#include <subdev/instmem.h>

 #include <nvif/class.h>
 #include <nvif/unpack.h>
...
@@ -145,7 +145,8 @@ nv10_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 	       struct nvkm_oclass *oclass, void *data, u32 size,
 	       struct nvkm_object **pobject)
 {
-	struct nv04_instmem *imem = nv04_instmem(parent);
+	struct nvkm_device *device = (void *)parent;
+	struct nvkm_instmem *imem = device->imem;
 	struct nv04_fifo *fifo;
 	int ret;
...
drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv17.c

...
@@ -26,7 +26,7 @@
 #include <core/client.h>
 #include <core/engctx.h>
 #include <core/ramht.h>
-#include <subdev/instmem/nv04.h>
+#include <subdev/instmem.h>

 #include <nvif/class.h>
 #include <nvif/unpack.h>
...
@@ -152,7 +152,8 @@ nv17_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 	       struct nvkm_oclass *oclass, void *data, u32 size,
 	       struct nvkm_object **pobject)
 {
-	struct nv04_instmem *imem = nv04_instmem(parent);
+	struct nvkm_device *device = (void *)parent;
+	struct nvkm_instmem *imem = device->imem;
 	struct nv04_fifo *fifo;
 	int ret;
...
drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv40.c

...
@@ -27,7 +27,7 @@
 #include <core/engctx.h>
 #include <core/ramht.h>
 #include <subdev/fb.h>
-#include <subdev/instmem/nv04.h>
+#include <subdev/instmem.h>

 #include <nvif/class.h>
 #include <nvif/unpack.h>
...
@@ -276,7 +276,8 @@ nv40_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 	       struct nvkm_oclass *oclass, void *data, u32 size,
 	       struct nvkm_object **pobject)
 {
-	struct nv04_instmem *imem = nv04_instmem(parent);
+	struct nvkm_device *device = (void *)parent;
+	struct nvkm_instmem *imem = device->imem;
 	struct nv04_fifo *fifo;
 	int ret;
...
drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.c

...
@@ -27,7 +27,6 @@
 #include <core/client.h>
 #include <core/engctx.h>
 #include <core/ramht.h>
-#include <subdev/bar.h>
 #include <subdev/mmu.h>
 #include <subdev/timer.h>
...
@@ -42,7 +41,6 @@ static void
 nv50_fifo_playlist_update_locked(struct nv50_fifo *fifo)
 {
 	struct nvkm_device *device = fifo->base.engine.subdev.device;
-	struct nvkm_bar *bar = device->bar;
 	struct nvkm_gpuobj *cur;
 	int i, p;
...
@@ -54,7 +52,6 @@ nv50_fifo_playlist_update_locked(struct nv50_fifo *fifo)
 		if (nvkm_rd32(device, 0x002600 + (i * 4)) & 0x80000000)
 			nvkm_wo32(cur, p++ * 4, i);
 	}
-	bar->flush(bar);
 	nvkm_done(cur);

 	nvkm_wr32(device, 0x0032f4, cur->addr >> 12);
...
@@ -73,7 +70,6 @@ nv50_fifo_playlist_update(struct nv50_fifo *fifo)
 static int
 nv50_fifo_context_attach(struct nvkm_object *parent, struct nvkm_object *object)
 {
-	struct nvkm_bar *bar = nvkm_bar(parent);
 	struct nv50_fifo_base *base = (void *)parent->parent;
 	struct nvkm_gpuobj *ectx = (void *)object;
 	u64 limit = ectx->addr + ectx->size - 1;
...
@@ -98,7 +94,6 @@ nv50_fifo_context_attach(struct nvkm_object *parent, struct nvkm_object *object)
 		  upper_32_bits(start));
 	nvkm_wo32(base->eng, addr + 0x10, 0x00000000);
 	nvkm_wo32(base->eng, addr + 0x14, 0x00000000);
-	bar->flush(bar);
 	nvkm_done(base->eng);
 	return 0;
 }
...
@@ -112,7 +107,6 @@ nv50_fifo_context_detach(struct nvkm_object *parent, bool suspend,
 	struct nv50_fifo_chan *chan = (void *)parent;
 	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
 	struct nvkm_device *device = subdev->device;
-	struct nvkm_bar *bar = device->bar;
 	u32 addr, me;
 	int ret = 0;
...
@@ -159,7 +153,6 @@ nv50_fifo_context_detach(struct nvkm_object *parent, bool suspend,
 	nvkm_wo32(base->eng, addr + 0x0c, 0x00000000);
 	nvkm_wo32(base->eng, addr + 0x10, 0x00000000);
 	nvkm_wo32(base->eng, addr + 0x14, 0x00000000);
-	bar->flush(bar);
 	nvkm_done(base->eng);
 }
...
@@ -205,7 +198,6 @@ nv50_fifo_chan_ctor_dma(struct nvkm_object *parent, struct nvkm_object *engine,
 	union {
 		struct nv50_channel_dma_v0 v0;
 	} *args = data;
-	struct nvkm_bar *bar = nvkm_bar(parent);
 	struct nv50_fifo_base *base = (void *)parent;
 	struct nv50_fifo_chan *chan;
 	int ret;
...
@@ -257,7 +249,6 @@ nv50_fifo_chan_ctor_dma(struct nvkm_object *parent, struct nvkm_object *engine,
 	nvkm_wo32(base->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
 				     (4 << 24) /* SEARCH_FULL */ |
 				     (chan->ramht->gpuobj.node->offset >> 4));
-	bar->flush(bar);
 	nvkm_done(base->ramfc);
 	return 0;
 }
...
@@ -270,7 +261,6 @@ nv50_fifo_chan_ctor_ind(struct nvkm_object *parent, struct nvkm_object *engine,
 	union {
 		struct nv50_channel_gpfifo_v0 v0;
 	} *args = data;
-	struct nvkm_bar *bar = nvkm_bar(parent);
 	struct nv50_fifo_base *base = (void *)parent;
 	struct nv50_fifo_chan *chan;
 	u64 ioffset, ilength;
...
@@ -324,7 +314,6 @@ nv50_fifo_chan_ctor_ind(struct nvkm_object *parent, struct nvkm_object *engine,
 	nvkm_wo32(base->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
 				     (4 << 24) /* SEARCH_FULL */ |
 				     (chan->ramht->gpuobj.node->offset >> 4));
-	bar->flush(bar);
 	nvkm_done(base->ramfc);
 	return 0;
 }
...
drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c

...
@@ -23,7 +22,6 @@
  */
 #include "ctxgf100.h"

-#include <subdev/bar.h>
 #include <subdev/fb.h>
 #include <subdev/mc.h>
 #include <subdev/timer.h>
...
@@ -1273,7 +1272,6 @@ gf100_grctx_generate(struct gf100_gr *gr)
 	struct gf100_grctx_oclass *oclass = (void *)nv_engine(gr)->cclass;
 	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
 	struct nvkm_device *device = subdev->device;
-	struct nvkm_bar *bar = device->bar;
 	struct nvkm_gpuobj *chan;
 	struct gf100_grctx info;
 	int ret, i;
...
@@ -1309,7 +1307,6 @@ gf100_grctx_generate(struct gf100_gr *gr)
 	/* context pointer (virt) */
 	nvkm_wo32(chan, 0x0210, 0x00080004);
 	nvkm_wo32(chan, 0x0214, 0x00000000);
-	bar->flush(bar);
 	nvkm_done(chan);

 	nvkm_wr32(device, 0x100cb8, (chan->addr + 0x1000) >> 8);
...
@@ -1341,7 +1338,6 @@ gf100_grctx_generate(struct gf100_gr *gr)
 		nvkm_wo32(chan, 0x80020, 0);
 		nvkm_wo32(chan, 0x80028, 0);
 		nvkm_wo32(chan, 0x8002c, 0);
-		bar->flush(bar);
 		nvkm_done(chan);
 	} else {
 		nvkm_wr32(device, 0x409840, 0x80000000);
...
drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv50.c

...
@@ -23,7 +23,6 @@
  */
 #include <engine/mpeg.h>

-#include <subdev/bar.h>
 #include <subdev/timer.h>

 struct nv50_mpeg_chan {
...
@@ -84,7 +83,6 @@ nv50_mpeg_context_ctor(struct nvkm_object *parent,
 		       struct nvkm_oclass *oclass, void *data, u32 size,
 		       struct nvkm_object **pobject)
 {
-	struct nvkm_bar *bar = nvkm_bar(parent);
 	struct nv50_mpeg_chan *chan;
 	struct nvkm_gpuobj *image;
 	int ret;
...
@@ -100,7 +98,6 @@ nv50_mpeg_context_ctor(struct nvkm_object *parent,
 	nvkm_kmap(image);
 	nvkm_wo32(image, 0x0070, 0x00801ec1);
 	nvkm_wo32(image, 0x007c, 0x0000037c);
-	bar->flush(bar);
 	nvkm_done(image);
 	return 0;
 }
...
drivers/gpu/drm/nouveau/nvkm/subdev/bar/base.c

...
@@ -23,96 +23,6 @@
  */
 #include "priv.h"

 #include <subdev/fb.h>
 #include <subdev/mmu.h>

-struct nvkm_barobj {
-	struct nvkm_object base;
-	struct nvkm_vma vma;
-	void __iomem *iomem;
-};
-
-static int
-nvkm_barobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		 struct nvkm_oclass *oclass, void *data, u32 size,
-		 struct nvkm_object **pobject)
-{
-	struct nvkm_device *device = nv_device(parent);
-	struct nvkm_bar *bar = nvkm_bar(device);
-	struct nvkm_mem *mem = data;
-	struct nvkm_barobj *barobj;
-	int ret;
-
-	ret = nvkm_object_create(parent, engine, oclass, 0, &barobj);
-	*pobject = nv_object(barobj);
-	if (ret)
-		return ret;
-
-	ret = bar->kmap(bar, mem, NV_MEM_ACCESS_RW, &barobj->vma);
-	if (ret)
-		return ret;
-
-	barobj->iomem = ioremap(nv_device_resource_start(device, 3) +
-				(u32)barobj->vma.offset, mem->size << 12);
-	if (!barobj->iomem) {
-		nvkm_warn(&bar->subdev, "PRAMIN ioremap failed\n");
-		return -ENOMEM;
-	}
-
-	return 0;
-}
-
-static void
-nvkm_barobj_dtor(struct nvkm_object *object)
-{
-	struct nvkm_bar *bar = nvkm_bar(object);
-	struct nvkm_barobj *barobj = (void *)object;
-	if (barobj->vma.node) {
-		if (barobj->iomem)
-			iounmap(barobj->iomem);
-		bar->unmap(bar, &barobj->vma);
-	}
-	nvkm_object_destroy(&barobj->base);
-}
-
-static u32
-nvkm_barobj_rd32(struct nvkm_object *object, u64 addr)
-{
-	struct nvkm_barobj *barobj = (void *)object;
-	return ioread32_native(barobj->iomem + addr);
-}
-
-static void
-nvkm_barobj_wr32(struct nvkm_object *object, u64 addr, u32 data)
-{
-	struct nvkm_barobj *barobj = (void *)object;
-	iowrite32_native(data, barobj->iomem + addr);
-}
-
-static struct nvkm_oclass
-nvkm_barobj_oclass = {
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nvkm_barobj_ctor,
-		.dtor = nvkm_barobj_dtor,
-		.init = _nvkm_object_init,
-		.fini = _nvkm_object_fini,
-		.rd32 = nvkm_barobj_rd32,
-		.wr32 = nvkm_barobj_wr32,
-	},
-};
-
-int
-nvkm_bar_alloc(struct nvkm_bar *bar, struct nvkm_object *parent,
-	       struct nvkm_mem *mem, struct nvkm_object **pobject)
-{
-	struct nvkm_object *gpuobj;
-	int ret = nvkm_object_old(parent, &parent->engine->subdev.object,
-				  &nvkm_barobj_oclass, mem, 0, &gpuobj);
-	if (ret == 0)
-		*pobject = gpuobj;
-	return ret;
-}
-
 int
 nvkm_bar_create_(struct nvkm_object *parent, struct nvkm_object *engine,
 		 struct nvkm_oclass *oclass, int length, void **pobject)
...
drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c

...
@@ -39,35 +39,18 @@ struct gf100_bar {
 	struct gf100_bar_vm bar[2];
 };

-static int
-gf100_bar_kmap(struct nvkm_bar *obj, struct nvkm_mem *mem, u32 flags,
-	       struct nvkm_vma *vma)
+static struct nvkm_vm *
+gf100_bar_kmap(struct nvkm_bar *obj)
 {
 	struct gf100_bar *bar = container_of(obj, typeof(*bar), base);
-	int ret;
-
-	ret = nvkm_vm_get(bar->bar[0].vm, mem->size << 12, 12, flags, vma);
-	if (ret)
-		return ret;
-
-	nvkm_vm_map(vma, mem);
-	return 0;
+	return bar->bar[0].vm;
 }

 static int
-gf100_bar_umap(struct nvkm_bar *obj, struct nvkm_mem *mem, u32 flags,
-	       struct nvkm_vma *vma)
+gf100_bar_umap(struct nvkm_bar *obj, u64 size, int type, struct nvkm_vma *vma)
 {
 	struct gf100_bar *bar = container_of(obj, typeof(*bar), base);
-	int ret;
-
-	ret = nvkm_vm_get(bar->bar[1].vm, mem->size << 12,
-			  mem->page_shift, flags, vma);
-	if (ret)
-		return ret;
-
-	nvkm_vm_map(vma, mem);
-	return 0;
+	return nvkm_vm_get(bar->bar[1].vm, size, type, NV_MEM_ACCESS_RW, vma);
 }

 static void
...
@@ -109,11 +92,7 @@ gf100_bar_ctor_vm(struct gf100_bar *bar, struct gf100_bar_vm *bar_vm,
 	 * Bootstrap page table lookup.
 	 */
 	if (bar_nr == 3) {
-		ret = nvkm_gpuobj_new(nv_object(bar), NULL,
-				      (bar_len >> 12) * 8, 0x1000,
-				      NVOBJ_FLAG_ZERO_ALLOC,
-				      &vm->pgt[0].obj[0]);
-		vm->pgt[0].refcount[0] = 1;
+		ret = nvkm_vm_boot(vm, bar_len);
 		if (ret)
 			return ret;
 	}
...
@@ -149,6 +128,10 @@ gf100_bar_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 	if (ret)
 		return ret;

+	device->bar = &bar->base;
+	bar->base.flush = g84_bar_flush;
+	spin_lock_init(&bar->lock);
+
 	/* BAR3 */
 	if (has_bar3) {
 		ret = gf100_bar_ctor_vm(bar, &bar->bar[0], &bar3_lock, 3);
...
@@ -161,14 +144,10 @@ gf100_bar_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 	if (ret)
 		return ret;

-	if (has_bar3) {
-		bar->base.alloc = nvkm_bar_alloc;
-		bar->base.kmap = gf100_bar_kmap;
-	}
+	if (has_bar3)
+		bar->base.kmap = gf100_bar_kmap;
 	bar->base.umap = gf100_bar_umap;
 	bar->base.unmap = gf100_bar_unmap;
-	bar->base.flush = g84_bar_flush;
-	spin_lock_init(&bar->lock);
 	return 0;
 }
...
drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c

...
@@ -40,34 +40,18 @@ struct nv50_bar {
 	struct nvkm_gpuobj *bar3;
 };

-static int
-nv50_bar_kmap(struct nvkm_bar *obj, struct nvkm_mem *mem, u32 flags,
-	      struct nvkm_vma *vma)
+static struct nvkm_vm *
+nv50_bar_kmap(struct nvkm_bar *obj)
 {
 	struct nv50_bar *bar = container_of(obj, typeof(*bar), base);
-	int ret;
-
-	ret = nvkm_vm_get(bar->bar3_vm, mem->size << 12, 12, flags, vma);
-	if (ret)
-		return ret;
-
-	nvkm_vm_map(vma, mem);
-	return 0;
+	return bar->bar3_vm;
 }

 static int
-nv50_bar_umap(struct nvkm_bar *obj, struct nvkm_mem *mem, u32 flags,
-	      struct nvkm_vma *vma)
+nv50_bar_umap(struct nvkm_bar *obj, u64 size, int type, struct nvkm_vma *vma)
 {
 	struct nv50_bar *bar = container_of(obj, typeof(*bar), base);
-	int ret;
-
-	ret = nvkm_vm_get(bar->bar1_vm, mem->size << 12, 12, flags, vma);
-	if (ret)
-		return ret;
-
-	nvkm_vm_map(vma, mem);
-	return 0;
+	return nvkm_vm_get(bar->bar1_vm, size, type, NV_MEM_ACCESS_RW, vma);
 }

 static void
...
@@ -152,10 +136,7 @@ nv50_bar_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 	atomic_inc(&vm->engref[NVDEV_SUBDEV_BAR]);

-	ret = nvkm_gpuobj_new(nv_object(bar), heap,
-			      ((limit-- - start) >> 12) * 8, 0x1000,
-			      NVOBJ_FLAG_ZERO_ALLOC,
-			      &vm->pgt[0].obj[0]);
-	vm->pgt[0].refcount[0] = 1;
+	ret = nvkm_vm_boot(vm, limit-- - start);
 	if (ret)
 		return ret;
...
@@ -207,7 +188,6 @@ nv50_bar_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 	nvkm_wo32(bar->bar1, 0x14, 0x00000000);
 	nvkm_done(bar->bar1);

-	bar->base.alloc = nvkm_bar_alloc;
 	bar->base.kmap = nv50_bar_kmap;
 	bar->base.umap = nv50_bar_umap;
 	bar->base.unmap = nv50_bar_unmap;
...
drivers/gpu/drm/nouveau/nvkm/subdev/bar/priv.h

...
@@ -17,9 +17,6 @@ void _nvkm_bar_dtor(struct nvkm_object *);
 #define _nvkm_bar_init _nvkm_subdev_init
 #define _nvkm_bar_fini _nvkm_subdev_fini

-int nvkm_bar_alloc(struct nvkm_bar *, struct nvkm_object *, struct nvkm_mem *,
-		   struct nvkm_object **);
-
 void g84_bar_flush(struct nvkm_bar *);

 int gf100_bar_ctor(struct nvkm_object *, struct nvkm_object *,
...
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c

...
@@ -23,83 +23,221 @@
  */
 #include "priv.h"

 #include <core/engine.h>
+#include <core/memory.h>
+#include <subdev/bar.h>

 /******************************************************************************
  * instmem object base implementation
  *****************************************************************************/
+#define nvkm_instobj(p) container_of((p), struct nvkm_instobj, memory)

-void
-_nvkm_instobj_dtor(struct nvkm_object *object)
-{
-	struct nvkm_instmem *imem = nvkm_instmem(object);
-	struct nvkm_instobj *iobj = (void *)object;
-
-	mutex_lock(&nv_subdev(imem)->mutex);
-	list_del(&iobj->head);
-	mutex_unlock(&nv_subdev(imem)->mutex);
-
-	return nvkm_object_destroy(&iobj->object);
-}
-
-int
-nvkm_instobj_create_(struct nvkm_object *parent, struct nvkm_object *engine,
-		     struct nvkm_oclass *oclass, int length, void **pobject)
-{
-	struct nvkm_instmem *imem = nvkm_instmem(parent);
-	struct nvkm_instobj *iobj;
-	int ret;
-
-	ret = nvkm_object_create_(parent, engine, oclass, NV_MEMOBJ_CLASS,
-				  length, pobject);
-	iobj = *pobject;
-	if (ret)
-		return ret;
-
-	mutex_lock(&imem->subdev.mutex);
-	list_add(&iobj->head, &imem->list);
-	mutex_unlock(&imem->subdev.mutex);
-	return 0;
-}
+struct nvkm_instobj {
+	struct nvkm_memory memory;
+	struct nvkm_memory *parent;
+	struct nvkm_instmem *imem;
+	struct list_head head;
+	u32 *suspend;
+	void __iomem *map;
+};
+
+static enum nvkm_memory_target
+nvkm_instobj_target(struct nvkm_memory *memory)
+{
+	memory = nvkm_instobj(memory)->parent;
+	return nvkm_memory_target(memory);
+}
+
+static u64
+nvkm_instobj_addr(struct nvkm_memory *memory)
+{
+	memory = nvkm_instobj(memory)->parent;
+	return nvkm_memory_addr(memory);
+}
+
+static u64
+nvkm_instobj_size(struct nvkm_memory *memory)
+{
+	memory = nvkm_instobj(memory)->parent;
+	return nvkm_memory_size(memory);
+}
+
+static void
+nvkm_instobj_release(struct nvkm_memory *memory)
+{
+	struct nvkm_instobj *iobj = nvkm_instobj(memory);
+	struct nvkm_bar *bar = iobj->imem->subdev.device->bar;
+	if (bar && bar->flush)
+		bar->flush(bar);
+}
+
+static void __iomem *
+nvkm_instobj_acquire(struct nvkm_memory *memory)
+{
+	return nvkm_instobj(memory)->map;
+}
+
+static u32
+nvkm_instobj_rd32(struct nvkm_memory *memory, u64 offset)
+{
+	return ioread32_native(nvkm_instobj(memory)->map + offset);
+}
+
+static void
+nvkm_instobj_wr32(struct nvkm_memory *memory, u64 offset, u32 data)
+{
+	iowrite32_native(data, nvkm_instobj(memory)->map + offset);
+}
+
+static void
+nvkm_instobj_map(struct nvkm_memory *memory, struct nvkm_vma *vma, u64 offset)
+{
+	memory = nvkm_instobj(memory)->parent;
+	nvkm_memory_map(memory, vma, offset);
+}
+
+static void *
+nvkm_instobj_dtor(struct nvkm_memory *memory)
+{
+	struct nvkm_instobj *iobj = nvkm_instobj(memory);
+	list_del(&iobj->head);
+	nvkm_memory_del(&iobj->parent);
+	return iobj;
+}
+
+const struct nvkm_memory_func
+nvkm_instobj_func = {
+	.dtor = nvkm_instobj_dtor,
+	.target = nvkm_instobj_target,
+	.addr = nvkm_instobj_addr,
+	.size = nvkm_instobj_size,
+	.acquire = nvkm_instobj_acquire,
+	.release = nvkm_instobj_release,
+	.rd32 = nvkm_instobj_rd32,
+	.wr32 = nvkm_instobj_wr32,
+	.map = nvkm_instobj_map,
+};
+
+static void
+nvkm_instobj_boot(struct nvkm_memory *memory, struct nvkm_vm *vm)
+{
+	memory = nvkm_instobj(memory)->parent;
+	nvkm_memory_boot(memory, vm);
+}
+
+static void
+nvkm_instobj_release_slow(struct nvkm_memory *memory)
+{
+	struct nvkm_instobj *iobj = nvkm_instobj(memory);
+	nvkm_instobj_release(memory);
+	nvkm_done(iobj->parent);
+}
+
+static void __iomem *
+nvkm_instobj_acquire_slow(struct nvkm_memory *memory)
+{
+	struct nvkm_instobj *iobj = nvkm_instobj(memory);
+	iobj->map = nvkm_kmap(iobj->parent);
+	if (iobj->map)
+		memory->func = &nvkm_instobj_func;
+	return iobj->map;
+}
+
+static u32
+nvkm_instobj_rd32_slow(struct nvkm_memory *memory, u64 offset)
+{
+	struct nvkm_instobj *iobj = nvkm_instobj(memory);
+	return nvkm_ro32(iobj->parent, offset);
+}
+
+static void
+nvkm_instobj_wr32_slow(struct nvkm_memory *memory, u64 offset, u32 data)
+{
+	struct nvkm_instobj *iobj = nvkm_instobj(memory);
+	return nvkm_wo32(iobj->parent, offset, data);
+}
+
+const struct nvkm_memory_func
+nvkm_instobj_func_slow = {
+	.dtor = nvkm_instobj_dtor,
+	.target = nvkm_instobj_target,
+	.addr = nvkm_instobj_addr,
+	.size = nvkm_instobj_size,
+	.boot = nvkm_instobj_boot,
+	.acquire = nvkm_instobj_acquire_slow,
+	.release = nvkm_instobj_release_slow,
+	.rd32 = nvkm_instobj_rd32_slow,
+	.wr32 = nvkm_instobj_wr32_slow,
+	.map = nvkm_instobj_map,
+};
+
+static int
+nvkm_instobj_new(struct nvkm_instmem *imem, u32 size, u32 align, bool zero,
+		 struct nvkm_memory **pmemory)
+{
+	struct nvkm_instmem_impl *impl = (void *)imem->subdev.object.oclass;
+	struct nvkm_memory *memory;
+	struct nvkm_instobj *iobj;
+	u32 offset;
+	int ret;
+
+	ret = impl->memory_new(imem, size, align, zero, &memory);
+	if (ret)
+		goto done;
+
+	if (!impl->persistent) {
+		if (!(iobj = kzalloc(sizeof(*iobj), GFP_KERNEL))) {
+			ret = -ENOMEM;
+			goto done;
+		}
+
+		nvkm_memory_ctor(&nvkm_instobj_func_slow, &iobj->memory);
+		iobj->parent = memory;
+		iobj->imem = imem;
+		list_add_tail(&iobj->head, &imem->list);
+		memory = &iobj->memory;
+	}
+
+	if (!impl->zero && zero) {
+		void __iomem *map = nvkm_kmap(memory);
+		if (unlikely(!map)) {
+			for (offset = 0; offset < size; offset += 4)
+				nvkm_wo32(memory, offset, 0x00000000);
+		} else {
+			memset_io(map, 0x00, size);
+		}
+		nvkm_done(memory);
+	}
+
+done:
+	if (ret)
+		nvkm_memory_del(&memory);
+	*pmemory = memory;
+	return ret;
+}

 /******************************************************************************
  * instmem subdev base implementation
  *****************************************************************************/

-static int
-nvkm_instmem_alloc(struct nvkm_instmem *imem, struct nvkm_object *parent,
-		   u32 size, u32 align, struct nvkm_object **pobject)
-{
-	struct nvkm_instmem_impl *impl = (void *)imem->subdev.object.oclass;
-	struct nvkm_instobj_args args = { .size = size, .align = align };
-	return nvkm_object_old(parent, &parent->engine->subdev.object,
-			       impl->instobj, &args, sizeof(args), pobject);
-}
-
 int
 _nvkm_instmem_fini(struct nvkm_object *object, bool suspend)
 {
 	struct nvkm_instmem *imem = (void *)object;
 	struct nvkm_instobj *iobj;
-	int i, ret = 0;
+	int i;

 	if (suspend) {
 		mutex_lock(&imem->subdev.mutex);
 		list_for_each_entry(iobj, &imem->list, head) {
-			iobj->suspend = vmalloc(iobj->size);
-			if (!iobj->suspend) {
-				ret = -ENOMEM;
-				break;
-			}
-
-			for (i = 0; i < iobj->size; i += 4) {
-				nvkm_object_rd32(&iobj->object, i, (u32 *)
-						 &iobj->suspend[i/4]);
-			}
+			struct nvkm_memory *memory = iobj->parent;
+			u64 size = nvkm_memory_size(memory);
+
+			iobj->suspend = vmalloc(size);
+			if (!iobj->suspend)
+				return -ENOMEM;
+
+			for (i = 0; i < size; i += 4)
+				iobj->suspend[i / 4] = nvkm_ro32(memory, i);
 		}
 		mutex_unlock(&imem->subdev.mutex);
-		if (ret)
-			return ret;
 	}

 	return nvkm_subdev_fini_old(&imem->subdev, suspend);
...
@@ -116,18 +254,17 @@ _nvkm_instmem_init(struct nvkm_object *object)
 	if (ret)
 		return ret;

 	mutex_lock(&imem->subdev.mutex);
 	list_for_each_entry(iobj, &imem->list, head) {
 		if (iobj->suspend) {
-			for (i = 0; i < iobj->size; i += 4) {
-				nvkm_object_wr32(&iobj->object, i, *(u32 *)
-						 &iobj->suspend[i/4]);
-			}
+			struct nvkm_memory *memory = iobj->parent;
+			u64 size = nvkm_memory_size(memory);
+			for (i = 0; i < size; i += 4)
+				nvkm_wo32(memory, i, iobj->suspend[i / 4]);
 			vfree(iobj->suspend);
 			iobj->suspend = NULL;
 		}
 	}
 	mutex_unlock(&imem->subdev.mutex);
 	return 0;
 }
...
@@ -135,6 +272,7 @@ int
 nvkm_instmem_create_(struct nvkm_object *parent, struct nvkm_object *engine,
 		     struct nvkm_oclass *oclass, int length, void **pobject)
 {
+	struct nvkm_device *device = (void *)parent;
 	struct nvkm_instmem *imem;
 	int ret;
...
@@ -144,7 +282,9 @@ nvkm_instmem_create_(struct nvkm_object *parent, struct nvkm_object *engine,
 	if (ret)
 		return ret;

+	device->imem = imem;
+
 	INIT_LIST_HEAD(&imem->list);
-	imem->alloc = nvkm_instmem_alloc;
+	imem->alloc = nvkm_instobj_new;
 	return 0;
 }
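The two memory_func tables above implement a small optimisation worth spelling out: a wrapped instobj starts life using nvkm_instobj_func_slow, whose acquire() asks the parent allocation for a direct mapping; if that succeeds, the object's func pointer is swapped to nvkm_instobj_func so later rd32/wr32 hit the cached iomem pointer instead of bouncing through the parent. A condensed restatement of that switch, with comments added (same logic as nvkm_instobj_acquire_slow() above, not new behaviour):

static void __iomem *
acquire_slow_sketch(struct nvkm_memory *memory)
{
	struct nvkm_instobj *iobj = nvkm_instobj(memory);

	iobj->map = nvkm_kmap(iobj->parent);       /* try for a direct mapping */
	if (iobj->map)
		memory->func = &nvkm_instobj_func; /* fast path for future accesses */
	return iobj->map;                          /* NULL keeps using the slow path */
}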
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
(diff collapsed in the original view; +120 -94)
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c
View file @
d8e83994
...
...
@@ -21,83 +21,119 @@
*
* Authors: Ben Skeggs
*/
#include "nv04.h"
#define nv04_instmem(p) container_of((p), struct nv04_instmem, base)
#include "priv.h"
#include <core/memory.h>
#include <core/ramht.h>
struct
nv04_instmem
{
struct
nvkm_instmem
base
;
struct
nvkm_mm
heap
;
};
/******************************************************************************
* instmem object implementation
*****************************************************************************/
#define nv04_instobj(p) container_of((p), struct nv04_instobj, memory)
static
u32
nv04_instobj_rd32
(
struct
nvkm_object
*
object
,
u64
addr
)
struct
nv04_instobj
{
struct
nvkm_memory
memory
;
struct
nv04_instmem
*
imem
;
struct
nvkm_mm_node
*
node
;
};
static
enum
nvkm_memory_target
nv04_instobj_target
(
struct
nvkm_memory
*
memory
)
{
return
NVKM_MEM_TARGET_INST
;
}
static
u64
nv04_instobj_addr
(
struct
nvkm_memory
*
memory
)
{
return
nv04_instobj
(
memory
)
->
node
->
offset
;
}
static
u64
nv04_instobj_size
(
struct
nvkm_memory
*
memory
)
{
return
nv04_instobj
(
memory
)
->
node
->
length
;
}
static
void
__iomem
*
nv04_instobj_acquire
(
struct
nvkm_memory
*
memory
)
{
struct
nv
km_instmem
*
imem
=
nvkm_instmem
(
object
);
struct
nv
04_instobj
*
node
=
(
void
*
)
object
;
return
imem
->
func
->
rd32
(
imem
,
node
->
mem
->
offset
+
addr
)
;
struct
nv
04_instobj
*
iobj
=
nv04_instobj
(
memory
);
struct
nv
km_device
*
device
=
iobj
->
imem
->
base
.
subdev
.
device
;
return
device
->
pri
+
0x700000
+
iobj
->
node
->
offset
;
}
static
void
nv04_instobj_
wr32
(
struct
nvkm_object
*
object
,
u64
addr
,
u32
data
)
nv04_instobj_
release
(
struct
nvkm_memory
*
memory
)
{
struct
nvkm_instmem
*
imem
=
nvkm_instmem
(
object
);
struct
nv04_instobj
*
node
=
(
void
*
)
object
;
imem
->
func
->
wr32
(
imem
,
node
->
mem
->
offset
+
addr
,
data
);
}
static
u32
nv04_instobj_rd32
(
struct
nvkm_memory
*
memory
,
u64
offset
)
{
struct
nv04_instobj
*
iobj
=
nv04_instobj
(
memory
);
struct
nvkm_device
*
device
=
iobj
->
imem
->
base
.
subdev
.
device
;
return
nvkm_rd32
(
device
,
0x700000
+
iobj
->
node
->
offset
+
offset
);
}
static
void
nv04_instobj_
dtor
(
struct
nvkm_object
*
object
)
nv04_instobj_
wr32
(
struct
nvkm_memory
*
memory
,
u64
offset
,
u32
data
)
{
struct
nv04_instmem
*
imem
=
(
void
*
)
nvkm_instmem
(
object
);
struct
nv04_instobj
*
node
=
(
void
*
)
object
;
mutex_lock
(
&
imem
->
base
.
subdev
.
mutex
);
nvkm_mm_free
(
&
imem
->
heap
,
&
node
->
mem
);
mutex_unlock
(
&
imem
->
base
.
subdev
.
mutex
);
nvkm_instobj_destroy
(
&
node
->
base
);
struct
nv04_instobj
*
iobj
=
nv04_instobj
(
memory
);
struct
nvkm_device
*
device
=
iobj
->
imem
->
base
.
subdev
.
device
;
nvkm_wr32
(
device
,
0x700000
+
iobj
->
node
->
offset
+
offset
,
data
);
}
static
void
*
nv04_instobj_dtor
(
struct
nvkm_memory
*
memory
)
{
struct
nv04_instobj
*
iobj
=
nv04_instobj
(
memory
);
mutex_lock
(
&
iobj
->
imem
->
base
.
subdev
.
mutex
);
nvkm_mm_free
(
&
iobj
->
imem
->
heap
,
&
iobj
->
node
);
mutex_unlock
(
&
iobj
->
imem
->
base
.
subdev
.
mutex
);
return
iobj
;
}
static
const
struct
nvkm_memory_func
nv04_instobj_func
=
{
.
dtor
=
nv04_instobj_dtor
,
.
target
=
nv04_instobj_target
,
.
size
=
nv04_instobj_size
,
.
addr
=
nv04_instobj_addr
,
.
acquire
=
nv04_instobj_acquire
,
.
release
=
nv04_instobj_release
,
.
rd32
=
nv04_instobj_rd32
,
.
wr32
=
nv04_instobj_wr32
,
};
static int
-nv04_instobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-                  struct nvkm_oclass *oclass, void *data, u32 size,
-                  struct nvkm_object **pobject)
+nv04_instobj_new(struct nvkm_instmem *base, u32 size, u32 align, bool zero,
+                 struct nvkm_memory **pmemory)
{
-       struct nv04_instmem *imem = (void *)nvkm_instmem(parent);
-       struct nv04_instobj *node;
-       struct nvkm_instobj_args *args = data;
+       struct nv04_instmem *imem = nv04_instmem(base);
+       struct nv04_instobj *iobj;
        int ret;

-       if (!args->align)
-               args->align = 1;
+       if (!(iobj = kzalloc(sizeof(*iobj), GFP_KERNEL)))
+               return -ENOMEM;
+       *pmemory = &iobj->memory;

-       ret = nvkm_instobj_create(parent, engine, oclass, &node);
-       *pobject = nv_object(node);
-       if (ret)
-               return ret;
+       nvkm_memory_ctor(&nv04_instobj_func, &iobj->memory);
+       iobj->imem = imem;

        mutex_lock(&imem->base.subdev.mutex);
-       ret = nvkm_mm_head(&imem->heap, 0, 1, args->size, args->size,
-                          args->align, &node->mem);
+       ret = nvkm_mm_head(&imem->heap, 0, 1, size, size,
+                          align ? align : 1, &iobj->node);
        mutex_unlock(&imem->base.subdev.mutex);
-       if (ret)
-               return ret;
-
-       node->base.addr = node->mem->offset;
-       node->base.size = node->mem->length;
-       return 0;
+       return ret;
}
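The constructor above is no longer an nvkm_object ctor; it is wired up through the new .memory_new hook in nvkm_instmem_impl (see the oclass and priv.h hunks below), so an allocation now yields a plain nvkm_memory. A minimal sketch of how such a hook might be dispatched follows; the wrapper name, the way the impl pointer is passed in, and the explicit zero-fill loop are assumptions for illustration, not code from this commit.

/* Hypothetical dispatcher: route an allocation to the per-chipset
 * .memory_new hook and honour the impl's "zero" policy.  The function name
 * and parameters are assumptions made for this sketch. */
static int
instmem_alloc_sketch(struct nvkm_instmem *imem,
                     const struct nvkm_instmem_impl *impl,
                     u32 size, u32 align, bool zero,
                     struct nvkm_memory **pmemory)
{
        int ret = impl->memory_new(imem, size, align, zero, pmemory);
        if (ret)
                return ret;

        if (zero && !impl->zero) {
                /* backend does not zero for us: scrub through the vtable */
                struct nvkm_memory *memory = *pmemory;
                u64 offset, limit = nvkm_memory_size(memory);
                for (offset = 0; offset < limit; offset += 4)
                        memory->func->wr32(memory, offset, 0x00000000);
        }
        return 0;
}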
-struct nvkm_instobj_impl
-nv04_instobj_oclass = {
-       .base.ofuncs = &(struct nvkm_ofuncs) {
-               .ctor = nv04_instobj_ctor,
-               .dtor = nv04_instobj_dtor,
-               .init = _nvkm_instobj_init,
-               .fini = _nvkm_instobj_fini,
-               .rd32 = nv04_instobj_rd32,
-               .wr32 = nv04_instobj_wr32,
-       },
-};
/******************************************************************************
* instmem subdev implementation
*****************************************************************************/
...
...
@@ -114,17 +150,15 @@ nv04_instmem_wr32(struct nvkm_instmem *imem, u32 addr, u32 data)
        nvkm_wr32(imem->subdev.device, 0x700000 + addr, data);
}
-void
+static void
nv04_instmem_dtor(struct nvkm_object *object)
{
        struct nv04_instmem *imem = (void *)object;
-       nvkm_gpuobj_ref(NULL, &imem->ramfc);
-       nvkm_gpuobj_ref(NULL, &imem->ramro);
-       nvkm_ramht_ref(NULL, &imem->ramht);
-       nvkm_gpuobj_ref(NULL, &imem->vbios);
+       nvkm_gpuobj_ref(NULL, &imem->base.ramfc);
+       nvkm_gpuobj_ref(NULL, &imem->base.ramro);
+       nvkm_ramht_ref(NULL, &imem->base.ramht);
+       nvkm_gpuobj_ref(NULL, &imem->base.vbios);
        nvkm_mm_fini(&imem->heap);
        if (imem->iomem)
                iounmap(imem->iomem);
        nvkm_instmem_destroy(&imem->base);
}
...
...
@@ -158,24 +192,26 @@ nv04_instmem_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
        /* 0x00000-0x10000: reserve for probable vbios image */
        ret = nvkm_gpuobj_new(nv_object(imem), NULL, 0x10000, 0, 0,
-                             &imem->vbios);
+                             &imem->base.vbios);
        if (ret)
                return ret;

        /* 0x10000-0x18000: reserve for RAMHT */
-       ret = nvkm_ramht_new(nv_object(imem), NULL, 0x08000, 0, &imem->ramht);
+       ret = nvkm_ramht_new(nv_object(imem), NULL, 0x08000, 0,
+                            &imem->base.ramht);
        if (ret)
                return ret;

        /* 0x18000-0x18800: reserve for RAMFC (enough for 32 nv30 channels) */
        ret = nvkm_gpuobj_new(nv_object(imem), NULL, 0x00800, 0,
-                             NVOBJ_FLAG_ZERO_ALLOC, &imem->ramfc);
+                             NVOBJ_FLAG_ZERO_ALLOC, &imem->base.ramfc);
        if (ret)
                return ret;

        /* 0x18800-0x18a00: reserve for RAMRO */
        ret = nvkm_gpuobj_new(nv_object(imem), NULL, 0x00200, 0, 0,
-                             &imem->ramro);
+                             &imem->base.ramro);
        if (ret)
                return ret;
...
...
@@ -191,5 +227,7 @@ nv04_instmem_oclass = &(struct nvkm_instmem_impl) {
                .init = _nvkm_instmem_init,
                .fini = _nvkm_instmem_fini,
        },
-       .instobj = &nv04_instobj_oclass.base,
+       .memory_new = nv04_instobj_new,
+       .persistent = false,
+       .zero = false,
}.base;
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.h
deleted
100644 → 0
#ifndef __NV04_INSTMEM_H__
#define __NV04_INSTMEM_H__
#include "priv.h"
#include <core/mm.h>
extern struct nvkm_instobj_impl nv04_instobj_oclass;

struct nv04_instmem {
        struct nvkm_instmem base;
        void __iomem *iomem;
        struct nvkm_mm heap;

        struct nvkm_gpuobj *vbios;
        struct nvkm_ramht *ramht;
        struct nvkm_gpuobj *ramro;
        struct nvkm_gpuobj *ramfc;
};

static inline struct nv04_instmem *
nv04_instmem(void *obj)
{
        return (void *)nvkm_instmem(obj);
}

struct nv04_instobj {
        struct nvkm_instobj base;
        struct nvkm_mm_node *mem;
};

void nv04_instmem_dtor(struct nvkm_object *);

int nv04_instmem_alloc(struct nvkm_instmem *, struct nvkm_object *,
                       u32 size, u32 align, struct nvkm_object **pobject);
#endif
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv40.c
...
...
@@ -21,11 +21,118 @@
*
* Authors: Ben Skeggs
*/
#include "nv04.h"
#define nv40_instmem(p) container_of((p), struct nv40_instmem, base)
#include "priv.h"
#include <core/memory.h>
#include <core/ramht.h>
#include <engine/gr/nv40.h>
struct nv40_instmem {
        struct nvkm_instmem base;
        struct nvkm_mm heap;
        void __iomem *iomem;
};
/******************************************************************************
* instmem object implementation
*****************************************************************************/
#define nv40_instobj(p) container_of((p), struct nv40_instobj, memory)
struct nv40_instobj {
        struct nvkm_memory memory;
        struct nv40_instmem *imem;
        struct nvkm_mm_node *node;
};
static enum nvkm_memory_target
nv40_instobj_target(struct nvkm_memory *memory)
{
        return NVKM_MEM_TARGET_INST;
}

static u64
nv40_instobj_addr(struct nvkm_memory *memory)
{
        return nv40_instobj(memory)->node->offset;
}

static u64
nv40_instobj_size(struct nvkm_memory *memory)
{
        return nv40_instobj(memory)->node->length;
}
static void __iomem *
nv40_instobj_acquire(struct nvkm_memory *memory)
{
        struct nv40_instobj *iobj = nv40_instobj(memory);
        return iobj->imem->iomem + iobj->node->offset;
}

static void
nv40_instobj_release(struct nvkm_memory *memory)
{
}
static u32
nv40_instobj_rd32(struct nvkm_memory *memory, u64 offset)
{
        struct nv40_instobj *iobj = nv40_instobj(memory);
        return ioread32_native(iobj->imem->iomem + iobj->node->offset + offset);
}

static void
nv40_instobj_wr32(struct nvkm_memory *memory, u64 offset, u32 data)
{
        struct nv40_instobj *iobj = nv40_instobj(memory);
        iowrite32_native(data, iobj->imem->iomem + iobj->node->offset + offset);
}
static void *
nv40_instobj_dtor(struct nvkm_memory *memory)
{
        struct nv40_instobj *iobj = nv40_instobj(memory);
        mutex_lock(&iobj->imem->base.subdev.mutex);
        nvkm_mm_free(&iobj->imem->heap, &iobj->node);
        mutex_unlock(&iobj->imem->base.subdev.mutex);
        return iobj;
}
static const struct nvkm_memory_func
nv40_instobj_func = {
        .dtor = nv40_instobj_dtor,
        .target = nv40_instobj_target,
        .size = nv40_instobj_size,
        .addr = nv40_instobj_addr,
        .acquire = nv40_instobj_acquire,
        .release = nv40_instobj_release,
        .rd32 = nv40_instobj_rd32,
        .wr32 = nv40_instobj_wr32,
};
static int
nv40_instobj_new(struct nvkm_instmem *base, u32 size, u32 align, bool zero,
                 struct nvkm_memory **pmemory)
{
        struct nv40_instmem *imem = nv40_instmem(base);
        struct nv40_instobj *iobj;
        int ret;

        if (!(iobj = kzalloc(sizeof(*iobj), GFP_KERNEL)))
                return -ENOMEM;
        *pmemory = &iobj->memory;

        nvkm_memory_ctor(&nv40_instobj_func, &iobj->memory);
        iobj->imem = imem;

        mutex_lock(&imem->base.subdev.mutex);
        ret = nvkm_mm_head(&imem->heap, 0, 1, size, size,
                           align ? align : 1, &iobj->node);
        mutex_unlock(&imem->base.subdev.mutex);
        return ret;
}
/******************************************************************************
* instmem subdev implementation
*****************************************************************************/
...
...
@@ -33,17 +140,31 @@
static u32
nv40_instmem_rd32(struct nvkm_instmem *obj, u32 addr)
{
-       struct nv04_instmem *imem = container_of(obj, typeof(*imem), base);
+       struct nv40_instmem *imem = container_of(obj, typeof(*imem), base);
        return ioread32_native(imem->iomem + addr);
}

static void
nv40_instmem_wr32(struct nvkm_instmem *obj, u32 addr, u32 data)
{
-       struct nv04_instmem *imem = container_of(obj, typeof(*imem), base);
+       struct nv40_instmem *imem = container_of(obj, typeof(*imem), base);
        iowrite32_native(data, imem->iomem + addr);
}
static void
nv40_instmem_dtor(struct nvkm_object *object)
{
        struct nv40_instmem *imem = (void *)object;
        nvkm_gpuobj_ref(NULL, &imem->base.ramfc);
        nvkm_gpuobj_ref(NULL, &imem->base.ramro);
        nvkm_ramht_ref(NULL, &imem->base.ramht);
        nvkm_gpuobj_ref(NULL, &imem->base.vbios);
        nvkm_mm_fini(&imem->heap);
        if (imem->iomem)
                iounmap(imem->iomem);
        nvkm_instmem_destroy(&imem->base);
}
static const struct nvkm_instmem_func
nv40_instmem_func = {
        .rd32 = nv40_instmem_rd32,
...
...
@@ -56,7 +177,7 @@ nv40_instmem_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
                  struct nvkm_object **pobject)
{
        struct nvkm_device *device = (void *)parent;
-       struct nv04_instmem *imem;
+       struct nv40_instmem *imem;
        int ret, bar, vs;

        ret = nvkm_instmem_create(parent, engine, oclass, &imem);
...
...
@@ -86,7 +207,7 @@ nv40_instmem_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
        vs = hweight8((nvkm_rd32(device, 0x001540) & 0x0000ff00) >> 8);
        if      (device->chipset == 0x40) imem->base.reserved = 0x6aa0 * vs;
        else if (device->chipset  < 0x43) imem->base.reserved = 0x4f00 * vs;
        else if (nv44_gr_class(imem))     imem->base.reserved = 0x4980 * vs;
        else                              imem->base.reserved = 0x4a40 * vs;
        imem->base.reserved += 16 * 1024;
        imem->base.reserved *= 32;        /* per-channel */
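To make the reservation arithmetic above concrete, here is a small standalone calculation. The vs value of 1 and the default 0x4a40 branch are assumptions chosen for the example; in the driver vs is derived from register 0x001540 and the branch depends on the chipset.

#include <stdio.h>

/* Standalone illustration of the nv40 instmem reservation formula above;
 * vs == 1 is assumed here, the driver reads it from register 0x001540. */
int main(void)
{
        unsigned int vs = 1;                  /* assumed unit count from 0x001540 */
        unsigned int reserved = 0x4a40 * vs;  /* default (non-NV44-class) branch */

        reserved += 16 * 1024;                /* fixed per-channel overhead */
        reserved *= 32;                       /* room for 32 channels */
        printf("reserved = 0x%x bytes (%u KiB)\n", reserved, reserved >> 10);
        return 0;
}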
...
...
@@ -101,12 +222,13 @@ nv40_instmem_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
        /* 0x00000-0x10000: reserve for probable vbios image */
        ret = nvkm_gpuobj_new(nv_object(imem), NULL, 0x10000, 0, 0,
-                             &imem->vbios);
+                             &imem->base.vbios);
        if (ret)
                return ret;

        /* 0x10000-0x18000: reserve for RAMHT */
-       ret = nvkm_ramht_new(nv_object(imem), NULL, 0x08000, 0, &imem->ramht);
+       ret = nvkm_ramht_new(nv_object(imem), NULL, 0x08000, 0,
+                            &imem->base.ramht);
        if (ret)
                return ret;
...
...
@@ -114,7 +236,7 @@ nv40_instmem_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
* 0x18200-0x20000: padding
*/
        ret = nvkm_gpuobj_new(nv_object(imem), NULL, 0x08000, 0, 0,
-                             &imem->ramro);
+                             &imem->base.ramro);
        if (ret)
                return ret;
...
...
@@ -122,7 +244,7 @@ nv40_instmem_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
* 0x21000-0x40000: padding and some unknown crap
*/
        ret = nvkm_gpuobj_new(nv_object(imem), NULL, 0x20000, 0,
-                             NVOBJ_FLAG_ZERO_ALLOC, &imem->ramfc);
+                             NVOBJ_FLAG_ZERO_ALLOC, &imem->base.ramfc);
        if (ret)
                return ret;
...
...
@@ -134,9 +256,11 @@ nv40_instmem_oclass = &(struct nvkm_instmem_impl) {
        .base.handle = NV_SUBDEV(INSTMEM, 0x40),
        .base.ofuncs = &(struct nvkm_ofuncs) {
                .ctor = nv40_instmem_ctor,
-               .dtor = nv04_instmem_dtor,
+               .dtor = nv40_instmem_dtor,
                .init = _nvkm_instmem_init,
                .fini = _nvkm_instmem_fini,
        },
-       .instobj = &nv04_instobj_oclass.base,
+       .memory_new = nv40_instobj_new,
+       .persistent = false,
+       .zero = false,
}.base;
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
...
...
@@ -21,115 +21,201 @@
*
* Authors: Ben Skeggs
*/
#define nv50_instmem(p) container_of((p), struct nv50_instmem, base)
#include "priv.h"
#include <core/memory.h>
#include <subdev/bar.h>
#include <subdev/fb.h>
#include <subdev/mmu.h>
struct nv50_instmem {
        struct nvkm_instmem base;
        unsigned long lock_flags;
        spinlock_t lock;
        u64 addr;
};
/******************************************************************************
* instmem object implementation
*****************************************************************************/
#define nv50_instobj(p) container_of((p), struct nv50_instobj, memory)
struct nv50_instobj {
-       struct nvkm_instobj base;
+       struct nvkm_memory memory;
+       struct nv50_instmem *imem;
        struct nvkm_mem *mem;
+       struct nvkm_vma bar;
+       void *map;
};
/******************************************************************************
* instmem object implementation
*****************************************************************************/
static enum nvkm_memory_target
nv50_instobj_target(struct nvkm_memory *memory)
{
        return NVKM_MEM_TARGET_VRAM;
}

static u64
nv50_instobj_addr(struct nvkm_memory *memory)
{
        return nv50_instobj(memory)->mem->offset;
}

static u64
nv50_instobj_size(struct nvkm_memory *memory)
{
        return (u64)nv50_instobj(memory)->mem->size << NVKM_RAM_MM_SHIFT;
}
static void
nv50_instobj_boot(struct nvkm_memory *memory, struct nvkm_vm *vm)
{
        struct nv50_instobj *iobj = nv50_instobj(memory);
        struct nvkm_subdev *subdev = &iobj->imem->base.subdev;
        struct nvkm_device *device = subdev->device;
        u64 size = nvkm_memory_size(memory);
        void __iomem *map;
        int ret;

        iobj->map = ERR_PTR(-ENOMEM);

        ret = nvkm_vm_get(vm, size, 12, NV_MEM_ACCESS_RW, &iobj->bar);
        if (ret == 0) {
                map = ioremap(nv_device_resource_start(device, 3) +
                              (u32)iobj->bar.offset, size);
                if (map) {
                        nvkm_memory_map(memory, &iobj->bar, 0);
                        iobj->map = map;
                } else {
                        nvkm_warn(subdev, "PRAMIN ioremap failed\n");
                        nvkm_vm_put(&iobj->bar);
                }
        } else {
                nvkm_warn(subdev, "PRAMIN exhausted\n");
        }
}
static void
nv50_instobj_release(struct nvkm_memory *memory)
{
        struct nv50_instmem *imem = nv50_instobj(memory)->imem;
        spin_unlock_irqrestore(&imem->lock, imem->lock_flags);
}
static void __iomem *
nv50_instobj_acquire(struct nvkm_memory *memory)
{
        struct nv50_instobj *iobj = nv50_instobj(memory);
        struct nv50_instmem *imem = iobj->imem;
        struct nvkm_bar *bar = imem->base.subdev.device->bar;
        struct nvkm_vm *vm;
        unsigned long flags;

        if (!iobj->map && bar && bar->kmap && (vm = bar->kmap(bar)))
                nvkm_memory_boot(memory, vm);
        if (!IS_ERR_OR_NULL(iobj->map))
                return iobj->map;

        spin_lock_irqsave(&imem->lock, flags);
        imem->lock_flags = flags;
        return NULL;
}
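Worth spelling out: after this change iobj->map is a tri-state. It starts NULL (boot not attempted), becomes ERR_PTR(-ENOMEM) when nv50_instobj_boot() cannot get BAR space or ioremap it, and holds a valid __iomem pointer once the object is mapped. The helper below is not driver code, just a restatement of how acquire() above interprets that pointer.

/* Illustrative only: the three states of iobj->map consumed by
 * nv50_instobj_acquire() above. */
static const char *
nv50_map_state(void __iomem *map)
{
        if (map == NULL)
                return "unbooted: try nvkm_memory_boot() via the BAR kmap VM";
        if (IS_ERR(map))
                return "boot failed: always fall back to the 0x1700 window";
        return "mapped: return the direct BAR mapping from acquire()";
}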
static u32
-nv50_instobj_rd32(struct nvkm_object *object, u64 offset)
+nv50_instobj_rd32(struct nvkm_memory *memory, u64 offset)
{
-       struct nv50_instmem *imem = (void *)nvkm_instmem(object);
-       struct nv50_instobj *node = (void *)object;
+       struct nv50_instobj *iobj = nv50_instobj(memory);
+       struct nv50_instmem *imem = iobj->imem;
        struct nvkm_device *device = imem->base.subdev.device;
        unsigned long flags;
-       u64 base = (node->mem->offset + offset) & 0xffffff00000ULL;
-       u64 addr = (node->mem->offset + offset) & 0x000000fffffULL;
+       u64 base = (iobj->mem->offset + offset) & 0xffffff00000ULL;
+       u64 addr = (iobj->mem->offset + offset) & 0x000000fffffULL;
        u32 data;

        spin_lock_irqsave(&imem->lock, flags);
        if (unlikely(imem->addr != base)) {
                nvkm_wr32(device, 0x001700, base >> 16);
                imem->addr = base;
        }
        data = nvkm_rd32(device, 0x700000 + addr);
        spin_unlock_irqrestore(&imem->lock, flags);
        return data;
}
static void
-nv50_instobj_wr32(struct nvkm_object *object, u64 offset, u32 data)
+nv50_instobj_wr32(struct nvkm_memory *memory, u64 offset, u32 data)
{
-       struct nv50_instmem *imem = (void *)nvkm_instmem(object);
-       struct nv50_instobj *node = (void *)object;
+       struct nv50_instobj *iobj = nv50_instobj(memory);
+       struct nv50_instmem *imem = iobj->imem;
        struct nvkm_device *device = imem->base.subdev.device;
        unsigned long flags;
-       u64 base = (node->mem->offset + offset) & 0xffffff00000ULL;
-       u64 addr = (node->mem->offset + offset) & 0x000000fffffULL;
+       u64 base = (iobj->mem->offset + offset) & 0xffffff00000ULL;
+       u64 addr = (iobj->mem->offset + offset) & 0x000000fffffULL;

        spin_lock_irqsave(&imem->lock, flags);
        if (unlikely(imem->addr != base)) {
                nvkm_wr32(device, 0x001700, base >> 16);
                imem->addr = base;
        }
        nvkm_wr32(device, 0x700000 + addr, data);
        spin_unlock_irqrestore(&imem->lock, flags);
}
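The rd32/wr32 slow path above works by sliding a PRAMIN window: register 0x001700 selects which 1 MiB-aligned VRAM base (written in 64 KiB units, hence the ">> 16") is visible through the aperture at priv offset 0x700000, and the low 20 bits of the object address index into that window. The small standalone sketch below only restates the masks used in the functions above; it is not driver code.

#include <stdint.h>
#include <stdio.h>

/* Restates the PRAMIN window arithmetic from nv50_instobj_rd32/wr32 above. */
int main(void)
{
        uint64_t vram_addr = 0x12345678ULL;           /* example object offset */
        uint64_t base = vram_addr & 0xffffff00000ULL; /* window base, 1 MiB aligned */
        uint64_t addr = vram_addr & 0x000000fffffULL; /* offset inside the window */

        printf("write 0x%llx to 0x001700, then access 0x700000 + 0x%llx\n",
               (unsigned long long)(base >> 16), (unsigned long long)addr);
        return 0;
}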
static void
-nv50_instobj_dtor(struct nvkm_object *object)
+nv50_instobj_map(struct nvkm_memory *memory, struct nvkm_vma *vma, u64 offset)
{
-       struct nv50_instobj *node = (void *)object;
-       struct nvkm_ram *ram = nvkm_fb(object)->ram;
-       ram->func->put(ram, &node->mem);
-       nvkm_instobj_destroy(&node->base);
+       struct nv50_instobj *iobj = nv50_instobj(memory);
+       nvkm_vm_map_at(vma, offset, iobj->mem);
}
static void *
nv50_instobj_dtor(struct nvkm_memory *memory)
{
        struct nv50_instobj *iobj = nv50_instobj(memory);
        struct nvkm_ram *ram = iobj->imem->base.subdev.device->fb->ram;
        if (!IS_ERR_OR_NULL(iobj->map)) {
                nvkm_vm_put(&iobj->bar);
                iounmap(iobj->map);
        }
        ram->func->put(ram, &iobj->mem);
        return iobj;
}
static const struct nvkm_memory_func
nv50_instobj_func = {
        .dtor = nv50_instobj_dtor,
        .target = nv50_instobj_target,
        .size = nv50_instobj_size,
        .addr = nv50_instobj_addr,
        .boot = nv50_instobj_boot,
        .acquire = nv50_instobj_acquire,
        .release = nv50_instobj_release,
        .rd32 = nv50_instobj_rd32,
        .wr32 = nv50_instobj_wr32,
        .map = nv50_instobj_map,
};
static int
-nv50_instobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-                  struct nvkm_oclass *oclass, void *data, u32 size,
-                  struct nvkm_object **pobject)
+nv50_instobj_new(struct nvkm_instmem *base, u32 size, u32 align, bool zero,
+                 struct nvkm_memory **pmemory)
{
-       struct nvkm_ram *ram = nvkm_fb(parent)->ram;
-       struct nvkm_instobj_args *args = data;
-       struct nv50_instobj *node;
+       struct nv50_instmem *imem = nv50_instmem(base);
+       struct nv50_instobj *iobj;
+       struct nvkm_ram *ram = imem->base.subdev.device->fb->ram;
        int ret;

-       args->size  = max((args->size  + 4095) & ~4095, (u32)4096);
-       args->align = max((args->align + 4095) & ~4095, (u32)4096);
+       if (!(iobj = kzalloc(sizeof(*iobj), GFP_KERNEL)))
+               return -ENOMEM;
+       *pmemory = &iobj->memory;

-       ret = nvkm_instobj_create(parent, engine, oclass, &node);
-       *pobject = nv_object(node);
-       if (ret)
-               return ret;
+       nvkm_memory_ctor(&nv50_instobj_func, &iobj->memory);
+       iobj->imem = imem;

+       size  = max((size  + 4095) & ~4095, (u32)4096);
+       align = max((align + 4095) & ~4095, (u32)4096);

-       ret = ram->func->get(ram, args->size, args->align, 0, 0x800, &node->mem);
+       ret = ram->func->get(ram, size, align, 0, 0x800, &iobj->mem);
        if (ret)
                return ret;

-       node->base.addr = node->mem->offset;
-       node->base.size = node->mem->size << 12;
-       node->mem->page_shift = 12;
+       iobj->mem->page_shift = 12;
        return 0;
}
-static struct nvkm_instobj_impl
-nv50_instobj_oclass = {
-       .base.ofuncs = &(struct nvkm_ofuncs) {
-               .ctor = nv50_instobj_ctor,
-               .dtor = nv50_instobj_dtor,
-               .init = _nvkm_instobj_init,
-               .fini = _nvkm_instobj_fini,
-               .rd32 = nv50_instobj_rd32,
-               .wr32 = nv50_instobj_wr32,
-       },
-};
/******************************************************************************
* instmem subdev implementation
*****************************************************************************/
...
...
@@ -168,5 +254,7 @@ nv50_instmem_oclass = &(struct nvkm_instmem_impl) {
                .init = _nvkm_instmem_init,
                .fini = nv50_instmem_fini,
        },
-       .instobj = &nv50_instobj_oclass.base,
+       .memory_new = nv50_instobj_new,
+       .persistent = false,
+       .zero = false,
}.base;
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/priv.h
...
...
@@ -2,35 +2,12 @@
#define __NVKM_INSTMEM_PRIV_H__
#include <subdev/instmem.h>
-struct nvkm_instobj_impl {
-       struct nvkm_oclass base;
-};
-
-struct nvkm_instobj_args {
-       u32 size;
-       u32 align;
-};
-
-#define nvkm_instobj_create(p,e,o,d) \
-       nvkm_instobj_create_((p), (e), (o), sizeof(**d), (void **)d)
-#define nvkm_instobj_destroy(p) ({ \
-       struct nvkm_instobj *iobj = (p); \
-       _nvkm_instobj_dtor(nv_object(iobj)); \
-})
-#define nvkm_instobj_init(p) \
-       _nvkm_object_init(&(p)->base)
-#define nvkm_instobj_fini(p,s) \
-       _nvkm_object_fini(&(p)->base, (s))
-
-int nvkm_instobj_create_(struct nvkm_object *, struct nvkm_object *,
-                        struct nvkm_oclass *, int, void **);
-void _nvkm_instobj_dtor(struct nvkm_object *);
-#define _nvkm_instobj_init _nvkm_object_init
-#define _nvkm_instobj_fini _nvkm_object_fini
-
 struct nvkm_instmem_impl {
        struct nvkm_oclass base;
-       struct nvkm_oclass *instobj;
+       int (*memory_new)(struct nvkm_instmem *, u32 size, u32 align,
+                         bool zero, struct nvkm_memory **);
+       bool persistent;
+       bool zero;
};
#define nvkm_instmem_create(p,e,o,d) \
...
...
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c
...
...
@@ -338,6 +338,25 @@ nvkm_vm_put(struct nvkm_vma *vma)
        nvkm_vm_ref(NULL, &vma->vm, NULL);
}
int
nvkm_vm_boot(struct nvkm_vm *vm, u64 size)
{
        struct nvkm_mmu *mmu = vm->mmu;
        struct nvkm_gpuobj *pgt;
        int ret;

        ret = nvkm_gpuobj_new(nv_object(mmu), NULL,
                              (size >> mmu->spg_shift) * 8, 0x1000,
                              NVOBJ_FLAG_ZERO_ALLOC, &pgt);
        if (ret == 0) {
                vm->pgt[0].refcount[0] = 1;
                vm->pgt[0].obj[0] = pgt;
                nvkm_memory_boot(pgt->memory, vm);
        }

        return ret;
}
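As a sanity check on the size passed to nvkm_gpuobj_new() above: the flat page table needs one 8-byte entry per small page of the VM being booted. The numbers in the sketch below are assumptions for illustration (a 12-bit small-page shift and a 512 MiB VM); neither value is stated in this hunk.

#include <stdint.h>
#include <stdio.h>

/* Illustrative arithmetic only: size of the flat page table allocated by
 * nvkm_vm_boot() for an assumed 12-bit small-page shift and a 512 MiB VM. */
int main(void)
{
        uint64_t vm_size = 512ULL << 20;          /* assumed VM size: 512 MiB */
        unsigned int spg_shift = 12;              /* assumed 4 KiB small pages */
        uint64_t entries = vm_size >> spg_shift;  /* one PTE per small page */
        uint64_t bytes = entries * 8;             /* 8 bytes per PTE */

        printf("%llu PTEs -> %llu KiB of page table\n",
               (unsigned long long)entries, (unsigned long long)(bytes >> 10));
        return 0;
}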
int
nvkm_vm_create(struct nvkm_mmu *mmu, u64 offset, u64 length, u64 mm_offset,
               u32 block, struct lock_class_key *key, struct nvkm_vm **pvm)
...
...
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gf100.c
...
...
@@ -22,7 +22,6 @@
* Authors: Ben Skeggs
*/
#include <subdev/mmu.h>
#include <subdev/bar.h>
#include <subdev/fb.h>
#include <subdev/ltc.h>
#include <subdev/timer.h>
...
...
@@ -163,12 +162,9 @@ gf100_vm_flush(struct nvkm_vm *vm)
{
        struct nvkm_mmu *mmu = (void *)vm->mmu;
        struct nvkm_device *device = mmu->subdev.device;
-       struct nvkm_bar *bar = device->bar;
        struct nvkm_vm_pgd *vpgd;
        u32 type;

-       bar->flush(bar);
-
        type = 0x00000001; /* PAGE_ALL */
        if (atomic_read(&vm->engref[NVDEV_SUBDEV_BAR]))
                type |= 0x00000004; /* HUB_ONLY */
...
...
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv50.c
...
...
@@ -22,7 +22,6 @@
* Authors: Ben Skeggs
*/
#include <subdev/mmu.h>
#include <subdev/bar.h>
#include <subdev/fb.h>
#include <subdev/timer.h>
...
...
@@ -156,12 +155,9 @@ nv50_vm_flush(struct nvkm_vm *vm)
        struct nvkm_mmu *mmu = (void *)vm->mmu;
        struct nvkm_subdev *subdev = &mmu->subdev;
        struct nvkm_device *device = subdev->device;
-       struct nvkm_bar *bar = device->bar;
        struct nvkm_engine *engine;
        int i, vme;

-       bar->flush(bar);
-
        mutex_lock(&subdev->mutex);
        for (i = 0; i < NVDEV_SUBDEV_NR; i++) {
                if (!atomic_read(&vm->engref[i]))
...
...