Commit 6ce51352 authored Nov 01, 2017 by Ben Skeggs
drm/nouveau/mmu/nv44: implement new vmm backend
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
parent 473f9aca
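
Note (not part of the patch): both the old nv44_vm_fill() in the nv44.c diff below and the new nv44_vmm_pgt_fill() in vmmnv44.c implement the same NV44 GART PTE layout, packing four 27-bit page-frame numbers (a DMA address >> 12, so 27 + 12 = 39 addressable bits, consistent with .dma_bits = 39) into one 16-byte group of four 32-bit words, with bit 30 of the last word OR'd in as what looks like a valid/enable flag (the 0x40000000 in the code). A minimal standalone sketch of that packing follows, for illustration only; the helper name and the flag interpretation are assumptions, only the shift/mask arithmetic is taken from the diff.

	/* Hypothetical helper mirroring the shifts in nv44_vm_fill() /
	 * nv44_vmm_pgt_fill(); each pfn is a page address >> 12 (27 bits).
	 */
	#include <stdint.h>

	static void
	nv44_pack_pte_group(uint32_t out[4], const uint32_t pfn[4])
	{
		out[0] = pfn[0] >>  0 | pfn[1] << 27;	/* pfn0[26:0],  pfn1[4:0]   */
		out[1] = pfn[1] >>  5 | pfn[2] << 22;	/* pfn1[26:5],  pfn2[9:0]   */
		out[2] = pfn[2] >> 10 | pfn[3] << 17;	/* pfn2[26:10], pfn3[14:0]  */
		out[3] = pfn[3] >> 15 | 0x40000000;	/* pfn3[26:15] | assumed valid bit */
	}
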
Showing 2 changed files with 168 additions and 141 deletions (+168 -141)
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv44.c       +3 -141
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv44.c  +165   -0
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv44.c
@@ -24,150 +24,16 @@
#include "vmm.h"
#include <core/option.h>
#include <subdev/timer.h>
#include <nvif/class.h>
#define NV44_GART_SIZE (512 * 1024 * 1024)
#define NV44_GART_PAGE ( 4 * 1024)
/*******************************************************************************
* VM map/unmap callbacks
******************************************************************************/
static void
nv44_vm_fill(struct nvkm_memory *pgt, dma_addr_t null,
	     dma_addr_t *list, u32 pte, u32 cnt)
{
	u32 base = (pte << 2) & ~0x0000000f;
	u32 tmp[4];

	tmp[0] = nvkm_ro32(pgt, base + 0x0);
	tmp[1] = nvkm_ro32(pgt, base + 0x4);
	tmp[2] = nvkm_ro32(pgt, base + 0x8);
	tmp[3] = nvkm_ro32(pgt, base + 0xc);

	while (cnt--) {
		u32 addr = list ? (*list++ >> 12) : (null >> 12);
		switch (pte++ & 0x3) {
		case 0:
			tmp[0] &= ~0x07ffffff;
			tmp[0] |= addr;
			break;
		case 1:
			tmp[0] &= ~0xf8000000;
			tmp[0] |= addr << 27;
			tmp[1] &= ~0x003fffff;
			tmp[1] |= addr >> 5;
			break;
		case 2:
			tmp[1] &= ~0xffc00000;
			tmp[1] |= addr << 22;
			tmp[2] &= ~0x0001ffff;
			tmp[2] |= addr >> 10;
			break;
		case 3:
			tmp[2] &= ~0xfffe0000;
			tmp[2] |= addr << 17;
			tmp[3] &= ~0x00000fff;
			tmp[3] |= addr >> 15;
			break;
		}
	}

	nvkm_wo32(pgt, base + 0x0, tmp[0]);
	nvkm_wo32(pgt, base + 0x4, tmp[1]);
	nvkm_wo32(pgt, base + 0x8, tmp[2]);
	nvkm_wo32(pgt, base + 0xc, tmp[3] | 0x40000000);
}
static void
nv44_vm_map_sg(struct nvkm_vma *vma, struct nvkm_memory *pgt,
	       struct nvkm_mem *mem, u32 pte, u32 cnt, dma_addr_t *list)
{
	u32 tmp[4];
	int i;

	nvkm_kmap(pgt);
	if (pte & 3) {
		u32  max = 4 - (pte & 3);
		u32 part = (cnt > max) ? max : cnt;
		nv44_vm_fill(pgt, vma->vm->null, list, pte, part);
		pte  += part;
		list += part;
		cnt  -= part;
	}

	while (cnt >= 4) {
		for (i = 0; i < 4; i++)
			tmp[i] = *list++ >> 12;
		nvkm_wo32(pgt, pte++ * 4, tmp[0] >>  0 | tmp[1] << 27);
		nvkm_wo32(pgt, pte++ * 4, tmp[1] >>  5 | tmp[2] << 22);
		nvkm_wo32(pgt, pte++ * 4, tmp[2] >> 10 | tmp[3] << 17);
		nvkm_wo32(pgt, pte++ * 4, tmp[3] >> 15 | 0x40000000);
		cnt -= 4;
	}

	if (cnt)
		nv44_vm_fill(pgt, vma->vm->null, list, pte, cnt);
	nvkm_done(pgt);
}
static void
nv44_vm_unmap(struct nvkm_vma *vma, struct nvkm_memory *pgt, u32 pte, u32 cnt)
{
	nvkm_kmap(pgt);
	if (pte & 3) {
		u32  max = 4 - (pte & 3);
		u32 part = (cnt > max) ? max : cnt;
		nv44_vm_fill(pgt, vma->vm->null, NULL, pte, part);
		pte += part;
		cnt -= part;
	}

	while (cnt >= 4) {
		nvkm_wo32(pgt, pte++ * 4, 0x00000000);
		nvkm_wo32(pgt, pte++ * 4, 0x00000000);
		nvkm_wo32(pgt, pte++ * 4, 0x00000000);
		nvkm_wo32(pgt, pte++ * 4, 0x00000000);
		cnt -= 4;
	}

	if (cnt)
		nv44_vm_fill(pgt, vma->vm->null, NULL, pte, cnt);
	nvkm_done(pgt);
}
static void
nv44_vm_flush(struct nvkm_vm *vm)
{
	struct nvkm_device *device = vm->mmu->subdev.device;
	nvkm_wr32(device, 0x100814, vm->mmu->limit - NV44_GART_PAGE);
	nvkm_wr32(device, 0x100808, 0x00000020);
	nvkm_msec(device, 2000,
		if (nvkm_rd32(device, 0x100808) & 0x00000001)
			break;
	);
	nvkm_wr32(device, 0x100808, 0x00000000);
}
/*******************************************************************************
* MMU subdev
******************************************************************************/
static int
nv44_mmu_oneinit(struct nvkm_mmu *mmu)
{
	mmu->vmm->pgt[0].mem[0] = mmu->vmm->pd->pt[0]->memory;
	mmu->vmm->pgt[0].refcount[0] = 1;
	return 0;
}
static void
nv44_mmu_init(struct nvkm_mmu *mmu)
{
	struct nvkm_device *device = mmu->subdev.device;
	struct nvkm_memory *gart = mmu->vmm->pgt[0].mem[0];
	struct nvkm_memory *pt = mmu->vmm->pd->pt[0]->memory;
	u32 addr;

/* calculate vram address of this PRAMIN block, object must be
@@ -175,11 +41,11 @@ nv44_mmu_init(struct nvkm_mmu *mmu)
* of 512KiB for this to work correctly
*/
	addr  = nvkm_rd32(device, 0x10020c);
	addr -= ((nvkm_memory_addr(gart) >> 19) + 1) << 19;
	addr -= ((nvkm_memory_addr(pt) >> 19) + 1) << 19;

	nvkm_wr32(device, 0x100850, 0x80000000);
	nvkm_wr32(device, 0x100818, mmu->vmm->null);
	nvkm_wr32(device, 0x100804, NV44_GART_SIZE);
	nvkm_wr32(device, 0x100804, (nvkm_memory_size(pt) / 4) * 4096);
	nvkm_wr32(device, 0x100850, 0x00008000);
	nvkm_mask(device, 0x10008c, 0x00000200, 0x00000200);
	nvkm_wr32(device, 0x100820, 0x00000000);
@@ -189,16 +55,12 @@ nv44_mmu_init(struct nvkm_mmu *mmu)
static const struct nvkm_mmu_func
nv44_mmu = {
	.oneinit = nv44_mmu_oneinit,
	.init = nv44_mmu_init,
	.limit = NV44_GART_SIZE,
	.dma_bits = 39,
	.pgt_bits = 32 - 12,
	.spg_shift = 12,
	.lpg_shift = 12,
	.map_sg = nv44_vm_map_sg,
	.unmap = nv44_vm_unmap,
	.flush = nv44_vm_flush,
	.vmm = {{ -1, -1, NVIF_CLASS_VMM_NV04}, nv44_vmm_new, true },
};
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv44.c
@@ -21,8 +21,158 @@
*/
#include "vmm.h"
#include <subdev/timer.h>
static void
nv44_vmm_pgt_fill(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
		  dma_addr_t *list, u32 ptei, u32 ptes)
{
	u32 pteo = (ptei << 2) & ~0x0000000f;
	u32 tmp[4];

	tmp[0] = nvkm_ro32(pt->memory, pteo + 0x0);
	tmp[1] = nvkm_ro32(pt->memory, pteo + 0x4);
	tmp[2] = nvkm_ro32(pt->memory, pteo + 0x8);
	tmp[3] = nvkm_ro32(pt->memory, pteo + 0xc);

	while (ptes--) {
		u32 addr = (list ? *list++ : vmm->null) >> 12;
		switch (ptei++ & 0x3) {
		case 0:
			tmp[0] &= ~0x07ffffff;
			tmp[0] |= addr;
			break;
		case 1:
			tmp[0] &= ~0xf8000000;
			tmp[0] |= addr << 27;
			tmp[1] &= ~0x003fffff;
			tmp[1] |= addr >> 5;
			break;
		case 2:
			tmp[1] &= ~0xffc00000;
			tmp[1] |= addr << 22;
			tmp[2] &= ~0x0001ffff;
			tmp[2] |= addr >> 10;
			break;
		case 3:
			tmp[2] &= ~0xfffe0000;
			tmp[2] |= addr << 17;
			tmp[3] &= ~0x00000fff;
			tmp[3] |= addr >> 15;
			break;
		}
	}

	VMM_WO032(pt, vmm, pteo + 0x0, tmp[0]);
	VMM_WO032(pt, vmm, pteo + 0x4, tmp[1]);
	VMM_WO032(pt, vmm, pteo + 0x8, tmp[2]);
	VMM_WO032(pt, vmm, pteo + 0xc, tmp[3] | 0x40000000);
}
static void
nv44_vmm_pgt_pte(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
		 u32 ptei, u32 ptes, struct nvkm_vmm_map *map, u64 addr)
{
	dma_addr_t tmp[4], i;

	if (ptei & 3) {
		const u32 pten = min(ptes, 4 - (ptei & 3));
		for (i = 0; i < pten; i++, addr += 0x1000)
			tmp[i] = addr;
		nv44_vmm_pgt_fill(vmm, pt, tmp, ptei, pten);
		ptei += pten;
		ptes -= pten;
	}

	while (ptes >= 4) {
		for (i = 0; i < 4; i++, addr += 0x1000)
			tmp[i] = addr >> 12;
		VMM_WO032(pt, vmm, ptei++ * 4, tmp[0] >>  0 | tmp[1] << 27);
		VMM_WO032(pt, vmm, ptei++ * 4, tmp[1] >>  5 | tmp[2] << 22);
		VMM_WO032(pt, vmm, ptei++ * 4, tmp[2] >> 10 | tmp[3] << 17);
		VMM_WO032(pt, vmm, ptei++ * 4, tmp[3] >> 15 | 0x40000000);
		ptes -= 4;
	}

	if (ptes) {
		for (i = 0; i < ptes; i++, addr += 0x1000)
			tmp[i] = addr;
		nv44_vmm_pgt_fill(vmm, pt, tmp, ptei, ptes);
	}
}
static void
nv44_vmm_pgt_sgl(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
		 u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
{
	VMM_MAP_ITER_SGL(vmm, pt, ptei, ptes, map, nv44_vmm_pgt_pte);
}
static void
nv44_vmm_pgt_dma(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
		 u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
{
#if PAGE_SHIFT == 12
	nvkm_kmap(pt->memory);
	if (ptei & 3) {
		const u32 pten = min(ptes, 4 - (ptei & 3));
		nv44_vmm_pgt_fill(vmm, pt, map->dma, ptei, pten);
		ptei += pten;
		ptes -= pten;
		map->dma += pten;
	}

	while (ptes >= 4) {
		u32 tmp[4], i;
		for (i = 0; i < 4; i++)
			tmp[i] = *map->dma++ >> 12;
		VMM_WO032(pt, vmm, ptei++ * 4, tmp[0] >>  0 | tmp[1] << 27);
		VMM_WO032(pt, vmm, ptei++ * 4, tmp[1] >>  5 | tmp[2] << 22);
		VMM_WO032(pt, vmm, ptei++ * 4, tmp[2] >> 10 | tmp[3] << 17);
		VMM_WO032(pt, vmm, ptei++ * 4, tmp[3] >> 15 | 0x40000000);
		ptes -= 4;
	}

	if (ptes) {
		nv44_vmm_pgt_fill(vmm, pt, map->dma, ptei, ptes);
		map->dma += ptes;
	}
	nvkm_done(pt->memory);
#else
	VMM_MAP_ITER_DMA(vmm, pt, ptei, ptes, map, nv44_vmm_pgt_pte);
#endif
}
static void
nv44_vmm_pgt_unmap(struct nvkm_vmm *vmm,
		   struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes)
{
	nvkm_kmap(pt->memory);
	if (ptei & 3) {
		const u32 pten = min(ptes, 4 - (ptei & 3));
		nv44_vmm_pgt_fill(vmm, pt, NULL, ptei, pten);
		ptei += pten;
		ptes -= pten;
	}

	while (ptes > 4) {
		VMM_WO032(pt, vmm, ptei++ * 4, 0x00000000);
		VMM_WO032(pt, vmm, ptei++ * 4, 0x00000000);
		VMM_WO032(pt, vmm, ptei++ * 4, 0x00000000);
		VMM_WO032(pt, vmm, ptei++ * 4, 0x00000000);
		ptes -= 4;
	}

	if (ptes)
		nv44_vmm_pgt_fill(vmm, pt, NULL, ptei, ptes);
	nvkm_done(pt->memory);
}
static const struct nvkm_vmm_desc_func
nv44_vmm_desc_pgt = {
	.unmap = nv44_vmm_pgt_unmap,
	.dma = nv44_vmm_pgt_dma,
	.sgl = nv44_vmm_pgt_sgl,
};

static const struct nvkm_vmm_desc
@@ -31,8 +181,23 @@ nv44_vmm_desc_12[] = {
{}
};
static void
nv44_vmm_flush(struct nvkm_vmm *vmm, int level)
{
	struct nvkm_device *device = vmm->mmu->subdev.device;
	nvkm_wr32(device, 0x100814, vmm->limit - 4096);
	nvkm_wr32(device, 0x100808, 0x000000020);
	nvkm_msec(device, 2000,
		if (nvkm_rd32(device, 0x100808) & 0x00000001)
			break;
	);
	nvkm_wr32(device, 0x100808, 0x00000000);
}
static const struct nvkm_vmm_func
nv44_vmm = {
	.valid = nv04_vmm_valid,
	.flush = nv44_vmm_flush,
	.page = {
		{ 12, &nv44_vmm_desc_12[0], NVKM_VMM_PAGE_HOST },
		{}