Commit 822ad79f authored Aug 20, 2015 by Ben Skeggs
drm/nouveau/clk: switch to device pri macros
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>

parent 14caba44
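The change is mechanical across all seven clk subdev files: register accesses that went through the clk/subdev object (nv_rd32/nv_wr32/nv_mask, plus nv_device(clk)->crystal and ->chipset lookups) now go through the device-level pri macros (nvkm_rd32/nvkm_wr32/nvkm_mask) on a struct nvkm_device pointer fetched from clk->base.subdev.device. A minimal before/after sketch of the pattern, trimmed for illustration rather than copied verbatim from any one hunk:

    /* before: accessor keyed off the clk object */
    u32 ssrc = nv_rd32(clk, dsrc);

    /* after: look the device up once, then use the device pri macros */
    struct nvkm_device *device = clk->base.subdev.device;
    u32 ssrc = nvkm_rd32(device, dsrc);

Waits (nv_wait) and error reporting (nv_error) are left on the old helpers in this commit.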
Showing 7 changed files with 234 additions and 181 deletions
drivers/gpu/drm/nouveau/nvkm/subdev/clk/gf100.c  +30 -21
drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk104.c  +39 -27
drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c  +43 -35
drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c  +50 -39
drivers/gpu/drm/nouveau/nvkm/subdev/clk/mcp77.c  +29 -25
drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv40.c   +14 -10
drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.c   +29 -24
drivers/gpu/drm/nouveau/nvkm/subdev/clk/gf100.c

@@ -47,7 +47,8 @@ static u32 read_div(struct gf100_clk *, int, u32, u32);
 static u32
 read_vco(struct gf100_clk *clk, u32 dsrc)
 {
-        u32 ssrc = nv_rd32(clk, dsrc);
+        struct nvkm_device *device = clk->base.subdev.device;
+        u32 ssrc = nvkm_rd32(device, dsrc);
         if (!(ssrc & 0x00000100))
                 return clk->base.read(&clk->base, nv_clk_src_sppll0);
         return clk->base.read(&clk->base, nv_clk_src_sppll1);
@@ -56,8 +57,9 @@ read_vco(struct gf100_clk *clk, u32 dsrc)
 static u32
 read_pll(struct gf100_clk *clk, u32 pll)
 {
-        u32 ctrl = nv_rd32(clk, pll + 0x00);
-        u32 coef = nv_rd32(clk, pll + 0x04);
+        struct nvkm_device *device = clk->base.subdev.device;
+        u32 ctrl = nvkm_rd32(device, pll + 0x00);
+        u32 coef = nvkm_rd32(device, pll + 0x04);
         u32 P = (coef & 0x003f0000) >> 16;
         u32 N = (coef & 0x0000ff00) >> 8;
         u32 M = (coef & 0x000000ff) >> 0;
@@ -69,7 +71,7 @@ read_pll(struct gf100_clk *clk, u32 pll)
         switch (pll) {
         case 0x00e800:
         case 0x00e820:
-                sclk = nv_device(clk)->crystal;
+                sclk = device->crystal;
                 P = 1;
                 break;
         case 0x132000:
@@ -94,13 +96,14 @@ read_pll(struct gf100_clk *clk, u32 pll)
 static u32
 read_div(struct gf100_clk *clk, int doff, u32 dsrc, u32 dctl)
 {
-        u32 ssrc = nv_rd32(clk, dsrc + (doff * 4));
-        u32 sctl = nv_rd32(clk, dctl + (doff * 4));
+        struct nvkm_device *device = clk->base.subdev.device;
+        u32 ssrc = nvkm_rd32(device, dsrc + (doff * 4));
+        u32 sctl = nvkm_rd32(device, dctl + (doff * 4));
         switch (ssrc & 0x00000003) {
         case 0:
                 if ((ssrc & 0x00030000) != 0x00030000)
-                        return nv_device(clk)->crystal;
+                        return device->crystal;
                 return 108000;
         case 2:
                 return 100000;
@@ -120,8 +123,9 @@ read_div(struct gf100_clk *clk, int doff, u32 dsrc, u32 dctl)
 static u32
 read_clk(struct gf100_clk *clk, int idx)
 {
-        u32 sctl = nv_rd32(clk, 0x137250 + (idx * 4));
-        u32 ssel = nv_rd32(clk, 0x137100);
+        struct nvkm_device *device = clk->base.subdev.device;
+        u32 sctl = nvkm_rd32(device, 0x137250 + (idx * 4));
+        u32 ssel = nvkm_rd32(device, 0x137100);
         u32 sclk, sdiv;
         if (ssel & (1 << idx)) {
@@ -145,7 +149,7 @@ static int
 gf100_clk_read(struct nvkm_clk *obj, enum nv_clk_src src)
 {
         struct gf100_clk *clk = container_of(obj, typeof(*clk), base);
-        struct nvkm_device *device = nv_device(clk);
+        struct nvkm_device *device = clk->base.subdev.device;
         switch (src) {
         case nv_clk_src_crystal:
@@ -166,7 +170,7 @@ gf100_clk_read(struct nvkm_clk *obj, enum nv_clk_src src)
         case nv_clk_src_mdiv:
                 return read_div(clk, 0, 0x137300, 0x137310);
         case nv_clk_src_mem:
-                if (nv_rd32(clk, 0x1373f0) & 0x00000002)
+                if (nvkm_rd32(device, 0x1373f0) & 0x00000002)
                         return clk->base.read(&clk->base, nv_clk_src_mpll);
                 return clk->base.read(&clk->base, nv_clk_src_mdiv);
@@ -329,16 +333,18 @@ static void
 gf100_clk_prog_0(struct gf100_clk *clk, int idx)
 {
         struct gf100_clk_info *info = &clk->eng[idx];
+        struct nvkm_device *device = clk->base.subdev.device;
         if (idx < 7 && !info->ssel) {
-                nv_mask(clk, 0x1371d0 + (idx * 0x04), 0x80003f3f, info->ddiv);
-                nv_wr32(clk, 0x137160 + (idx * 0x04), info->dsrc);
+                nvkm_mask(device, 0x1371d0 + (idx * 0x04), 0x80003f3f, info->ddiv);
+                nvkm_wr32(device, 0x137160 + (idx * 0x04), info->dsrc);
         }
 }

 static void
 gf100_clk_prog_1(struct gf100_clk *clk, int idx)
 {
-        nv_mask(clk, 0x137100, (1 << idx), 0x00000000);
+        struct nvkm_device *device = clk->base.subdev.device;
+        nvkm_mask(device, 0x137100, (1 << idx), 0x00000000);
         nv_wait(clk, 0x137100, (1 << idx), 0x00000000);
 }
@@ -346,15 +352,16 @@ static void
 gf100_clk_prog_2(struct gf100_clk *clk, int idx)
 {
         struct gf100_clk_info *info = &clk->eng[idx];
+        struct nvkm_device *device = clk->base.subdev.device;
         const u32 addr = 0x137000 + (idx * 0x20);
         if (idx <= 7) {
-                nv_mask(clk, addr + 0x00, 0x00000004, 0x00000000);
-                nv_mask(clk, addr + 0x00, 0x00000001, 0x00000000);
+                nvkm_mask(device, addr + 0x00, 0x00000004, 0x00000000);
+                nvkm_mask(device, addr + 0x00, 0x00000001, 0x00000000);
                 if (info->coef) {
-                        nv_wr32(clk, addr + 0x04, info->coef);
-                        nv_mask(clk, addr + 0x00, 0x00000001, 0x00000001);
+                        nvkm_wr32(device, addr + 0x04, info->coef);
+                        nvkm_mask(device, addr + 0x00, 0x00000001, 0x00000001);
                         nv_wait(clk, addr + 0x00, 0x00020000, 0x00020000);
-                        nv_mask(clk, addr + 0x00, 0x00020004, 0x00000004);
+                        nvkm_mask(device, addr + 0x00, 0x00020004, 0x00000004);
                 }
         }
 }
@@ -363,8 +370,9 @@ static void
 gf100_clk_prog_3(struct gf100_clk *clk, int idx)
 {
         struct gf100_clk_info *info = &clk->eng[idx];
+        struct nvkm_device *device = clk->base.subdev.device;
         if (info->ssel) {
-                nv_mask(clk, 0x137100, (1 << idx), info->ssel);
+                nvkm_mask(device, 0x137100, (1 << idx), info->ssel);
                 nv_wait(clk, 0x137100, (1 << idx), info->ssel);
         }
 }
@@ -373,7 +381,8 @@ static void
 gf100_clk_prog_4(struct gf100_clk *clk, int idx)
 {
         struct gf100_clk_info *info = &clk->eng[idx];
-        nv_mask(clk, 0x137250 + (idx * 0x04), 0x00003f3f, info->mdiv);
+        struct nvkm_device *device = clk->base.subdev.device;
+        nvkm_mask(device, 0x137250 + (idx * 0x04), 0x00003f3f, info->mdiv);
 }

 static int
drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk104.c

@@ -48,7 +48,8 @@ static u32 read_pll(struct gk104_clk *, u32);
 static u32
 read_vco(struct gk104_clk *clk, u32 dsrc)
 {
-        u32 ssrc = nv_rd32(clk, dsrc);
+        struct nvkm_device *device = clk->base.subdev.device;
+        u32 ssrc = nvkm_rd32(device, dsrc);
         if (!(ssrc & 0x00000100))
                 return read_pll(clk, 0x00e800);
         return read_pll(clk, 0x00e820);
@@ -57,8 +58,9 @@ read_vco(struct gk104_clk *clk, u32 dsrc)
 static u32
 read_pll(struct gk104_clk *clk, u32 pll)
 {
-        u32 ctrl = nv_rd32(clk, pll + 0x00);
-        u32 coef = nv_rd32(clk, pll + 0x04);
+        struct nvkm_device *device = clk->base.subdev.device;
+        u32 ctrl = nvkm_rd32(device, pll + 0x00);
+        u32 coef = nvkm_rd32(device, pll + 0x04);
         u32 P = (coef & 0x003f0000) >> 16;
         u32 N = (coef & 0x0000ff00) >> 8;
         u32 M = (coef & 0x000000ff) >> 0;
@@ -71,7 +73,7 @@ read_pll(struct gk104_clk *clk, u32 pll)
         switch (pll) {
         case 0x00e800:
         case 0x00e820:
-                sclk = nv_device(clk)->crystal;
+                sclk = device->crystal;
                 P = 1;
                 break;
         case 0x132000:
@@ -80,7 +82,7 @@ read_pll(struct gk104_clk *clk, u32 pll)
                 break;
         case 0x132020:
                 sclk = read_div(clk, 0, 0x137320, 0x137330);
-                fN = nv_rd32(clk, pll + 0x10) >> 16;
+                fN = nvkm_rd32(device, pll + 0x10) >> 16;
                 break;
         case 0x137000:
         case 0x137020:
@@ -102,13 +104,14 @@ read_pll(struct gk104_clk *clk, u32 pll)
 static u32
 read_div(struct gk104_clk *clk, int doff, u32 dsrc, u32 dctl)
 {
-        u32 ssrc = nv_rd32(clk, dsrc + (doff * 4));
-        u32 sctl = nv_rd32(clk, dctl + (doff * 4));
+        struct nvkm_device *device = clk->base.subdev.device;
+        u32 ssrc = nvkm_rd32(device, dsrc + (doff * 4));
+        u32 sctl = nvkm_rd32(device, dctl + (doff * 4));
         switch (ssrc & 0x00000003) {
         case 0:
                 if ((ssrc & 0x00030000) != 0x00030000)
-                        return nv_device(clk)->crystal;
+                        return device->crystal;
                 return 108000;
         case 2:
                 return 100000;
@@ -128,7 +131,8 @@ read_div(struct gk104_clk *clk, int doff, u32 dsrc, u32 dctl)
 static u32
 read_mem(struct gk104_clk *clk)
 {
-        switch (nv_rd32(clk, 0x1373f4) & 0x0000000f) {
+        struct nvkm_device *device = clk->base.subdev.device;
+        switch (nvkm_rd32(device, 0x1373f4) & 0x0000000f) {
         case 1: return read_pll(clk, 0x132020);
         case 2: return read_pll(clk, 0x132000);
         default:
@@ -139,11 +143,12 @@ read_mem(struct gk104_clk *clk)
 static u32
 read_clk(struct gk104_clk *clk, int idx)
 {
-        u32 sctl = nv_rd32(clk, 0x137250 + (idx * 4));
+        struct nvkm_device *device = clk->base.subdev.device;
+        u32 sctl = nvkm_rd32(device, 0x137250 + (idx * 4));
         u32 sclk, sdiv;
         if (idx < 7) {
-                u32 ssel = nv_rd32(clk, 0x137100);
+                u32 ssel = nvkm_rd32(device, 0x137100);
                 if (ssel & (1 << idx)) {
                         sclk = read_pll(clk, 0x137000 + (idx * 0x20));
                         sdiv = 1;
@@ -152,7 +157,7 @@ read_clk(struct gk104_clk *clk, int idx)
                         sdiv = 0;
                 }
         } else {
-                u32 ssrc = nv_rd32(clk, 0x137160 + (idx * 0x04));
+                u32 ssrc = nvkm_rd32(device, 0x137160 + (idx * 0x04));
                 if ((ssrc & 0x00000003) == 0x00000003) {
                         sclk = read_div(clk, idx, 0x137160, 0x1371d0);
                         if (ssrc & 0x00000100) {
@@ -183,7 +188,7 @@ static int
 gk104_clk_read(struct nvkm_clk *obj, enum nv_clk_src src)
 {
         struct gk104_clk *clk = container_of(obj, typeof(*clk), base);
-        struct nvkm_device *device = nv_device(clk);
+        struct nvkm_device *device = clk->base.subdev.device;
         switch (src) {
         case nv_clk_src_crystal:
@@ -349,37 +354,41 @@ static void
 gk104_clk_prog_0(struct gk104_clk *clk, int idx)
 {
         struct gk104_clk_info *info = &clk->eng[idx];
+        struct nvkm_device *device = clk->base.subdev.device;
         if (!info->ssel) {
-                nv_mask(clk, 0x1371d0 + (idx * 0x04), 0x8000003f, info->ddiv);
-                nv_wr32(clk, 0x137160 + (idx * 0x04), info->dsrc);
+                nvkm_mask(device, 0x1371d0 + (idx * 0x04), 0x8000003f, info->ddiv);
+                nvkm_wr32(device, 0x137160 + (idx * 0x04), info->dsrc);
         }
 }

 static void
 gk104_clk_prog_1_0(struct gk104_clk *clk, int idx)
 {
-        nv_mask(clk, 0x137100, (1 << idx), 0x00000000);
+        struct nvkm_device *device = clk->base.subdev.device;
+        nvkm_mask(device, 0x137100, (1 << idx), 0x00000000);
         nv_wait(clk, 0x137100, (1 << idx), 0x00000000);
 }

 static void
 gk104_clk_prog_1_1(struct gk104_clk *clk, int idx)
 {
-        nv_mask(clk, 0x137160 + (idx * 0x04), 0x00000100, 0x00000000);
+        struct nvkm_device *device = clk->base.subdev.device;
+        nvkm_mask(device, 0x137160 + (idx * 0x04), 0x00000100, 0x00000000);
 }

 static void
 gk104_clk_prog_2(struct gk104_clk *clk, int idx)
 {
         struct gk104_clk_info *info = &clk->eng[idx];
+        struct nvkm_device *device = clk->base.subdev.device;
         const u32 addr = 0x137000 + (idx * 0x20);
-        nv_mask(clk, addr + 0x00, 0x00000004, 0x00000000);
-        nv_mask(clk, addr + 0x00, 0x00000001, 0x00000000);
+        nvkm_mask(device, addr + 0x00, 0x00000004, 0x00000000);
+        nvkm_mask(device, addr + 0x00, 0x00000001, 0x00000000);
         if (info->coef) {
-                nv_wr32(clk, addr + 0x04, info->coef);
-                nv_mask(clk, addr + 0x00, 0x00000001, 0x00000001);
+                nvkm_wr32(device, addr + 0x04, info->coef);
+                nvkm_mask(device, addr + 0x00, 0x00000001, 0x00000001);
                 nv_wait(clk, addr + 0x00, 0x00020000, 0x00020000);
-                nv_mask(clk, addr + 0x00, 0x00020004, 0x00000004);
+                nvkm_mask(device, addr + 0x00, 0x00020004, 0x00000004);
         }
 }
@@ -387,18 +396,20 @@ static void
 gk104_clk_prog_3(struct gk104_clk *clk, int idx)
 {
         struct gk104_clk_info *info = &clk->eng[idx];
+        struct nvkm_device *device = clk->base.subdev.device;
         if (info->ssel)
-                nv_mask(clk, 0x137250 + (idx * 0x04), 0x00003f00, info->mdiv);
+                nvkm_mask(device, 0x137250 + (idx * 0x04), 0x00003f00, info->mdiv);
         else
-                nv_mask(clk, 0x137250 + (idx * 0x04), 0x0000003f, info->mdiv);
+                nvkm_mask(device, 0x137250 + (idx * 0x04), 0x0000003f, info->mdiv);
 }

 static void
 gk104_clk_prog_4_0(struct gk104_clk *clk, int idx)
 {
         struct gk104_clk_info *info = &clk->eng[idx];
+        struct nvkm_device *device = clk->base.subdev.device;
         if (info->ssel) {
-                nv_mask(clk, 0x137100, (1 << idx), info->ssel);
+                nvkm_mask(device, 0x137100, (1 << idx), info->ssel);
                 nv_wait(clk, 0x137100, (1 << idx), info->ssel);
         }
 }
@@ -407,9 +418,10 @@ static void
 gk104_clk_prog_4_1(struct gk104_clk *clk, int idx)
 {
         struct gk104_clk_info *info = &clk->eng[idx];
+        struct nvkm_device *device = clk->base.subdev.device;
         if (info->ssel) {
-                nv_mask(clk, 0x137160 + (idx * 0x04), 0x40000000, 0x40000000);
-                nv_mask(clk, 0x137160 + (idx * 0x04), 0x00000100, 0x00000100);
+                nvkm_mask(device, 0x137160 + (idx * 0x04), 0x40000000, 0x40000000);
+                nvkm_mask(device, 0x137160 + (idx * 0x04), 0x00000100, 0x00000100);
         }
 }
drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c

@@ -126,9 +126,10 @@ struct gk20a_clk {
 static void
 gk20a_pllg_read_mnp(struct gk20a_clk *clk)
 {
+        struct nvkm_device *device = clk->base.subdev.device;
         u32 val;

-        val = nv_rd32(clk, GPCPLL_COEFF);
+        val = nvkm_rd32(device, GPCPLL_COEFF);
         clk->m = (val >> GPCPLL_COEFF_M_SHIFT) & MASK(GPCPLL_COEFF_M_WIDTH);
         clk->n = (val >> GPCPLL_COEFF_N_SHIFT) & MASK(GPCPLL_COEFF_N_WIDTH);
         clk->pl = (val >> GPCPLL_COEFF_P_SHIFT) & MASK(GPCPLL_COEFF_P_WIDTH);
@@ -265,51 +266,52 @@ gk20a_pllg_calc_mnp(struct gk20a_clk *clk, unsigned long rate)
 static int
 gk20a_pllg_slide(struct gk20a_clk *clk, u32 n)
 {
+        struct nvkm_device *device = clk->base.subdev.device;
         u32 val;
         int ramp_timeout;

         /* get old coefficients */
-        val = nv_rd32(clk, GPCPLL_COEFF);
+        val = nvkm_rd32(device, GPCPLL_COEFF);
         /* do nothing if NDIV is the same */
         if (n == ((val >> GPCPLL_COEFF_N_SHIFT) & MASK(GPCPLL_COEFF_N_WIDTH)))
                 return 0;

         /* setup */
-        nv_mask(clk, GPCPLL_CFG2, 0xff << GPCPLL_CFG2_PLL_STEPA_SHIFT,
+        nvkm_mask(device, GPCPLL_CFG2, 0xff << GPCPLL_CFG2_PLL_STEPA_SHIFT,
                 0x2b << GPCPLL_CFG2_PLL_STEPA_SHIFT);
-        nv_mask(clk, GPCPLL_CFG3, 0xff << GPCPLL_CFG3_PLL_STEPB_SHIFT,
+        nvkm_mask(device, GPCPLL_CFG3, 0xff << GPCPLL_CFG3_PLL_STEPB_SHIFT,
                 0xb << GPCPLL_CFG3_PLL_STEPB_SHIFT);

         /* pll slowdown mode */
-        nv_mask(clk, GPCPLL_NDIV_SLOWDOWN,
+        nvkm_mask(device, GPCPLL_NDIV_SLOWDOWN,
                 BIT(GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT),
                 BIT(GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT));

         /* new ndiv ready for ramp */
-        val = nv_rd32(clk, GPCPLL_COEFF);
+        val = nvkm_rd32(device, GPCPLL_COEFF);
         val &= ~(MASK(GPCPLL_COEFF_N_WIDTH) << GPCPLL_COEFF_N_SHIFT);
         val |= (n & MASK(GPCPLL_COEFF_N_WIDTH)) << GPCPLL_COEFF_N_SHIFT;
         udelay(1);
-        nv_wr32(clk, GPCPLL_COEFF, val);
+        nvkm_wr32(device, GPCPLL_COEFF, val);

         /* dynamic ramp to new ndiv */
-        val = nv_rd32(clk, GPCPLL_NDIV_SLOWDOWN);
+        val = nvkm_rd32(device, GPCPLL_NDIV_SLOWDOWN);
         val |= 0x1 << GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT;
         udelay(1);
-        nv_wr32(clk, GPCPLL_NDIV_SLOWDOWN, val);
+        nvkm_wr32(device, GPCPLL_NDIV_SLOWDOWN, val);

         for (ramp_timeout = 500; ramp_timeout > 0; ramp_timeout--) {
                 udelay(1);
-                val = nv_rd32(clk, GPC_BCAST_NDIV_SLOWDOWN_DEBUG);
+                val = nvkm_rd32(device, GPC_BCAST_NDIV_SLOWDOWN_DEBUG);
                 if (val & GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_MASK)
                         break;
         }

         /* exit slowdown mode */
-        nv_mask(clk, GPCPLL_NDIV_SLOWDOWN,
+        nvkm_mask(device, GPCPLL_NDIV_SLOWDOWN,
                 BIT(GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT) |
                 BIT(GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT), 0);
-        nv_rd32(clk, GPCPLL_NDIV_SLOWDOWN);
+        nvkm_rd32(device, GPCPLL_NDIV_SLOWDOWN);

         if (ramp_timeout <= 0) {
                 nv_error(clk, "gpcpll dynamic ramp timeout\n");
@@ -322,30 +324,33 @@ gk20a_pllg_slide(struct gk20a_clk *clk, u32 n)
 static void
 _gk20a_pllg_enable(struct gk20a_clk *clk)
 {
-        nv_mask(clk, GPCPLL_CFG, GPCPLL_CFG_ENABLE, GPCPLL_CFG_ENABLE);
-        nv_rd32(clk, GPCPLL_CFG);
+        struct nvkm_device *device = clk->base.subdev.device;
+        nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_ENABLE, GPCPLL_CFG_ENABLE);
+        nvkm_rd32(device, GPCPLL_CFG);
 }

 static void
 _gk20a_pllg_disable(struct gk20a_clk *clk)
 {
-        nv_mask(clk, GPCPLL_CFG, GPCPLL_CFG_ENABLE, 0);
-        nv_rd32(clk, GPCPLL_CFG);
+        struct nvkm_device *device = clk->base.subdev.device;
+        nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_ENABLE, 0);
+        nvkm_rd32(device, GPCPLL_CFG);
 }

 static int
 _gk20a_pllg_program_mnp(struct gk20a_clk *clk, bool allow_slide)
 {
+        struct nvkm_device *device = clk->base.subdev.device;
         u32 val, cfg;
         u32 m_old, pl_old, n_lo;

         /* get old coefficients */
-        val = nv_rd32(clk, GPCPLL_COEFF);
+        val = nvkm_rd32(device, GPCPLL_COEFF);
         m_old = (val >> GPCPLL_COEFF_M_SHIFT) & MASK(GPCPLL_COEFF_M_WIDTH);
         pl_old = (val >> GPCPLL_COEFF_P_SHIFT) & MASK(GPCPLL_COEFF_P_WIDTH);

         /* do NDIV slide if there is no change in M and PL */
-        cfg = nv_rd32(clk, GPCPLL_CFG);
+        cfg = nvkm_rd32(device, GPCPLL_CFG);
         if (allow_slide && clk->m == m_old && clk->pl == pl_old &&
             (cfg & GPCPLL_CFG_ENABLE)) {
                 return gk20a_pllg_slide(clk, clk->n);
@@ -362,21 +367,21 @@ _gk20a_pllg_program_mnp(struct gk20a_clk *clk, bool allow_slide)
         }

         /* split FO-to-bypass jump in halfs by setting out divider 1:2 */
-        nv_mask(clk, GPC2CLK_OUT, GPC2CLK_OUT_VCODIV_MASK,
+        nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_VCODIV_MASK,
                 0x2 << GPC2CLK_OUT_VCODIV_SHIFT);

         /* put PLL in bypass before programming it */
-        val = nv_rd32(clk, SEL_VCO);
+        val = nvkm_rd32(device, SEL_VCO);
         val &= ~(BIT(SEL_VCO_GPC2CLK_OUT_SHIFT));
         udelay(2);
-        nv_wr32(clk, SEL_VCO, val);
+        nvkm_wr32(device, SEL_VCO, val);

         /* get out from IDDQ */
-        val = nv_rd32(clk, GPCPLL_CFG);
+        val = nvkm_rd32(device, GPCPLL_CFG);
         if (val & GPCPLL_CFG_IDDQ) {
                 val &= ~GPCPLL_CFG_IDDQ;
-                nv_wr32(clk, GPCPLL_CFG, val);
-                nv_rd32(clk, GPCPLL_CFG);
+                nvkm_wr32(device, GPCPLL_CFG, val);
+                nvkm_rd32(device, GPCPLL_CFG);
                 udelay(2);
         }
@@ -390,14 +395,14 @@ _gk20a_pllg_program_mnp(struct gk20a_clk *clk, bool allow_slide)
         val = clk->m << GPCPLL_COEFF_M_SHIFT;
         val |= (allow_slide ? n_lo : clk->n) << GPCPLL_COEFF_N_SHIFT;
         val |= clk->pl << GPCPLL_COEFF_P_SHIFT;
-        nv_wr32(clk, GPCPLL_COEFF, val);
+        nvkm_wr32(device, GPCPLL_COEFF, val);

         _gk20a_pllg_enable(clk);

-        val = nv_rd32(clk, GPCPLL_CFG);
+        val = nvkm_rd32(device, GPCPLL_CFG);
         if (val & GPCPLL_CFG_LOCK_DET_OFF) {
                 val &= ~GPCPLL_CFG_LOCK_DET_OFF;
-                nv_wr32(clk, GPCPLL_CFG, val);
+                nvkm_wr32(device, GPCPLL_CFG, val);
         }

         if (!nvkm_timer_wait_eq(clk, 300000, GPCPLL_CFG, GPCPLL_CFG_LOCK,
@@ -407,13 +412,13 @@ _gk20a_pllg_program_mnp(struct gk20a_clk *clk, bool allow_slide)
         }

         /* switch to VCO mode */
-        nv_mask(clk, SEL_VCO, 0, BIT(SEL_VCO_GPC2CLK_OUT_SHIFT));
+        nvkm_mask(device, SEL_VCO, 0, BIT(SEL_VCO_GPC2CLK_OUT_SHIFT));

         /* restore out divider 1:1 */
-        val = nv_rd32(clk, GPC2CLK_OUT);
+        val = nvkm_rd32(device, GPC2CLK_OUT);
         val &= ~GPC2CLK_OUT_VCODIV_MASK;
         udelay(2);
-        nv_wr32(clk, GPC2CLK_OUT, val);
+        nvkm_wr32(device, GPC2CLK_OUT, val);

         /* slide up to new NDIV */
         return allow_slide ? gk20a_pllg_slide(clk, clk->n) : 0;
@@ -434,14 +439,15 @@ gk20a_pllg_program_mnp(struct gk20a_clk *clk)
 static void
 gk20a_pllg_disable(struct gk20a_clk *clk)
 {
+        struct nvkm_device *device = clk->base.subdev.device;
         u32 val;

         /* slide to VCO min */
-        val = nv_rd32(clk, GPCPLL_CFG);
+        val = nvkm_rd32(device, GPCPLL_CFG);
         if (val & GPCPLL_CFG_ENABLE) {
                 u32 coeff, m, n_lo;

-                coeff = nv_rd32(clk, GPCPLL_COEFF);
+                coeff = nvkm_rd32(device, GPCPLL_COEFF);
                 m = (coeff >> GPCPLL_COEFF_M_SHIFT) & MASK(GPCPLL_COEFF_M_WIDTH);
                 n_lo = DIV_ROUND_UP(m * clk->params->min_vco,
                                     clk->parent_rate / MHZ);
@@ -449,7 +455,7 @@ gk20a_pllg_disable(struct gk20a_clk *clk)
         }

         /* put PLL in bypass before disabling it */
-        nv_mask(clk, SEL_VCO, BIT(SEL_VCO_GPC2CLK_OUT_SHIFT), 0);
+        nvkm_mask(device, SEL_VCO, BIT(SEL_VCO_GPC2CLK_OUT_SHIFT), 0);

         _gk20a_pllg_disable(clk);
 }
@@ -561,10 +567,11 @@ static int
 gk20a_clk_read(struct nvkm_clk *obj, enum nv_clk_src src)
 {
         struct gk20a_clk *clk = container_of(obj, typeof(*clk), base);
+        struct nvkm_device *device = clk->base.subdev.device;

         switch (src) {
         case nv_clk_src_crystal:
-                return nv_device(clk)->crystal;
+                return device->crystal;
         case nv_clk_src_gpc:
                 gk20a_pllg_read_mnp(clk);
                 return gk20a_pllg_calc_rate(clk) / GK20A_CLK_GPC_MDIV;
@@ -613,9 +620,10 @@ static int
 gk20a_clk_init(struct nvkm_object *object)
 {
         struct gk20a_clk *clk = (void *)object;
+        struct nvkm_device *device = clk->base.subdev.device;
         int ret;

-        nv_mask(clk, GPC2CLK_OUT, GPC2CLK_OUT_INIT_MASK, GPC2CLK_OUT_INIT_VAL);
+        nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_INIT_MASK, GPC2CLK_OUT_INIT_VAL);

         ret = nvkm_clk_init(&clk->base);
         if (ret)
drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c

@@ -41,11 +41,12 @@ static u32 read_pll(struct gt215_clk *, int, u32);
 static u32
 read_vco(struct gt215_clk *clk, int idx)
 {
-        u32 sctl = nv_rd32(clk, 0x4120 + (idx * 4));
+        struct nvkm_device *device = clk->base.subdev.device;
+        u32 sctl = nvkm_rd32(device, 0x4120 + (idx * 4));

         switch (sctl & 0x00000030) {
         case 0x00000000:
-                return nv_device(clk)->crystal;
+                return device->crystal;
         case 0x00000020:
                 return read_pll(clk, 0x41, 0x00e820);
         case 0x00000030:
@@ -58,19 +59,20 @@ read_vco(struct gt215_clk *clk, int idx)
 static u32
 read_clk(struct gt215_clk *clk, int idx, bool ignore_en)
 {
+        struct nvkm_device *device = clk->base.subdev.device;
         u32 sctl, sdiv, sclk;

         /* refclk for the 0xe8xx plls is a fixed frequency */
         if (idx >= 0x40) {
-                if (nv_device(clk)->chipset == 0xaf) {
+                if (device->chipset == 0xaf) {
                         /* no joke.. seriously.. sigh.. */
-                        return nv_rd32(clk, 0x00471c) * 1000;
+                        return nvkm_rd32(device, 0x00471c) * 1000;
                 }
-                return nv_device(clk)->crystal;
+                return device->crystal;
         }

-        sctl = nv_rd32(clk, 0x4120 + (idx * 4));
+        sctl = nvkm_rd32(device, 0x4120 + (idx * 4));
         if (!ignore_en && !(sctl & 0x00000100))
                 return 0;
@@ -82,7 +84,7 @@ read_clk(struct gt215_clk *clk, int idx, bool ignore_en)
         switch (sctl & 0x00003000) {
         case 0x00000000:
                 if (!(sctl & 0x00000200))
-                        return nv_device(clk)->crystal;
+                        return device->crystal;
                 return 0;
         case 0x00002000:
                 if (sctl & 0x00000040)
@@ -104,12 +106,13 @@ read_clk(struct gt215_clk *clk, int idx, bool ignore_en)
 static u32
 read_pll(struct gt215_clk *clk, int idx, u32 pll)
 {
-        u32 ctrl = nv_rd32(clk, pll + 0);
+        struct nvkm_device *device = clk->base.subdev.device;
+        u32 ctrl = nvkm_rd32(device, pll + 0);
         u32 sclk = 0, P = 1, N = 1, M = 1;

         if (!(ctrl & 0x00000008)) {
                 if (ctrl & 0x00000001) {
-                        u32 coef = nv_rd32(clk, pll + 4);
+                        u32 coef = nvkm_rd32(device, pll + 4);
                         M = (coef & 0x000000ff) >> 0;
                         N = (coef & 0x0000ff00) >> 8;
                         P = (coef & 0x003f0000) >> 16;
@@ -136,11 +139,12 @@ static int
 gt215_clk_read(struct nvkm_clk *obj, enum nv_clk_src src)
 {
         struct gt215_clk *clk = container_of(obj, typeof(*clk), base);
+        struct nvkm_device *device = clk->base.subdev.device;
         u32 hsrc;

         switch (src) {
         case nv_clk_src_crystal:
-                return nv_device(clk)->crystal;
+                return device->crystal;
         case nv_clk_src_core:
         case nv_clk_src_core_intm:
                 return read_pll(clk, 0x00, 0x4200);
@@ -155,7 +159,7 @@ gt215_clk_read(struct nvkm_clk *obj, enum nv_clk_src src)
         case nv_clk_src_daemon:
                 return read_clk(clk, 0x25, false);
         case nv_clk_src_host:
-                hsrc = (nv_rd32(clk, 0xc040) & 0x30000000) >> 28;
+                hsrc = (nvkm_rd32(device, 0xc040) & 0x30000000) >> 28;
                 switch (hsrc) {
                 case 0:
                         return read_clk(clk, 0x1d, false);
@@ -297,11 +301,12 @@ calc_host(struct gt215_clk *clk, struct nvkm_cstate *cstate)
 int
 gt215_clk_pre(struct nvkm_clk *clk, unsigned long *flags)
 {
+        struct nvkm_device *device = clk->subdev.device;
         struct nvkm_fifo *fifo = nvkm_fifo(clk);

         /* halt and idle execution engines */
-        nv_mask(clk, 0x020060, 0x00070000, 0x00000000);
-        nv_mask(clk, 0x002504, 0x00000001, 0x00000001);
+        nvkm_mask(device, 0x020060, 0x00070000, 0x00000000);
+        nvkm_mask(device, 0x002504, 0x00000001, 0x00000001);
         /* Wait until the interrupt handler is finished */
         if (!nv_wait(clk, 0x000100, 0xffffffff, 0x00000000))
                 return -EBUSY;
@@ -320,26 +325,29 @@ gt215_clk_pre(struct nvkm_clk *clk, unsigned long *flags)
 void
 gt215_clk_post(struct nvkm_clk *clk, unsigned long *flags)
 {
+        struct nvkm_device *device = clk->subdev.device;
         struct nvkm_fifo *fifo = nvkm_fifo(clk);

         if (fifo && flags)
                 fifo->start(fifo, flags);

-        nv_mask(clk, 0x002504, 0x00000001, 0x00000000);
-        nv_mask(clk, 0x020060, 0x00070000, 0x00040000);
+        nvkm_mask(device, 0x002504, 0x00000001, 0x00000000);
+        nvkm_mask(device, 0x020060, 0x00070000, 0x00040000);
 }

 static void
 disable_clk_src(struct gt215_clk *clk, u32 src)
 {
-        nv_mask(clk, src, 0x00000100, 0x00000000);
-        nv_mask(clk, src, 0x00000001, 0x00000000);
+        struct nvkm_device *device = clk->base.subdev.device;
+        nvkm_mask(device, src, 0x00000100, 0x00000000);
+        nvkm_mask(device, src, 0x00000001, 0x00000000);
 }

 static void
 prog_pll(struct gt215_clk *clk, int idx, u32 pll, int dom)
 {
         struct gt215_clk_info *info = &clk->eng[dom];
+        struct nvkm_device *device = clk->base.subdev.device;
         const u32 src0 = 0x004120 + (idx * 4);
         const u32 src1 = 0x004160 + (idx * 4);
         const u32 ctrl = pll + 0;
@@ -348,30 +356,30 @@ prog_pll(struct gt215_clk *clk, int idx, u32 pll, int dom)
         if (info->pll) {
                 /* Always start from a non-PLL clock */
-                bypass = nv_rd32(clk, ctrl) & 0x00000008;
+                bypass = nvkm_rd32(device, ctrl) & 0x00000008;
                 if (!bypass) {
-                        nv_mask(clk, src1, 0x00000101, 0x00000101);
-                        nv_mask(clk, ctrl, 0x00000008, 0x00000008);
+                        nvkm_mask(device, src1, 0x00000101, 0x00000101);
+                        nvkm_mask(device, ctrl, 0x00000008, 0x00000008);
                         udelay(20);
                 }

-                nv_mask(clk, src0, 0x003f3141, 0x00000101 | info->clk);
-                nv_wr32(clk, coef, info->pll);
-                nv_mask(clk, ctrl, 0x00000015, 0x00000015);
-                nv_mask(clk, ctrl, 0x00000010, 0x00000000);
+                nvkm_mask(device, src0, 0x003f3141, 0x00000101 | info->clk);
+                nvkm_wr32(device, coef, info->pll);
+                nvkm_mask(device, ctrl, 0x00000015, 0x00000015);
+                nvkm_mask(device, ctrl, 0x00000010, 0x00000000);
                 if (!nv_wait(clk, ctrl, 0x00020000, 0x00020000)) {
-                        nv_mask(clk, ctrl, 0x00000010, 0x00000010);
-                        nv_mask(clk, src0, 0x00000101, 0x00000000);
+                        nvkm_mask(device, ctrl, 0x00000010, 0x00000010);
+                        nvkm_mask(device, src0, 0x00000101, 0x00000000);
                         return;
                 }
-                nv_mask(clk, ctrl, 0x00000010, 0x00000010);
-                nv_mask(clk, ctrl, 0x00000008, 0x00000000);
+                nvkm_mask(device, ctrl, 0x00000010, 0x00000010);
+                nvkm_mask(device, ctrl, 0x00000008, 0x00000000);
                 disable_clk_src(clk, src1);
         } else {
-                nv_mask(clk, src1, 0x003f3141, 0x00000101 | info->clk);
-                nv_mask(clk, ctrl, 0x00000018, 0x00000018);
+                nvkm_mask(device, src1, 0x003f3141, 0x00000101 | info->clk);
+                nvkm_mask(device, ctrl, 0x00000018, 0x00000018);
                 udelay(20);
-                nv_mask(clk, ctrl, 0x00000001, 0x00000000);
+                nvkm_mask(device, ctrl, 0x00000001, 0x00000000);
                 disable_clk_src(clk, src0);
         }
 }
@@ -380,26 +388,28 @@ static void
 prog_clk(struct gt215_clk *clk, int idx, int dom)
 {
         struct gt215_clk_info *info = &clk->eng[dom];
-        nv_mask(clk, 0x004120 + (idx * 4), 0x003f3141, 0x00000101 | info->clk);
+        struct nvkm_device *device = clk->base.subdev.device;
+        nvkm_mask(device, 0x004120 + (idx * 4), 0x003f3141, 0x00000101 | info->clk);
 }

 static void
 prog_host(struct gt215_clk *clk)
 {
         struct gt215_clk_info *info = &clk->eng[nv_clk_src_host];
-        u32 hsrc = (nv_rd32(clk, 0xc040));
+        struct nvkm_device *device = clk->base.subdev.device;
+        u32 hsrc = (nvkm_rd32(device, 0xc040));

         switch (info->host_out) {
         case NVA3_HOST_277:
                 if ((hsrc & 0x30000000) == 0) {
-                        nv_wr32(clk, 0xc040, hsrc | 0x20000000);
+                        nvkm_wr32(device, 0xc040, hsrc | 0x20000000);
                         disable_clk_src(clk, 0x4194);
                 }
                 break;
         case NVA3_HOST_CLK:
                 prog_clk(clk, 0x1d, nv_clk_src_host);
                 if ((hsrc & 0x30000000) >= 0x20000000) {
-                        nv_wr32(clk, 0xc040, hsrc & ~0x30000000);
+                        nvkm_wr32(device, 0xc040, hsrc & ~0x30000000);
                 }
                 break;
         default:
@@ -407,22 +417,23 @@ prog_host(struct gt215_clk *clk)
         }

         /* This seems to be a clock gating factor on idle, always set to 64 */
-        nv_wr32(clk, 0xc044, 0x3e);
+        nvkm_wr32(device, 0xc044, 0x3e);
 }

 static void
 prog_core(struct gt215_clk *clk, int dom)
 {
         struct gt215_clk_info *info = &clk->eng[dom];
-        u32 fb_delay = nv_rd32(clk, 0x10002c);
+        struct nvkm_device *device = clk->base.subdev.device;
+        u32 fb_delay = nvkm_rd32(device, 0x10002c);

         if (fb_delay < info->fb_delay)
-                nv_wr32(clk, 0x10002c, info->fb_delay);
+                nvkm_wr32(device, 0x10002c, info->fb_delay);

         prog_pll(clk, 0x00, 0x004200, dom);

         if (fb_delay > info->fb_delay)
-                nv_wr32(clk, 0x10002c, info->fb_delay);
+                nvkm_wr32(device, 0x10002c, info->fb_delay);
 }

 static int
drivers/gpu/drm/nouveau/nvkm/subdev/clk/mcp77.c

@@ -40,14 +40,16 @@ struct mcp77_clk {
 static u32
 read_div(struct mcp77_clk *clk)
 {
-        return nv_rd32(clk, 0x004600);
+        struct nvkm_device *device = clk->base.subdev.device;
+        return nvkm_rd32(device, 0x004600);
 }

 static u32
 read_pll(struct mcp77_clk *clk, u32 base)
 {
-        u32 ctrl = nv_rd32(clk, base + 0);
-        u32 coef = nv_rd32(clk, base + 4);
+        struct nvkm_device *device = clk->base.subdev.device;
+        u32 ctrl = nvkm_rd32(device, base + 0);
+        u32 coef = nvkm_rd32(device, base + 4);
         u32 ref = clk->base.read(&clk->base, nv_clk_src_href);
         u32 post_div = 0;
         u32 clock = 0;
@@ -55,10 +57,10 @@ read_pll(struct mcp77_clk *clk, u32 base)
         switch (base){
         case 0x4020:
-                post_div = 1 << ((nv_rd32(clk, 0x4070) & 0x000f0000) >> 16);
+                post_div = 1 << ((nvkm_rd32(device, 0x4070) & 0x000f0000) >> 16);
                 break;
         case 0x4028:
-                post_div = (nv_rd32(clk, 0x4040) & 0x000f0000) >> 16;
+                post_div = (nvkm_rd32(device, 0x4040) & 0x000f0000) >> 16;
                 break;
         default:
                 break;
@@ -78,12 +80,13 @@ static int
 mcp77_clk_read(struct nvkm_clk *obj, enum nv_clk_src src)
 {
         struct mcp77_clk *clk = container_of(obj, typeof(*clk), base);
-        u32 mast = nv_rd32(clk, 0x00c054);
+        struct nvkm_device *device = clk->base.subdev.device;
+        u32 mast = nvkm_rd32(device, 0x00c054);
         u32 P = 0;

         switch (src) {
         case nv_clk_src_crystal:
-                return nv_device(clk)->crystal;
+                return device->crystal;
         case nv_clk_src_href:
                 return 100000; /* PCIE reference clock */
         case nv_clk_src_hclkm4:
@@ -99,7 +102,7 @@ mcp77_clk_read(struct nvkm_clk *obj, enum nv_clk_src src)
                 }
                 break;
         case nv_clk_src_core:
-                P = (nv_rd32(clk, 0x004028) & 0x00070000) >> 16;
+                P = (nvkm_rd32(device, 0x004028) & 0x00070000) >> 16;
                 switch (mast & 0x00000003) {
                 case 0x00000000:
                         return clk->base.read(&clk->base, nv_clk_src_crystal) >> P;
@@ -122,7 +125,7 @@ mcp77_clk_read(struct nvkm_clk *obj, enum nv_clk_src src)
                 default:
                         return 0;
                 }
         case nv_clk_src_shader:
-                P = (nv_rd32(clk, 0x004020) & 0x00070000) >> 16;
+                P = (nvkm_rd32(device, 0x004020) & 0x00070000) >> 16;
                 switch (mast & 0x00000030) {
                 case 0x00000000:
                         if (mast & 0x00000040)
@@ -293,6 +296,7 @@ static int
 mcp77_clk_prog(struct nvkm_clk *obj)
 {
         struct mcp77_clk *clk = container_of(obj, typeof(*clk), base);
+        struct nvkm_device *device = clk->base.subdev.device;
         u32 pllmask = 0, mast;
         unsigned long flags;
         unsigned long *f = &flags;
@@ -303,19 +307,19 @@ mcp77_clk_prog(struct nvkm_clk *obj)
                 goto out;

         /* First switch to safe clocks: href */
-        mast = nv_mask(clk, 0xc054, 0x03400e70, 0x03400640);
+        mast = nvkm_mask(device, 0xc054, 0x03400e70, 0x03400640);
         mast &= ~0x00400e73;
         mast |= 0x03000000;

         switch (clk->csrc) {
         case nv_clk_src_hclkm4:
-                nv_mask(clk, 0x4028, 0x00070000, clk->cctrl);
+                nvkm_mask(device, 0x4028, 0x00070000, clk->cctrl);
                 mast |= 0x00000002;
                 break;
         case nv_clk_src_core:
-                nv_wr32(clk, 0x402c, clk->ccoef);
-                nv_wr32(clk, 0x4028, 0x80000000 | clk->cctrl);
-                nv_wr32(clk, 0x4040, clk->cpost);
+                nvkm_wr32(device, 0x402c, clk->ccoef);
+                nvkm_wr32(device, 0x4028, 0x80000000 | clk->cctrl);
+                nvkm_wr32(device, 0x4040, clk->cpost);
                 pllmask |= (0x3 << 8);
                 mast |= 0x00000003;
                 break;
@@ -326,17 +330,17 @@ mcp77_clk_prog(struct nvkm_clk *obj)
         switch (clk->ssrc) {
         case nv_clk_src_href:
-                nv_mask(clk, 0x4020, 0x00070000, 0x00000000);
+                nvkm_mask(device, 0x4020, 0x00070000, 0x00000000);
                 /* mast |= 0x00000000; */
                 break;
         case nv_clk_src_core:
-                nv_mask(clk, 0x4020, 0x00070000, clk->sctrl);
+                nvkm_mask(device, 0x4020, 0x00070000, clk->sctrl);
                 mast |= 0x00000020;
                 break;
         case nv_clk_src_shader:
-                nv_wr32(clk, 0x4024, clk->scoef);
-                nv_wr32(clk, 0x4020, 0x80000000 | clk->sctrl);
-                nv_wr32(clk, 0x4070, clk->spost);
+                nvkm_wr32(device, 0x4024, clk->scoef);
+                nvkm_wr32(device, 0x4020, 0x80000000 | clk->sctrl);
+                nvkm_wr32(device, 0x4070, clk->spost);
                 pllmask |= (0x3 << 12);
                 mast |= 0x00000030;
                 break;
@@ -354,21 +358,21 @@ mcp77_clk_prog(struct nvkm_clk *obj)
         case nv_clk_src_cclk:
                 mast |= 0x00400000;
         default:
-                nv_wr32(clk, 0x4600, clk->vdiv);
+                nvkm_wr32(device, 0x4600, clk->vdiv);
         }

-        nv_wr32(clk, 0xc054, mast);
+        nvkm_wr32(device, 0xc054, mast);

 resume:
         /* Disable some PLLs and dividers when unused */
         if (clk->csrc != nv_clk_src_core) {
-                nv_wr32(clk, 0x4040, 0x00000000);
-                nv_mask(clk, 0x4028, 0x80000000, 0x00000000);
+                nvkm_wr32(device, 0x4040, 0x00000000);
+                nvkm_mask(device, 0x4028, 0x80000000, 0x00000000);
         }

         if (clk->ssrc != nv_clk_src_shader) {
-                nv_wr32(clk, 0x4070, 0x00000000);
-                nv_mask(clk, 0x4020, 0x80000000, 0x00000000);
+                nvkm_wr32(device, 0x4070, 0x00000000);
+                nvkm_mask(device, 0x4020, 0x80000000, 0x00000000);
         }

 out:
drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv40.c

@@ -48,7 +48,8 @@ nv40_domain[] = {
 static u32
 read_pll_1(struct nv40_clk *clk, u32 reg)
 {
-        u32 ctrl = nv_rd32(clk, reg + 0x00);
+        struct nvkm_device *device = clk->base.subdev.device;
+        u32 ctrl = nvkm_rd32(device, reg + 0x00);
         int P = (ctrl & 0x00070000) >> 16;
         int N = (ctrl & 0x0000ff00) >> 8;
         int M = (ctrl & 0x000000ff) >> 0;
@@ -63,8 +64,9 @@ read_pll_1(struct nv40_clk *clk, u32 reg)
 static u32
 read_pll_2(struct nv40_clk *clk, u32 reg)
 {
-        u32 ctrl = nv_rd32(clk, reg + 0x00);
-        u32 coef = nv_rd32(clk, reg + 0x04);
+        struct nvkm_device *device = clk->base.subdev.device;
+        u32 ctrl = nvkm_rd32(device, reg + 0x00);
+        u32 coef = nvkm_rd32(device, reg + 0x04);
         int N2 = (coef & 0xff000000) >> 24;
         int M2 = (coef & 0x00ff0000) >> 16;
         int N1 = (coef & 0x0000ff00) >> 8;
@@ -104,11 +106,12 @@ static int
 nv40_clk_read(struct nvkm_clk *obj, enum nv_clk_src src)
 {
         struct nv40_clk *clk = container_of(obj, typeof(*clk), base);
-        u32 mast = nv_rd32(clk, 0x00c040);
+        struct nvkm_device *device = clk->base.subdev.device;
+        u32 mast = nvkm_rd32(device, 0x00c040);

         switch (src) {
         case nv_clk_src_crystal:
-                return nv_device(clk)->crystal;
+                return device->crystal;
         case nv_clk_src_href:
                 return 100000; /*XXX: PCIE/AGP differ*/
         case nv_clk_src_core:
@@ -191,12 +194,13 @@ static int
 nv40_clk_prog(struct nvkm_clk *obj)
 {
         struct nv40_clk *clk = container_of(obj, typeof(*clk), base);
-        nv_mask(clk, 0x00c040, 0x00000333, 0x00000000);
-        nv_wr32(clk, 0x004004, clk->npll_coef);
-        nv_mask(clk, 0x004000, 0xc0070100, clk->npll_ctrl);
-        nv_mask(clk, 0x004008, 0xc007ffff, clk->spll);
+        struct nvkm_device *device = clk->base.subdev.device;
+        nvkm_mask(device, 0x00c040, 0x00000333, 0x00000000);
+        nvkm_wr32(device, 0x004004, clk->npll_coef);
+        nvkm_mask(device, 0x004000, 0xc0070100, clk->npll_ctrl);
+        nvkm_mask(device, 0x004008, 0xc007ffff, clk->spll);
         mdelay(5);
-        nv_mask(clk, 0x00c040, 0x00000333, clk->ctrl);
+        nvkm_mask(device, 0x00c040, 0x00000333, clk->ctrl);
         return 0;
 }
drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.c

@@ -31,17 +31,18 @@
 static u32
 read_div(struct nv50_clk *clk)
 {
-        switch (nv_device(clk)->chipset) {
+        struct nvkm_device *device = clk->base.subdev.device;
+        switch (device->chipset) {
         case 0x50: /* it exists, but only has bit 31, not the dividers.. */
         case 0x84:
         case 0x86:
         case 0x98:
         case 0xa0:
-                return nv_rd32(clk, 0x004700);
+                return nvkm_rd32(device, 0x004700);
         case 0x92:
         case 0x94:
         case 0x96:
-                return nv_rd32(clk, 0x004800);
+                return nvkm_rd32(device, 0x004800);
         default:
                 return 0x00000000;
         }
@@ -50,11 +51,12 @@ read_div(struct nv50_clk *clk)
 static u32
 read_pll_src(struct nv50_clk *clk, u32 base)
 {
+        struct nvkm_device *device = clk->base.subdev.device;
         u32 coef, ref = clk->base.read(&clk->base, nv_clk_src_crystal);
-        u32 rsel = nv_rd32(clk, 0x00e18c);
+        u32 rsel = nvkm_rd32(device, 0x00e18c);
         int P, N, M, id;

-        switch (nv_device(clk)->chipset) {
+        switch (device->chipset) {
         case 0x50:
         case 0xa0:
                 switch (base) {
@@ -67,7 +69,7 @@ read_pll_src(struct nv50_clk *clk, u32 base)
                         return 0;
                 }

-                coef = nv_rd32(clk, 0x00e81c + (id * 0x0c));
+                coef = nvkm_rd32(device, 0x00e81c + (id * 0x0c));
                 ref *=  (coef & 0x01000000) ? 2 : 4;
                 P    =  (coef & 0x00070000) >> 16;
                 N    = ((coef & 0x0000ff00) >> 8) + 1;
@@ -76,7 +78,7 @@ read_pll_src(struct nv50_clk *clk, u32 base)
         case 0x84:
         case 0x86:
         case 0x92:
-                coef = nv_rd32(clk, 0x00e81c);
+                coef = nvkm_rd32(device, 0x00e81c);
                 P = (coef & 0x00070000) >> 16;
                 N = (coef & 0x0000ff00) >> 8;
                 M = (coef & 0x000000ff) >> 0;
@@ -84,7 +86,7 @@ read_pll_src(struct nv50_clk *clk, u32 base)
         case 0x94:
         case 0x96:
         case 0x98:
-                rsel = nv_rd32(clk, 0x00c050);
+                rsel = nvkm_rd32(device, 0x00c050);
                 switch (base) {
                 case 0x4020: rsel = (rsel & 0x00000003) >> 0; break;
                 case 0x4008: rsel = (rsel & 0x0000000c) >> 2; break;
@@ -102,8 +104,8 @@ read_pll_src(struct nv50_clk *clk, u32 base)
                 case 3: id = 0; break;
                 }

-                coef =  nv_rd32(clk, 0x00e81c + (id * 0x28));
-                P    = (nv_rd32(clk, 0x00e824 + (id * 0x28)) >> 16) & 7;
+                coef =  nvkm_rd32(device, 0x00e81c + (id * 0x28));
+                P    = (nvkm_rd32(device, 0x00e824 + (id * 0x28)) >> 16) & 7;
                 P   += (coef & 0x00070000) >> 16;
                 N    = (coef & 0x0000ff00) >> 8;
                 M    = (coef & 0x000000ff) >> 0;
@@ -121,7 +123,8 @@ read_pll_src(struct nv50_clk *clk, u32 base)
 static u32
 read_pll_ref(struct nv50_clk *clk, u32 base)
 {
-        u32 src, mast = nv_rd32(clk, 0x00c040);
+        struct nvkm_device *device = clk->base.subdev.device;
+        u32 src, mast = nvkm_rd32(device, 0x00c040);

         switch (base) {
         case 0x004028:
@@ -152,16 +155,17 @@ read_pll_ref(struct nv50_clk *clk, u32 base)
 static u32
 read_pll(struct nv50_clk *clk, u32 base)
 {
-        u32 mast = nv_rd32(clk, 0x00c040);
-        u32 ctrl = nv_rd32(clk, base + 0);
-        u32 coef = nv_rd32(clk, base + 4);
+        struct nvkm_device *device = clk->base.subdev.device;
+        u32 mast = nvkm_rd32(device, 0x00c040);
+        u32 ctrl = nvkm_rd32(device, base + 0);
+        u32 coef = nvkm_rd32(device, base + 4);
         u32 ref = read_pll_ref(clk, base);
         u32 freq = 0;
         int N1, N2, M1, M2;

         if (base == 0x004028 && (mast & 0x00100000)) {
                 /* wtf, appears to only disable post-divider on gt200 */
-                if (nv_device(clk)->chipset != 0xa0)
+                if (device->chipset != 0xa0)
                         return clk->base.read(&clk->base, nv_clk_src_dom6);
         }
@@ -186,12 +190,13 @@ static int
 nv50_clk_read(struct nvkm_clk *obj, enum nv_clk_src src)
 {
         struct nv50_clk *clk = container_of(obj, typeof(*clk), base);
-        u32 mast = nv_rd32(clk, 0x00c040);
+        struct nvkm_device *device = clk->base.subdev.device;
+        u32 mast = nvkm_rd32(device, 0x00c040);
         u32 P = 0;

         switch (src) {
         case nv_clk_src_crystal:
-                return nv_device(clk)->crystal;
+                return device->crystal;
         case nv_clk_src_href:
                 return 100000; /* PCIE reference clock */
         case nv_clk_src_hclk:
@@ -210,7 +215,7 @@ nv50_clk_read(struct nvkm_clk *obj, enum nv_clk_src src)
                 break;
         case nv_clk_src_core:
                 if (!(mast & 0x00100000))
-                        P = (nv_rd32(clk, 0x004028) & 0x00070000) >> 16;
+                        P = (nvkm_rd32(device, 0x004028) & 0x00070000) >> 16;
                 switch (mast & 0x00000003) {
                 case 0x00000000: return clk->base.read(&clk->base, nv_clk_src_crystal) >> P;
                 case 0x00000001: return clk->base.read(&clk->base, nv_clk_src_dom6);
@@ -219,7 +224,7 @@ nv50_clk_read(struct nvkm_clk *obj, enum nv_clk_src src)
                 }
                 break;
         case nv_clk_src_shader:
-                P = (nv_rd32(clk, 0x004020) & 0x00070000) >> 16;
+                P = (nvkm_rd32(device, 0x004020) & 0x00070000) >> 16;
                 switch (mast & 0x00000030) {
                 case 0x00000000:
                         if (mast & 0x00000080)
@@ -231,8 +236,8 @@ nv50_clk_read(struct nvkm_clk *obj, enum nv_clk_src src)
                 }
                 break;
         case nv_clk_src_mem:
-                P = (nv_rd32(clk, 0x004008) & 0x00070000) >> 16;
-                if (nv_rd32(clk, 0x004008) & 0x00000200) {
+                P = (nvkm_rd32(device, 0x004008) & 0x00070000) >> 16;
+                if (nvkm_rd32(device, 0x004008) & 0x00000200) {
                         switch (mast & 0x0000c000) {
                         case 0x00000000:
                                 return clk->base.read(&clk->base, nv_clk_src_crystal) >> P;
@@ -246,7 +251,7 @@ nv50_clk_read(struct nvkm_clk *obj, enum nv_clk_src src)
                 break;
         case nv_clk_src_vdec:
                 P = (read_div(clk) & 0x00000700) >> 8;
-                switch (nv_device(clk)->chipset) {
+                switch (device->chipset) {
                 case 0x84:
                 case 0x86:
                 case 0x92:
@@ -255,7 +260,7 @@ nv50_clk_read(struct nvkm_clk *obj, enum nv_clk_src src)
                 case 0xa0:
                         switch (mast & 0x00000c00) {
                         case 0x00000000:
-                                if (nv_device(clk)->chipset == 0xa0) /* wtf?? */
+                                if (device->chipset == 0xa0) /* wtf?? */
                                         return clk->base.read(&clk->base, nv_clk_src_core) >> P;
                                 return clk->base.read(&clk->base, nv_clk_src_crystal) >> P;
                         case 0x00000400:
@@ -283,7 +288,7 @@ nv50_clk_read(struct nvkm_clk *obj, enum nv_clk_src src)
                 }
                 break;
         case nv_clk_src_dom6:
-                switch (nv_device(clk)->chipset) {
+                switch (device->chipset) {
                 case 0x50:
                 case 0xa0:
                         return read_pll(clk, 0x00e810) >> 2;