nexedi / linux · Commits

Commit 76946074 authored Nov 06, 2002 by Anton Blanchard
ppc64: rework ppc64 hashtable management
parent 962a7e51

Showing 5 changed files with 133 additions and 219 deletions (+133 -219)
  arch/ppc64/kernel/htab.c          +52  -27
  arch/ppc64/kernel/pSeries_htab.c  +41 -113
  arch/ppc64/kernel/pSeries_lpar.c  +15  -64
  arch/ppc64/mm/init.c              +22   -8
  include/asm-ppc64/machdep.h        +3   -7
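Reading the hunks below as a whole: the per-platform make_pte() bolted-PTE helpers are deleted, and both the boot-time bolted mappings and the fault path now funnel through the hpte_insert() entry points, with callers computing the hash-table group index themselves. hpte_insert() now takes the virtual address (va) instead of a pre-shifted vpn, and hpte_updatepp()/__hash_page() gain an int local flag. As a minimal standalone C sketch (not kernel code) of the group-index computation those callers now repeat, using constants taken from the diff (28-bit segment offset, 4 KB pages, 8 HPTEs per group); the hash function here is a toy stand-in, since hpt_hash() itself is not part of this commit:

#include <stdio.h>

#define PAGE_SHIFT      12   /* 4 KB base pages, matching "step = 4*KB" in htab.c */
#define HPTES_PER_GROUP 8

/* Toy hash purely so the example runs; the kernel's hpt_hash() differs. */
static unsigned long toy_hash(unsigned long vpn, int large)
{
        (void)large;
        return vpn ^ (vpn >> 5);
}

/* Mirrors the computation create_pte_mapping()/__hash_page() now do before
 * calling ppc_md.hpte_insert(hpteg, va, ...). */
static unsigned long hpte_group_index(unsigned long vsid, unsigned long ea,
                                      unsigned long htab_hash_mask)
{
        unsigned long va   = (vsid << 28) | (ea & 0x0fffffff);
        unsigned long vpn  = va >> PAGE_SHIFT;
        unsigned long hash = toy_hash(vpn, 0);

        return (hash & htab_hash_mask) * HPTES_PER_GROUP;
}

int main(void)
{
        /* Hypothetical vsid/ea/mask values, for illustration only. */
        printf("hpteg = %lu\n", hpte_group_index(0x123, 0x2000, 0x3ff));
        return 0;
}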
arch/ppc64/kernel/htab.c

@@ -62,7 +62,6 @@
 HTAB htab_data = {NULL, 0, 0, 0, 0};
 
 extern unsigned long _SDR1;
-extern unsigned long klimit;
 
 #define KB (1024)
 #define MB (1024*KB)
@@ -77,10 +76,9 @@ loop_forever(void)
 static inline void
 create_pte_mapping(unsigned long start, unsigned long end,
-                   unsigned long mode, unsigned long mask, int large)
+                   unsigned long mode, int large)
 {
         unsigned long addr;
-        HPTE *htab = (HPTE *)__v2a(htab_data.htab);
         unsigned int step;
 
         if (large)
@@ -89,14 +87,33 @@ create_pte_mapping(unsigned long start, unsigned long end,
                 step = 4*KB;
 
         for (addr = start; addr < end; addr += step) {
+                unsigned long vpn, hash, hpteg;
                 unsigned long vsid = get_kernel_vsid(addr);
                 unsigned long va = (vsid << 28) | (addr & 0xfffffff);
+                int ret;
+
+                if (large)
+                        vpn = va >> LARGE_PAGE_SHIFT;
+                else
+                        vpn = va >> PAGE_SHIFT;
+
+                hash = hpt_hash(vpn, large);
+
+                hpteg = ((hash & htab_data.htab_hash_mask) * HPTES_PER_GROUP);
 
                 if (naca->platform == PLATFORM_PSERIES_LPAR)
-                        pSeries_lpar_make_pte(htab, va,
-                                (unsigned long)__v2a(addr), mode, mask, large);
+                        ret = pSeries_lpar_hpte_insert(hpteg, va,
+                                (unsigned long)__v2a(addr) >> PAGE_SHIFT, 0, mode, 1, large);
                 else
-                        pSeries_make_pte(htab, va,
-                                (unsigned long)__v2a(addr), mode, mask, large);
+                        ret = pSeries_hpte_insert(hpteg, va,
+                                (unsigned long)__v2a(addr) >> PAGE_SHIFT, 0, mode, 1, large);
+
+                if (ret == -1) {
+                        ppc64_terminate_msg(0x20, "create_pte_mapping");
+                        loop_forever();
+                }
         }
 }
@@ -105,7 +122,7 @@ htab_initialize(void)
 {
         unsigned long table, htab_size_bytes;
         unsigned long pteg_count;
-        unsigned long mode_rw, mask;
+        unsigned long mode_rw;
 
         /*
          * Calculate the required size of the htab. We want the number of
@@ -146,19 +163,18 @@ htab_initialize(void)
         }
 
         mode_rw = _PAGE_ACCESSED | _PAGE_COHERENT | PP_RWXX;
-        mask = pteg_count - 1;
 
         /* XXX we currently map kernel text rw, should fix this */
         if (cpu_has_largepage() && naca->physicalMemorySize > 256*MB) {
                 create_pte_mapping((unsigned long)KERNELBASE,
-                                   KERNELBASE + 256*MB, mode_rw, mask, 0);
+                                   KERNELBASE + 256*MB, mode_rw, 0);
                 create_pte_mapping((unsigned long)KERNELBASE + 256*MB,
                                    KERNELBASE + (naca->physicalMemorySize),
-                                   mode_rw, mask, 1);
+                                   mode_rw, 1);
         } else {
                 create_pte_mapping((unsigned long)KERNELBASE,
                                    KERNELBASE + (naca->physicalMemorySize),
-                                   mode_rw, mask, 0);
+                                   mode_rw, 0);
         }
 }
 #undef KB
@@ -204,7 +220,7 @@ static inline unsigned long computeHptePP(unsigned long pte)
  * to be valid via Linux page tables, return 1. If handled return 0
  */
 int __hash_page(unsigned long ea, unsigned long access, unsigned long vsid,
-                pte_t *ptep, unsigned long trap)
+                pte_t *ptep, unsigned long trap, int local)
 {
         unsigned long va, vpn;
         unsigned long newpp, prpn;
@@ -212,9 +228,16 @@ int __hash_page(unsigned long ea, unsigned long access, unsigned long vsid,
         long slot;
         pte_t old_pte, new_pte;
+        /* XXX fix for large ptes */
+        int large = 0;
 
         /* Search the Linux page table for a match with va */
         va = (vsid << 28) | (ea & 0x0fffffff);
-        vpn = va >> PAGE_SHIFT;
+
+        if (large)
+                vpn = va >> LARGE_PAGE_SHIFT;
+        else
+                vpn = va >> PAGE_SHIFT;
 
         /*
          * If no pte found or not present, send the problem up to
@@ -276,16 +299,14 @@ int __hash_page(unsigned long ea, unsigned long access, unsigned long vsid,
                 /* There MIGHT be an HPTE for this pte */
                 unsigned long hash, slot, secondary;
 
-                /* XXX fix large pte flag */
-                hash = hpt_hash(vpn, 0);
+                hash = hpt_hash(vpn, large);
                 secondary = (pte_val(old_pte) & _PAGE_SECONDARY) >> 15;
                 if (secondary)
                         hash = ~hash;
                 slot = (hash & htab_data.htab_hash_mask) * HPTES_PER_GROUP;
                 slot += (pte_val(old_pte) & _PAGE_GROUP_IX) >> 12;
 
-                /* XXX fix large pte flag */
-                if (ppc_md.hpte_updatepp(slot, newpp, va, 0) == -1)
+                if (ppc_md.hpte_updatepp(slot, newpp, va, large, local) == -1)
                         pte_val(old_pte) &= ~_PAGE_HPTEFLAGS;
                 else
                         if (!pte_same(old_pte, new_pte))
@@ -293,8 +314,7 @@ int __hash_page(unsigned long ea, unsigned long access, unsigned long vsid,
         }
 
         if (likely(!(pte_val(old_pte) & _PAGE_HASHPTE))) {
-                /* XXX fix large pte flag */
-                unsigned long hash = hpt_hash(vpn, 0);
+                unsigned long hash = hpt_hash(vpn, large);
                 unsigned long hpte_group;
 
                 prpn = pte_val(old_pte) >> PTE_SHIFT;
@@ -309,18 +329,16 @@ int __hash_page(unsigned long ea, unsigned long access, unsigned long vsid,
                 /* copy appropriate flags from linux pte */
                 hpteflags = (pte_val(new_pte) & 0x1f8) | newpp;
 
-                /* XXX fix large pte flag */
-                slot = ppc_md.hpte_insert(hpte_group, vpn, prpn, 0,
-                                          hpteflags, 0, 0);
+                slot = ppc_md.hpte_insert(hpte_group, va, prpn, 0,
+                                          hpteflags, 0, large);
 
                 /* Primary is full, try the secondary */
                 if (slot == -1) {
                         pte_val(new_pte) |= 1 << 15;
                         hpte_group = ((~hash & htab_data.htab_hash_mask) *
                                       HPTES_PER_GROUP) & ~0x7UL;
-                        /* XXX fix large pte flag */
-                        slot = ppc_md.hpte_insert(hpte_group, vpn, prpn,
-                                                  1, hpteflags, 0, 0);
+                        slot = ppc_md.hpte_insert(hpte_group, va, prpn,
+                                                  1, hpteflags, 0, large);
                         if (slot == -1) {
                                 if (mftb() & 0x1)
                                         hpte_group = ((hash & htab_data.htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL;
@@ -351,6 +369,8 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
         struct mm_struct *mm;
         pte_t *ptep;
         int ret;
+        int user_region = 0;
+        int local = 0;
 
         /* Check for invalid addresses. */
         if (!IS_VALID_EA(ea))
@@ -358,6 +378,7 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
         switch (REGION_ID(ea)) {
         case USER_REGION_ID:
+                user_region = 1;
                 mm = current->mm;
                 if (mm == NULL)
                         return 1;
@@ -401,8 +422,12 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
          * from modifying entries while we search and update
          */
         spin_lock(&mm->page_table_lock);
+
+        if (user_region && (mm->cpu_vm_mask == (1 << smp_processor_id())))
+                local = 1;
+
         ptep = find_linux_pte(pgdir, ea);
-        ret = __hash_page(ea, access, vsid, ptep, trap);
+        ret = __hash_page(ea, access, vsid, ptep, trap, local);
         spin_unlock(&mm->page_table_lock);
 
         return ret;
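One piece of the rework worth calling out from the htab.c hunks above: hash_page() now records whether the faulting mm is live only on the current CPU and threads that int local flag through __hash_page() into ppc_md.hpte_updatepp(), so the pSeries code can use the processor-local tlbiel instead of a broadcast tlbie under the global tlbie lock. A minimal standalone sketch of that decision, with a hypothetical mm_model struct standing in for the relevant mm_struct state:

#include <stdio.h>

/* Hypothetical stand-in for the kernel state used in hash_page(); the real
 * fields come from struct mm_struct and smp_processor_id(). */
struct mm_model {
        unsigned long cpu_vm_mask;      /* one bit per CPU the mm is active on */
};

static int flush_is_local(const struct mm_model *mm, int this_cpu, int user_region)
{
        /* Same test the new hash_page() applies before calling __hash_page(). */
        return user_region && (mm->cpu_vm_mask == (1UL << this_cpu));
}

int main(void)
{
        struct mm_model mm = { .cpu_vm_mask = 1UL << 2 };

        /* Only CPU 2 has this mm: a local (tlbiel-style) flush is enough. */
        printf("local on cpu2: %d\n", flush_is_local(&mm, 2, 1));
        /* Seen from another CPU, or for kernel regions, fall back to global tlbie. */
        printf("local on cpu0: %d\n", flush_is_local(&mm, 0, 1));
        return 0;
}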
arch/ppc64/kernel/pSeries_htab.c

@@ -22,46 +22,6 @@
 #include <asm/tlbflush.h>
 #include <asm/tlb.h>
 
-/*
- * Create a pte. Used during initialization only.
- * We assume the PTE will fit in the primary PTEG.
- */
-void pSeries_make_pte(HPTE *htab, unsigned long va, unsigned long pa,
-                      int mode, unsigned long hash_mask, int large)
-{
-        HPTE *hptep;
-        unsigned long hash, i;
-        unsigned long vpn;
-
-        if (large)
-                vpn = va >> LARGE_PAGE_SHIFT;
-        else
-                vpn = va >> PAGE_SHIFT;
-
-        hash = hpt_hash(vpn, large);
-
-        hptep = htab + ((hash & hash_mask) * HPTES_PER_GROUP);
-
-        for (i = 0; i < 8; ++i, ++hptep) {
-                if (hptep->dw0.dw0.v == 0) {            /* !valid */
-                        hptep->dw1.dword1 = pa | mode;
-                        hptep->dw0.dword0 = 0;
-                        hptep->dw0.dw0.avpn = va >> 23;
-                        hptep->dw0.dw0.bolted = 1;      /* bolted */
-                        if (large) {
-                                hptep->dw0.dw0.l = 1;
-                                hptep->dw0.dw0.avpn &= ~0x1UL;
-                        }
-                        hptep->dw0.dw0.v = 1;           /* make valid */
-                        return;
-                }
-        }
-
-        /* We should _never_ get here and too early to call xmon. */
-        while (1)
-                ;
-}
-
 #define HPTE_LOCK_BIT 3
@@ -72,7 +32,7 @@ static inline void pSeries_lock_hpte(HPTE *hptep)
                 if (!test_and_set_bit(HPTE_LOCK_BIT, word))
                         break;
                 while (test_bit(HPTE_LOCK_BIT, word))
-                        barrier();
+                        cpu_relax();
         }
 }
@@ -86,11 +46,10 @@ static inline void pSeries_unlock_hpte(HPTE *hptep)
 
 static spinlock_t pSeries_tlbie_lock = SPIN_LOCK_UNLOCKED;
 
-static long pSeries_hpte_insert(unsigned long hpte_group, unsigned long vpn,
+long pSeries_hpte_insert(unsigned long hpte_group, unsigned long va,
                                 unsigned long prpn, int secondary,
                                 unsigned long hpteflags, int bolted, int large)
 {
-        unsigned long avpn = vpn >> 11;
         unsigned long arpn = physRpn_to_absRpn(prpn);
         HPTE *hptep = htab_data.htab + hpte_group;
         Hpte_dword0 dw0;
@@ -120,13 +79,15 @@ static long pSeries_hpte_insert(unsigned long hpte_group, unsigned long vpn,
         lhpte.dw1.flags.flags = hpteflags;
 
         lhpte.dw0.dword0 = 0;
-        lhpte.dw0.dw0.avpn = avpn;
+        lhpte.dw0.dw0.avpn = va >> 23;
         lhpte.dw0.dw0.h = secondary;
         lhpte.dw0.dw0.bolted = bolted;
         lhpte.dw0.dw0.v = 1;
 
-        if (large)
+        if (large) {
                 lhpte.dw0.dw0.l = 1;
+                lhpte.dw0.dw0.avpn &= ~0x1UL;
+        }
 
         hptep->dw1.dword1 = lhpte.dw1.dword1;
@@ -150,17 +111,10 @@ static long pSeries_hpte_remove(unsigned long hpte_group)
         Hpte_dword0 dw0;
         int i;
         int slot_offset;
-        unsigned long vsid, group, pi, pi_high;
-        unsigned long slot;
-        unsigned long flags;
-        int large;
-        unsigned long va;
 
-        /* pick a random slot to start at */
+        /* pick a random entry to start at */
         slot_offset = mftb() & 0x7;
-        udbg_printf("remove_hpte in %d\n", slot_offset);
 
         for (i = 0; i < HPTES_PER_GROUP; i++) {
                 hptep = htab_data.htab + hpte_group + slot_offset;
                 dw0 = hptep->dw0.dw0;
@@ -181,30 +135,9 @@ static long pSeries_hpte_remove(unsigned long hpte_group)
         if (i == HPTES_PER_GROUP)
                 return -1;
 
-        large = dw0.l;
-
         /* Invalidate the hpte. NOTE: this also unlocks it */
         hptep->dw0.dword0 = 0;
 
-        /* Invalidate the tlb */
-        vsid = dw0.avpn >> 5;
-        slot = hptep - htab_data.htab;
-        group = slot >> 3;
-        if (dw0.h)
-                group = ~group;
-        pi = (vsid ^ group) & 0x7ff;
-        pi_high = (dw0.avpn & 0x1f) << 11;
-        pi |= pi_high;
-
-        if (large)
-                va = pi << LARGE_PAGE_SHIFT;
-        else
-                va = pi << PAGE_SHIFT;
-
-        spin_lock_irqsave(&pSeries_tlbie_lock, flags);
-        _tlbie(va, large);
-        spin_unlock_irqrestore(&pSeries_tlbie_lock, flags);
-
         return i;
 }
@@ -259,41 +192,40 @@ static long pSeries_hpte_find(unsigned long vpn)
 }
 
 static long pSeries_hpte_updatepp(unsigned long slot, unsigned long newpp,
-                                  unsigned long va, int large)
+                                  unsigned long va, int large, int local)
 {
         HPTE *hptep = htab_data.htab + slot;
         Hpte_dword0 dw0;
-        unsigned long vpn, avpn;
+        unsigned long avpn = va >> 23;
         unsigned long flags;
+        int ret = 0;
 
         if (large)
-                vpn = va >> LARGE_PAGE_SHIFT;
-        else
-                vpn = va >> PAGE_SHIFT;
-
-        avpn = vpn >> 11;
+                avpn &= ~0x1UL;
 
         pSeries_lock_hpte(hptep);
 
         dw0 = hptep->dw0.dw0;
 
+        /* Even if we miss, we need to invalidate the TLB */
         if ((dw0.avpn != avpn) || !dw0.v) {
                 pSeries_unlock_hpte(hptep);
-                udbg_printf("updatepp missed\n");
-                return -1;
-        }
-
-        set_pp_bit(newpp, hptep);
-        pSeries_unlock_hpte(hptep);
+                ret = -1;
+        } else {
+                set_pp_bit(newpp, hptep);
+                pSeries_unlock_hpte(hptep);
+        }
 
         /* Ensure it is out of the tlb too */
-        /* XXX use tlbiel where possible */
-        spin_lock_irqsave(&pSeries_tlbie_lock, flags);
-        _tlbie(va, large);
-        spin_unlock_irqrestore(&pSeries_tlbie_lock, flags);
+        if (cpu_has_tlbiel() && !large && local) {
+                _tlbiel(va);
+        } else {
+                spin_lock_irqsave(&pSeries_tlbie_lock, flags);
+                _tlbie(va, large);
+                spin_unlock_irqrestore(&pSeries_tlbie_lock, flags);
+        }
 
-        return 0;
+        return ret;
 }
@@ -322,7 +254,6 @@ static void pSeries_hpte_updateboltedpp(unsigned long newpp, unsigned long ea)
         set_pp_bit(newpp, hptep);
 
         /* Ensure it is out of the tlb too */
-        /* XXX use tlbiel where possible */
         spin_lock_irqsave(&pSeries_tlbie_lock, flags);
         _tlbie(va, 0);
         spin_unlock_irqrestore(&pSeries_tlbie_lock, flags);
@@ -333,29 +264,24 @@ static void pSeries_hpte_invalidate(unsigned long slot, unsigned long va,
 {
         HPTE *hptep = htab_data.htab + slot;
         Hpte_dword0 dw0;
-        unsigned long vpn, avpn;
+        unsigned long avpn = va >> 23;
         unsigned long flags;
 
         if (large)
-                vpn = va >> LARGE_PAGE_SHIFT;
-        else
-                vpn = va >> PAGE_SHIFT;
-
-        avpn = vpn >> 11;
+                avpn &= ~0x1UL;
 
         pSeries_lock_hpte(hptep);
 
         dw0 = hptep->dw0.dw0;
 
+        /* Even if we miss, we need to invalidate the TLB */
         if ((dw0.avpn != avpn) || !dw0.v) {
                 pSeries_unlock_hpte(hptep);
-                udbg_printf("invalidate missed\n");
-                return;
-        }
-
-        /* Invalidate the hpte. NOTE: this also unlocks it */
-        hptep->dw0.dword0 = 0;
+        } else {
+                /* Invalidate the hpte. NOTE: this also unlocks it */
+                hptep->dw0.dword0 = 0;
+        }
 
         /* Invalidate the tlb */
         if (cpu_has_tlbiel() && !large && local) {
                 _tlbiel(va);
@@ -374,6 +300,7 @@ static void pSeries_flush_hash_range(unsigned long context,
         HPTE *hptep;
         Hpte_dword0 dw0;
         struct ppc64_tlb_batch *batch = &ppc64_tlb_batch[smp_processor_id()];
+
         /* XXX fix for large ptes */
         unsigned long large = 0;
@@ -399,22 +326,24 @@ static void pSeries_flush_hash_range(unsigned long context,
                 slot += (pte_val(batch->pte[i]) & _PAGE_GROUP_IX) >> 12;
 
                 hptep = htab_data.htab + slot;
 
-                avpn = vpn >> 11;
+                avpn = va >> 23;
+                if (large)
+                        avpn &= ~0x1UL;
 
                 pSeries_lock_hpte(hptep);
 
                 dw0 = hptep->dw0.dw0;
 
+                /* Even if we miss, we need to invalidate the TLB */
                 if ((dw0.avpn != avpn) || !dw0.v) {
                         pSeries_unlock_hpte(hptep);
-                        udbg_printf("invalidate missed\n");
-                        continue;
-                }
+                } else {
+                        /* Invalidate the hpte. NOTE: this also unlocks it */
+                        hptep->dw0.dword0 = 0;
+                }
 
                 j++;
-
-                /* Invalidate the hpte. NOTE: this also unlocks it */
-                hptep->dw0.dword0 = 0;
         }
 
         if (cpu_has_tlbiel() && !large && local) {
@@ -457,7 +386,6 @@ void hpte_init_pSeries(void)
         ppc_md.hpte_updateboltedpp = pSeries_hpte_updateboltedpp;
         ppc_md.hpte_insert = pSeries_hpte_insert;
         ppc_md.hpte_remove = pSeries_hpte_remove;
-        ppc_md.make_pte = pSeries_make_pte;
 
         /* Disable TLB batching on nighthawk */
         root = find_path_device("/");
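A detail that repeats across the pSeries hunks above: the abbreviated virtual page number written into dw0.avpn is now taken directly from the virtual address as va >> 23 (with the low bit cleared for large pages), where the old code first formed vpn and then shifted by 11. A small standalone check that the two derivations agree for regular 4 KB pages, since a PAGE_SHIFT of 12 plus 11 equals 23; the example address is arbitrary:

#include <assert.h>
#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
        /* Arbitrary illustrative virtual address. */
        unsigned long va  = 0x00000123456789abUL;
        unsigned long vpn = va >> PAGE_SHIFT;

        /* Old derivation (via vpn) and new derivation (directly from va)
         * agree for regular 4 KB pages. */
        unsigned long avpn_old = vpn >> 11;
        unsigned long avpn_new = va >> 23;
        assert(avpn_old == avpn_new);

        /* For large pages the new code additionally clears the low AVPN bit. */
        unsigned long avpn_large = (va >> 23) & ~0x1UL;
        printf("avpn=%lx avpn_large=%lx\n", avpn_new, avpn_large);
        return 0;
}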
arch/ppc64/kernel/pSeries_lpar.c

@@ -403,67 +403,11 @@ int hvc_count(int *start_termno)
-/*
- * Create a pte - LPAR. Used during initialization only.
- * We assume the PTE will fit in the primary PTEG.
- */
-void pSeries_lpar_make_pte(HPTE *htab, unsigned long va, unsigned long pa,
-                           int mode, unsigned long hash_mask, int large)
-{
-        HPTE local_hpte;
-        unsigned long hash, slot, flags, lpar_rc, vpn;
-        unsigned long dummy1, dummy2;
-
-        if (large)
-                vpn = va >> LARGE_PAGE_SHIFT;
-        else
-                vpn = va >> PAGE_SHIFT;
-
-        hash = hpt_hash(vpn, large);
-
-        slot = ((hash & hash_mask) * HPTES_PER_GROUP);
-
-        local_hpte.dw1.dword1 = pa | mode;
-        local_hpte.dw0.dword0 = 0;
-        local_hpte.dw0.dw0.avpn = va >> 23;
-        local_hpte.dw0.dw0.bolted = 1;          /* bolted */
-        if (large) {
-                local_hpte.dw0.dw0.l = 1;       /* large page */
-                local_hpte.dw0.dw0.avpn &= ~0x1UL;
-        }
-        local_hpte.dw0.dw0.v = 1;
-
-        /* Set CEC cookie to 0 */
-        /* Zero page = 0 */
-        /* I-cache Invalidate = 0 */
-        /* I-cache synchronize = 0 */
-        /* Exact = 0 - modify any entry in group */
-        flags = 0;
-
-        lpar_rc = plpar_pte_enter(flags, slot, local_hpte.dw0.dword0,
-                                  local_hpte.dw1.dword1, &dummy1, &dummy2);
-
-        if (lpar_rc == H_PTEG_Full) {
-                while (1)
-                        ;
-        }
-
-        /*
-         * NOTE: we explicitly do not check return status here because it is
-         * "normal" for early boot code to map io regions for which a partition
-         * has no access. However, we will die if we actually fault on these
-         * "permission denied" pages.
-         */
-}
-
-static long pSeries_lpar_hpte_insert(unsigned long hpte_group,
-                                     unsigned long vpn, unsigned long prpn,
-                                     int secondary, unsigned long hpteflags,
-                                     int bolted, int large)
+long pSeries_lpar_hpte_insert(unsigned long hpte_group,
+                              unsigned long va, unsigned long prpn,
+                              int secondary, unsigned long hpteflags,
+                              int bolted, int large)
 {
-        /* XXX fix for large page */
-        unsigned long avpn = vpn >> 11;
         unsigned long arpn = physRpn_to_absRpn(prpn);
         unsigned long lpar_rc;
         unsigned long flags;
@@ -476,13 +420,15 @@ static long pSeries_lpar_hpte_insert(unsigned long hpte_group,
         lhpte.dw1.flags.flags = hpteflags;
 
         lhpte.dw0.dword0 = 0;
-        lhpte.dw0.dw0.avpn = avpn;
+        lhpte.dw0.dw0.avpn = va >> 23;
         lhpte.dw0.dw0.h = secondary;
         lhpte.dw0.dw0.bolted = bolted;
         lhpte.dw0.dw0.v = 1;
 
-        if (large)
+        if (large) {
                 lhpte.dw0.dw0.l = 1;
+                lhpte.dw0.dw0.avpn &= ~0x1UL;
+        }
 
         /* Now fill in the actual HPTE */
         /* Set CEC cookie to 0 */
@@ -559,12 +505,15 @@ static long pSeries_lpar_hpte_remove(unsigned long hpte_group)
  * already zero. For now I am paranoid.
  */
 static long pSeries_lpar_hpte_updatepp(unsigned long slot, unsigned long newpp,
-                                       unsigned long va, int large)
+                                       unsigned long va, int large, int local)
 {
         unsigned long lpar_rc;
         unsigned long flags = (newpp & 7) | H_AVPN;
         unsigned long avpn = va >> 23;
 
+        if (large)
+                avpn &= ~0x1UL;
+
         lpar_rc = plpar_pte_protect(flags, slot, (avpn << 7));
 
         if (lpar_rc == H_Not_Found) {
@@ -662,6 +611,9 @@ static void pSeries_lpar_hpte_invalidate(unsigned long slot, unsigned long va,
         unsigned long lpar_rc;
         unsigned long dummy1, dummy2;
 
+        if (large)
+                avpn &= ~0x1UL;
+
         lpar_rc = plpar_pte_remove(H_AVPN, slot, (avpn << 7), &dummy1,
                                    &dummy2);
@@ -700,6 +652,5 @@ void pSeries_lpar_mm_init(void)
         ppc_md.hpte_updateboltedpp = pSeries_lpar_hpte_updateboltedpp;
         ppc_md.hpte_insert = pSeries_lpar_hpte_insert;
         ppc_md.hpte_remove = pSeries_lpar_hpte_remove;
-        ppc_md.make_pte = pSeries_lpar_make_pte;
         ppc_md.flush_hash_range = pSeries_lpar_flush_hash_range;
 }
arch/ppc64/mm/init.c

@@ -220,16 +220,26 @@ static void map_io_page(unsigned long ea, unsigned long pa, int flags)
                 set_pte(ptep, pfn_pte(pa >> PAGE_SHIFT, __pgprot(flags)));
                 spin_unlock(&ioremap_mm.page_table_lock);
         } else {
-                /* If the mm subsystem is not fully up, we cannot create a
+                unsigned long va, vpn, hash, hpteg;
+
+                /*
+                 * If the mm subsystem is not fully up, we cannot create a
                  * linux page table entry for this mapping.  Simply bolt an
                  * entry in the hardware page table.
                  */
                 vsid = get_kernel_vsid(ea);
-                ppc_md.make_pte(htab_data.htab,
-                        (vsid << 28) | (ea & 0xFFFFFFF), // va (NOT the ea)
-                        pa, _PAGE_NO_CACHE | _PAGE_GUARDED | PP_RWXX,
-                        htab_data.htab_hash_mask, 0);
+                va = (vsid << 28) | (ea & 0xFFFFFFF);
+                vpn = va >> PAGE_SHIFT;
+                hash = hpt_hash(vpn, 0);
+                hpteg = ((hash & htab_data.htab_hash_mask) * HPTES_PER_GROUP);
+
+                if (ppc_md.hpte_insert(hpteg, va, pa >> PAGE_SHIFT, 0,
+                                       _PAGE_NO_CACHE | _PAGE_GUARDED | PP_RWXX,
+                                       1, 0) == -1) {
+                        panic("map_io_page: could not insert mapping");
+                }
         }
 }
@@ -649,7 +659,7 @@ void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
 extern pte_t *find_linux_pte(pgd_t *pgdir, unsigned long ea);
 int __hash_page(unsigned long ea, unsigned long access, unsigned long vsid,
-                pte_t *ptep, unsigned long trap);
+                pte_t *ptep, unsigned long trap, int local);
 
 /*
  * This is called at the end of handling a user page fault, when the
@@ -665,6 +675,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long ea,
         unsigned long vsid;
         void *pgdir;
         pte_t *ptep;
+        int local = 0;
 
         /* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */
         if (!pte_young(pte))
@@ -677,6 +688,9 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long ea,
         ptep = find_linux_pte(pgdir, ea);
         vsid = get_vsid(vma->vm_mm->context, ea);
 
+        if (vma->vm_mm->cpu_vm_mask == (1 << smp_processor_id()))
+                local = 1;
+
         __hash_page(ea, pte_val(pte) & (_PAGE_USER | _PAGE_RW), vsid, ptep,
-                    0x300);
+                    0x300, local);
 }
include/asm-ppc64/machdep.h

@@ -37,11 +37,12 @@ struct machdep_calls {
         long (*hpte_updatepp)(unsigned long slot,
                               unsigned long newpp,
                               unsigned long va,
-                              int large);
+                              int large,
+                              int local);
         void (*hpte_updateboltedpp)(unsigned long newpp,
                                     unsigned long ea);
         long (*hpte_insert)(unsigned long hpte_group,
-                            unsigned long vpn,
+                            unsigned long va,
                             unsigned long prpn,
                             int secondary,
                             unsigned long hpteflags,
@@ -51,11 +52,6 @@ struct machdep_calls {
         void (*flush_hash_range)(unsigned long context,
                                  unsigned long number,
                                  int local);
-        void (*make_pte)(void *htab,
-                         unsigned long va,
-                         unsigned long pa,
-                         int mode,
-                         unsigned long hash_mask, int large);
         void (*tce_build)(struct TceTable *tbl,
                           long tcenum,
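For reference, the shape of the updated ops table after this commit: hpte_insert() is keyed by va rather than vpn, and hpte_updatepp() carries the extra int local flag. A simplified, self-contained stand-in (types and argument lists abbreviated; demo_insert/demo_updatepp are hypothetical placeholders, not kernel functions) showing a call through such a table:

#include <stdio.h>

/* Simplified stand-in for the relevant slice of struct machdep_calls. */
struct hpte_ops {
        long (*hpte_insert)(unsigned long hpte_group, unsigned long va,
                            unsigned long prpn, int secondary,
                            unsigned long hpteflags, int bolted, int large);
        long (*hpte_updatepp)(unsigned long slot, unsigned long newpp,
                              unsigned long va, int large, int local);
};

static long demo_insert(unsigned long hpte_group, unsigned long va,
                        unsigned long prpn, int secondary,
                        unsigned long hpteflags, int bolted, int large)
{
        (void)prpn; (void)secondary; (void)hpteflags; (void)bolted; (void)large;
        printf("insert group=%lu va=%lx\n", hpte_group, va);
        return 0;       /* a slot number; -1 would mean "group full", as in the diff */
}

static long demo_updatepp(unsigned long slot, unsigned long newpp,
                          unsigned long va, int large, int local)
{
        (void)newpp; (void)va; (void)large;
        printf("updatepp slot=%lu local=%d\n", slot, local);
        return 0;
}

int main(void)
{
        struct hpte_ops ops = { demo_insert, demo_updatepp };

        ops.hpte_insert(8, 0x1230000UL, 0x42, 0, 0, 1, 0);
        ops.hpte_updatepp(11, 2, 0x1230000UL, 0, 1);
        return 0;
}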