nexedi / linux · Commits

Commit bf851860
Authored Aug 23, 2004 by Linus Torvalds
Merge bk://ppc.bkbits.net/for-linus-ppc64
into ppc970.osdl.org:/home/torvalds/v2.6/linux

Parents: 6a8e8a44 96eacb6b
Showing 5 changed files with 118 additions and 170 deletions (+118 -170):

arch/ppc64/kernel/Makefile         +1   -1
arch/ppc64/mm/Makefile             +2   -1
arch/ppc64/mm/stab.c               +104 -141
include/asm-ppc64/mmu.h            +9   -25
include/asm-ppc64/mmu_context.h    +2   -2
arch/ppc64/kernel/Makefile
@@ -7,7 +7,7 @@ extra-y := head.o vmlinux.lds
 obj-y := setup.o entry.o traps.o irq.o idle.o dma.o \
 	time.o process.o signal.o syscalls.o misc.o ptrace.o \
-	align.o semaphore.o bitops.o stab.o pacaData.o \
+	align.o semaphore.o bitops.o pacaData.o \
 	udbg.o binfmt_elf32.o sys_ppc32.o ioctl32.o \
 	ptrace32.o signal32.o rtc.o init_task.o \
 	lmb.o cputable.o cpu_setup_power4.o idle_power4.o \
arch/ppc64/mm/Makefile
@@ -4,6 +4,7 @@
 EXTRA_CFLAGS += -mno-minimal-toc

-obj-y := fault.o init.o imalloc.o hash_utils.o hash_low.o tlb.o slb_low.o slb.o
+obj-y := fault.o init.o imalloc.o hash_utils.o hash_low.o tlb.o \
+	slb_low.o slb.o stab.o
 obj-$(CONFIG_DISCONTIGMEM) += numa.o
 obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
arch/ppc64/kernel/stab.c → arch/ppc64/mm/stab.c
@@ -5,7 +5,7 @@
  *    Copyright (c) 2001 Dave Engebretsen
  *
  * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
- *
+ *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
  * as published by the Free Software Foundation; either version
@@ -20,74 +20,44 @@
 #include <asm/naca.h>
 #include <asm/cputable.h>

-static int make_ste(unsigned long stab, unsigned long esid, unsigned long vsid);
-
-void slb_initialize(void);
-
-/*
- * Build an entry for the base kernel segment and put it into
- * the segment table or SLB. All other segment table or SLB
- * entries are faulted in.
- */
-void stab_initialize(unsigned long stab)
-{
-	unsigned long vsid = get_kernel_vsid(KERNELBASE);
-
-	if (cur_cpu_spec->cpu_features & CPU_FTR_SLB) {
-		slb_initialize();
-	} else {
-		asm volatile("isync; slbia; isync":::"memory");
-		make_ste(stab, GET_ESID(KERNELBASE), vsid);
-
-		/* Order update */
-		asm volatile("sync":::"memory");
-	}
-}
-
 /* Both the segment table and SLB code uses the following cache */
 #define NR_STAB_CACHE_ENTRIES 8
 DEFINE_PER_CPU(long, stab_cache_ptr);
 DEFINE_PER_CPU(long, stab_cache[NR_STAB_CACHE_ENTRIES]);

-/*
- * Segment table stuff
- */
 /*
  * Create a segment table entry for the given esid/vsid pair.
  */
 static int make_ste(unsigned long stab, unsigned long esid, unsigned long vsid)
 {
+	unsigned long esid_data, vsid_data;
 	unsigned long entry, group, old_esid, castout_entry, i;
 	unsigned int global_entry;
-	STE *ste, *castout_ste;
-	unsigned long kernel_segment = (REGION_ID(esid << SID_SHIFT) != USER_REGION_ID);
+	struct stab_entry *ste, *castout_ste;
+	unsigned long kernel_segment = (esid << SID_SHIFT) >= KERNELBASE;
+
+	vsid_data = vsid << STE_VSID_SHIFT;
+	esid_data = esid << SID_SHIFT | STE_ESID_KP | STE_ESID_V;
+	if (!kernel_segment)
+		esid_data |= STE_ESID_KS;

 	/* Search the primary group first. */
 	global_entry = (esid & 0x1f) << 3;
-	ste = (STE *)(stab | ((esid & 0x1f) << 7));
+	ste = (struct stab_entry *)(stab | ((esid & 0x1f) << 7));

 	/* Find an empty entry, if one exists. */
 	for (group = 0; group < 2; group++) {
 		for (entry = 0; entry < 8; entry++, ste++) {
-			if (!(ste->dw0.dw0.v)) {
-				ste->dw0.dword0 = 0;
-				ste->dw1.dword1 = 0;
-				ste->dw1.dw1.vsid = vsid;
-				ste->dw0.dw0.esid = esid;
-				ste->dw0.dw0.kp = 1;
-				if (!kernel_segment)
-					ste->dw0.dw0.ks = 1;
+			if (!(ste->esid_data & STE_ESID_V)) {
+				ste->vsid_data = vsid_data;
 				asm volatile("eieio":::"memory");
-				ste->dw0.dw0.v = 1;
+				ste->esid_data = esid_data;
 				return (global_entry | entry);
 			}
 		}
 		/* Now search the secondary group. */
 		global_entry = ((~esid) & 0x1f) << 3;
-		ste = (STE *)(stab | (((~esid) & 0x1f) << 7));
+		ste = (struct stab_entry *)(stab | (((~esid) & 0x1f) << 7));
 	}

 /*
@@ -98,16 +68,16 @@ static int make_ste(unsigned long stab, unsigned long esid, unsigned long vsid)
 	for (i = 0; i < 16; i++) {
 		if (castout_entry < 8) {
 			global_entry = (esid & 0x1f) << 3;
-			ste = (STE *)(stab | ((esid & 0x1f) << 7));
+			ste = (struct stab_entry *)(stab | ((esid & 0x1f) << 7));
 			castout_ste = ste + castout_entry;
 		} else {
 			global_entry = ((~esid) & 0x1f) << 3;
-			ste = (STE *)(stab | (((~esid) & 0x1f) << 7));
+			ste = (struct stab_entry *)(stab | (((~esid) & 0x1f) << 7));
 			castout_ste = ste + (castout_entry - 8);
 		}

 		/* Dont cast out the first kernel segment */
-		if (castout_ste->dw0.dw0.esid != GET_ESID(KERNELBASE))
+		if ((castout_ste->esid_data & ESID_MASK) != KERNELBASE)
 			break;

 		castout_entry = (castout_entry + 1) & 0xf;
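The castout loop above hashes each ESID to a primary group of eight entries ((esid & 0x1f) << 3) and a secondary group derived from the complemented ESID, then evicts a victim by round-robin while refusing to touch the bootstrapped kernel segment. A minimal userspace sketch of that placement policy follows; the table size, masks and packed entry layout are simplified stand-ins for illustration, not the kernel's real asm-ppc64 definitions.

/* Simplified model of the STAB placement policy shown above: 32 groups
 * of 8 entries, primary group from (esid & 0x1f), secondary from
 * (~esid & 0x1f), round-robin castout that skips the kernel entry.
 * Constants and the entry encoding are illustrative only. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define GROUPS       32
#define GROUP_SIZE    8
#define ENTRY_VALID  0x1UL

struct model_entry { uint64_t esid_data, vsid_data; };

static struct model_entry stab[GROUPS * GROUP_SIZE];
static unsigned castout_rr;                 /* round-robin victim counter */

static int model_make_ste(uint64_t esid, uint64_t vsid, uint64_t kernel_esid)
{
    unsigned groups[2] = { esid & 0x1f, (~esid) & 0x1f };

    /* First look for a free slot in the primary, then the secondary group. */
    for (int g = 0; g < 2; g++) {
        struct model_entry *ste = &stab[groups[g] * GROUP_SIZE];
        for (int e = 0; e < GROUP_SIZE; e++, ste++) {
            if (!(ste->esid_data & ENTRY_VALID)) {
                ste->vsid_data = vsid;
                ste->esid_data = (esid << 1) | ENTRY_VALID;
                return groups[g] * GROUP_SIZE + e;
            }
        }
    }

    /* Both groups full: cast out round-robin, never the kernel segment. */
    for (int i = 0; i < 2 * GROUP_SIZE; i++) {
        unsigned slot = castout_rr & 0xf;
        unsigned g = (slot < GROUP_SIZE) ? groups[0] : groups[1];
        struct model_entry *victim = &stab[g * GROUP_SIZE + (slot % GROUP_SIZE)];
        castout_rr++;
        if ((victim->esid_data >> 1) == kernel_esid)
            continue;                       /* don't evict the kernel entry */
        victim->vsid_data = vsid;
        victim->esid_data = (esid << 1) | ENTRY_VALID;
        return g * GROUP_SIZE + (slot % GROUP_SIZE);
    }
    return -1;
}

int main(void)
{
    memset(stab, 0, sizeof(stab));
    printf("entry placed at slot %d\n", model_make_ste(0x123, 0xabc, 0xc00));
    return 0;
}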
@@ -120,129 +90,80 @@ static int make_ste(unsigned long stab, unsigned long esid, unsigned long vsid)
 	/* Force previous translations to complete. DRENG */
 	asm volatile("isync" : : : "memory");

-	castout_ste->dw0.dw0.v = 0;
+	old_esid = castout_ste->esid_data >> SID_SHIFT;
+	castout_ste->esid_data = 0;		/* Invalidate old entry */

 	asm volatile("sync" : : : "memory");	/* Order update */

-	castout_ste->dw0.dword0 = 0;
-	castout_ste->dw1.dword1 = 0;
-	castout_ste->dw1.dw1.vsid = vsid;
-	old_esid = castout_ste->dw0.dw0.esid;
-	castout_ste->dw0.dw0.esid = esid;
-	castout_ste->dw0.dw0.kp = 1;
-	if (!kernel_segment)
-		castout_ste->dw0.dw0.ks = 1;
+	castout_ste->vsid_data = vsid_data;

 	asm volatile("eieio" : : : "memory");	/* Order update */

-	castout_ste->dw0.dw0.v = 1;
-	asm volatile("slbie  %0" : : "r" (old_esid << SID_SHIFT));
+	castout_ste->esid_data = esid_data;

+	asm volatile("slbie  %0" : : "r" (old_esid << SID_SHIFT));
 	/* Ensure completion of slbie */
 	asm volatile("sync" : : : "memory");

 	return (global_entry | (castout_entry & 0x7));
 }

-static inline void __ste_allocate(unsigned long esid, unsigned long vsid)
-{
-	unsigned char stab_entry;
-	unsigned long offset;
-	int region_id = REGION_ID(esid << SID_SHIFT);
-
-	stab_entry = make_ste(get_paca()->stab_addr, esid, vsid);
-
-	if (region_id != USER_REGION_ID)
-		return;
-
-	offset = __get_cpu_var(stab_cache_ptr);
-	if (offset < NR_STAB_CACHE_ENTRIES)
-		__get_cpu_var(stab_cache[offset++]) = stab_entry;
-	else
-		offset = NR_STAB_CACHE_ENTRIES+1;
-	__get_cpu_var(stab_cache_ptr) = offset;
-}
-
 /*
- * Allocate a segment table entry for the given ea.
+ * Allocate a segment table entry for the given ea and mm
  */
-int ste_allocate(unsigned long ea)
+static int __ste_allocate(unsigned long ea, struct mm_struct *mm)
 {
-	unsigned long vsid, esid;
-	mm_context_t context;
+	unsigned long vsid;
+	unsigned char stab_entry;
+	unsigned long offset;

 	/* Check for invalid effective addresses. */
 	if (!IS_VALID_EA(ea))
 		return 1;

 	/* Kernel or user address? */
-	if (REGION_ID(ea) >= KERNEL_REGION_ID) {
+	if (ea >= KERNELBASE) {
 		vsid = get_kernel_vsid(ea);
-		context = KERNEL_CONTEXT(ea);
 	} else {
-		if (!current->mm)
+		if (!mm)
 			return 1;

-		context = current->mm->context;
-		vsid = get_vsid(context.id, ea);
+		vsid = get_vsid(mm->context.id, ea);
 	}

-	esid = GET_ESID(ea);
-	__ste_allocate(esid, vsid);
-	/* Order update */
-	asm volatile("sync":::"memory");
+	stab_entry = make_ste(get_paca()->stab_addr, GET_ESID(ea), vsid);
+
+	if (ea < KERNELBASE) {
+		offset = __get_cpu_var(stab_cache_ptr);
+		if (offset < NR_STAB_CACHE_ENTRIES)
+			__get_cpu_var(stab_cache[offset++]) = stab_entry;
+		else
+			offset = NR_STAB_CACHE_ENTRIES+1;
+		__get_cpu_var(stab_cache_ptr) = offset;
+
+		/* Order update */
+		asm volatile("sync":::"memory");
+	}

 	return 0;
 }

+int ste_allocate(unsigned long ea)
+{
+	return __ste_allocate(ea, current->mm);
+}
+
 /*
- * preload some userspace segments into the segment table.
+ * Do the segment table work for a context switch: flush all user
+ * entries from the table, then preload some probably useful entries
+ * for the new task
  */
-static void preload_stab(struct task_struct *tsk, struct mm_struct *mm)
+void switch_stab(struct task_struct *tsk, struct mm_struct *mm)
 {
+	struct stab_entry *stab = (struct stab_entry *) get_paca()->stab_addr;
+	struct stab_entry *ste;
+	unsigned long offset = __get_cpu_var(stab_cache_ptr);
 	unsigned long pc = KSTK_EIP(tsk);
 	unsigned long stack = KSTK_ESP(tsk);
 	unsigned long unmapped_base;
-	unsigned long pc_esid = GET_ESID(pc);
-	unsigned long stack_esid = GET_ESID(stack);
-	unsigned long unmapped_base_esid;
-	unsigned long vsid;
-
-	if (test_tsk_thread_flag(tsk, TIF_32BIT))
-		unmapped_base = TASK_UNMAPPED_BASE_USER32;
-	else
-		unmapped_base = TASK_UNMAPPED_BASE_USER64;
-
-	unmapped_base_esid = GET_ESID(unmapped_base);
-
-	if (!IS_VALID_EA(pc) || (REGION_ID(pc) >= KERNEL_REGION_ID))
-		return;
-	vsid = get_vsid(mm->context.id, pc);
-	__ste_allocate(pc_esid, vsid);
-
-	if (pc_esid == stack_esid)
-		return;
-
-	if (!IS_VALID_EA(stack) || (REGION_ID(stack) >= KERNEL_REGION_ID))
-		return;
-	vsid = get_vsid(mm->context.id, stack);
-	__ste_allocate(stack_esid, vsid);
-
-	if (pc_esid == unmapped_base_esid || stack_esid == unmapped_base_esid)
-		return;
-
-	if (!IS_VALID_EA(unmapped_base) ||
-	    (REGION_ID(unmapped_base) >= KERNEL_REGION_ID))
-		return;
-	vsid = get_vsid(mm->context.id, unmapped_base);
-	__ste_allocate(unmapped_base_esid, vsid);
-
-	/* Order update */
-	asm volatile("sync" : : : "memory");
-}
-
-/* Flush all user entries from the segment table of the current processor. */
-void flush_stab(struct task_struct *tsk, struct mm_struct *mm)
-{
-	STE *stab = (STE *) get_paca()->stab_addr;
-	STE *ste;
-	unsigned long offset = __get_cpu_var(stab_cache_ptr);

 	/* Force previous translations to complete. DRENG */
 	asm volatile("isync" : : : "memory");
@@ -252,7 +173,7 @@ void flush_stab(struct task_struct *tsk, struct mm_struct *mm)
 		for (i = 0; i < offset; i++) {
 			ste = stab + __get_cpu_var(stab_cache[i]);
-			ste->dw0.dw0.v = 0;
+			ste->esid_data = 0; /* invalidate entry */
 		}
 	} else {
 		unsigned long entry;
@@ -263,12 +184,12 @@ void flush_stab(struct task_struct *tsk, struct mm_struct *mm)
 		/* Never flush the first entry. */
 		ste += 1;
 		for (entry = 1;
-		     entry < (PAGE_SIZE / sizeof(STE));
+		     entry < (PAGE_SIZE / sizeof(struct stab_entry));
 		     entry++, ste++) {
 			unsigned long ea;
-			ea = ste->dw0.dw0.esid << SID_SHIFT;
+			ea = ste->esid_data & ESID_MASK;
 			if (ea < KERNELBASE) {
-				ste->dw0.dw0.v = 0;
+				ste->esid_data = 0;
 			}
 		}
 	}
@@ -277,5 +198,47 @@ void flush_stab(struct task_struct *tsk, struct mm_struct *mm)
 	__get_cpu_var(stab_cache_ptr) = 0;

-	preload_stab(tsk, mm);
+	/* Now preload some entries for the new task */
+	if (test_tsk_thread_flag(tsk, TIF_32BIT))
+		unmapped_base = TASK_UNMAPPED_BASE_USER32;
+	else
+		unmapped_base = TASK_UNMAPPED_BASE_USER64;
+
+	__ste_allocate(pc, mm);
+
+	if (GET_ESID(pc) == GET_ESID(stack))
+		return;
+
+	__ste_allocate(stack, mm);
+
+	if ((GET_ESID(pc) == GET_ESID(unmapped_base)) ||
+	    (GET_ESID(stack) == GET_ESID(unmapped_base)))
+		return;
+
+	__ste_allocate(unmapped_base, mm);
+
+	/* Order update */
+	asm volatile("sync" : : : "memory");
 }
+
+extern void slb_initialize(void);
+
+/*
+ * Build an entry for the base kernel segment and put it into
+ * the segment table or SLB. All other segment table or SLB
+ * entries are faulted in.
+ */
+void stab_initialize(unsigned long stab)
+{
+	unsigned long vsid = get_kernel_vsid(KERNELBASE);
+
+	if (cur_cpu_spec->cpu_features & CPU_FTR_SLB) {
+		slb_initialize();
+	} else {
+		asm volatile("isync; slbia; isync":::"memory");
+		make_ste(stab, GET_ESID(KERNELBASE), vsid);
+
+		/* Order update */
+		asm volatile("sync":::"memory");
+	}
+}
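The new __ste_allocate()/switch_stab() pair leans on the per-CPU stab_cache declared earlier in the file: each user segment entry installed since the last context switch has its slot index recorded, so a switch normally invalidates only those slots and falls back to sweeping every user entry once more than NR_STAB_CACHE_ENTRIES have been touched. A single-threaded userspace sketch of that bookkeeping, with stand-in names and an illustrative table size (the kernel versions use DEFINE_PER_CPU/__get_cpu_var and the real STAB layout):

/* Sketch of the "remember what you touched" flush strategy used by
 * __ste_allocate()/switch_stab() above.  Single-CPU model for
 * illustration, not kernel code. */
#include <stdio.h>

#define NR_STAB_CACHE_ENTRIES 8
#define NR_SLOTS            256           /* illustrative table size */

static int slot_valid[NR_SLOTS];
static long stab_cache[NR_STAB_CACHE_ENTRIES];
static long stab_cache_ptr;               /* entries recorded, or overflow mark */

static void record_user_slot(long slot)
{
    slot_valid[slot] = 1;
    if (stab_cache_ptr < NR_STAB_CACHE_ENTRIES)
        stab_cache[stab_cache_ptr++] = slot;
    else
        stab_cache_ptr = NR_STAB_CACHE_ENTRIES + 1;   /* mark overflow */
}

static void flush_user_slots(void)
{
    if (stab_cache_ptr <= NR_STAB_CACHE_ENTRIES) {
        /* Common case: only the recorded slots need invalidating. */
        for (long i = 0; i < stab_cache_ptr; i++)
            slot_valid[stab_cache[i]] = 0;
    } else {
        /* Overflow: scan the whole table (slot 0 is the kernel segment). */
        for (long slot = 1; slot < NR_SLOTS; slot++)
            slot_valid[slot] = 0;
    }
    stab_cache_ptr = 0;
}

int main(void)
{
    record_user_slot(12);
    record_user_slot(40);
    flush_user_slots();                    /* fast path: clears slots 12 and 40 */
    printf("slot 12 valid after flush: %d\n", slot_valid[12]);
    return 0;
}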
include/asm-ppc64/mmu.h
@@ -37,33 +37,17 @@ typedef struct {
 	mm_context_t ctx = { .id = REGION_ID(ea), KERNEL_LOW_HPAGES}; \
 	ctx; })

-typedef struct {
-	unsigned long esid: 36; /* Effective segment ID */
-	unsigned long resv0:20; /* Reserved */
-	unsigned long v:     1; /* Entry valid (v=1) or invalid */
-	unsigned long resv1: 1; /* Reserved */
-	unsigned long ks:    1; /* Supervisor (privileged) state storage key */
-	unsigned long kp:    1; /* Problem state storage key */
-	unsigned long n:     1; /* No-execute if n=1 */
-	unsigned long resv2: 3; /* padding to a 64b boundary */
-} ste_dword0;
-
-typedef struct {
-	unsigned long vsid: 52; /* Virtual segment ID */
-	unsigned long resv0:12; /* Padding to a 64b boundary */
-} ste_dword1;
-
-typedef struct _STE {
-	union {
-		unsigned long dword0;
-		ste_dword0    dw0;
-	} dw0;
-
-	union {
-		unsigned long dword1;
-		ste_dword1    dw1;
-	} dw1;
-} STE;
+#define STE_ESID_V	0x80
+#define STE_ESID_KS	0x20
+#define STE_ESID_KP	0x10
+#define STE_ESID_N	0x08
+
+#define STE_VSID_SHIFT	12
+
+struct stab_entry {
+	unsigned long esid_data;
+	unsigned long vsid_data;
+};

 /* Hardware Page Table Entry */
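With the bitfield typedefs gone, a segment table entry is just two raw doublewords: esid_data carries the ESID in its upper bits plus the V/KS/KP/N flags defined above, and vsid_data carries the VSID shifted by STE_VSID_SHIFT. A small standalone illustration of that packing; SID_SHIFT is assumed here to be 28 (256MB segments) for the sketch and is not part of this patch:

/* How a segment table entry is encoded after this change: flags and the
 * ESID share the first doubleword, the VSID lives in the second.
 * SID_SHIFT = 28 is an assumption for this illustration. */
#include <stdint.h>
#include <stdio.h>

#define STE_ESID_V      0x80
#define STE_ESID_KS     0x20
#define STE_ESID_KP     0x10
#define STE_ESID_N      0x08
#define STE_VSID_SHIFT  12
#define SID_SHIFT       28                       /* assumed, not from this patch */
#define ESID_MASK       (~((1UL << SID_SHIFT) - 1))

struct stab_entry {
    uint64_t esid_data;
    uint64_t vsid_data;
};

int main(void)
{
    uint64_t esid = 0xc000000000000000ULL >> SID_SHIFT;  /* example kernel ESID */
    uint64_t vsid = 0x123456;
    struct stab_entry ste;

    /* Same packing make_ste() now performs with the flat layout. */
    ste.vsid_data = vsid << STE_VSID_SHIFT;
    ste.esid_data = (esid << SID_SHIFT) | STE_ESID_KP | STE_ESID_V;

    printf("valid: %d  esid: %#llx  vsid: %#llx\n",
           !!(ste.esid_data & STE_ESID_V),
           (unsigned long long)((ste.esid_data & ESID_MASK) >> SID_SHIFT),
           (unsigned long long)(ste.vsid_data >> STE_VSID_SHIFT));
    return 0;
}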
include/asm-ppc64/mmu_context.h
@@ -135,7 +135,7 @@ destroy_context(struct mm_struct *mm)
 	spin_unlock_irqrestore(&mmu_context_queue.lock, flags);
 }

-extern void flush_stab(struct task_struct *tsk, struct mm_struct *mm);
+extern void switch_stab(struct task_struct *tsk, struct mm_struct *mm);
 extern void switch_slb(struct task_struct *tsk, struct mm_struct *mm);

 /*
@@ -163,7 +163,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 	if (cur_cpu_spec->cpu_features & CPU_FTR_SLB)
 		switch_slb(tsk, next);
 	else
-		flush_stab(tsk, next);
+		switch_stab(tsk, next);
 }

 #define deactivate_mm(tsk,mm)	do { } while (0)