Commit 9bc8ec89, authored Jun 08, 2002 by Anton Blanchard
    Merge samba.org:/scratch/anton/linux-2.5
    into samba.org:/scratch/anton/linux-2.5_ppc64

Parents: 3da9cf28 7de7e33b
Showing 20 changed files with 535 additions and 655 deletions (+535, -655)
arch/ppc64/config.in              +4    -0
arch/ppc64/kernel/eeh.c           +8    -7
arch/ppc64/kernel/entry.S         +2    -2
arch/ppc64/kernel/head.S          +6    -5
arch/ppc64/kernel/htab.c          +27   -13
arch/ppc64/kernel/irq.c           +3    -1
arch/ppc64/kernel/pSeries_htab.c  +14   -16
arch/ppc64/kernel/pSeries_lpar.c  +7    -9
arch/ppc64/kernel/proc_pmc.c      +0    -3
arch/ppc64/kernel/signal.c        +80   -78
arch/ppc64/kernel/signal32.c      +106  -152
arch/ppc64/kernel/smp.c           +2    -6
arch/ppc64/mm/init.c              +79   -93
arch/ppc64/xmon/xmon.c            +0    -259
include/asm-ppc64/bitops.h        +6    -0
include/asm-ppc64/mmzone.h        +93   -0
include/asm-ppc64/page.h          +10   -3
include/asm-ppc64/pgalloc.h       +3    -0
include/asm-ppc64/tlb.h           +85   -0
include/asm-ppc64/tlbflush.h      +0    -8
arch/ppc64/config.in

@@ -23,6 +23,10 @@ if [ "$CONFIG_SMP" = "y" ]; then
   bool '  Distribute interrupts on all CPUs by default' CONFIG_IRQ_ALL_CPUS
   if [ "$CONFIG_PPC_PSERIES" = "y" ]; then
     bool '  Hardware multithreading' CONFIG_HMT
+    bool '  Discontiguous Memory Support' CONFIG_DISCONTIGMEM
+    if [ "$CONFIG_DISCONTIGMEM" = "y" ]; then
+      bool '    NUMA support' CONFIG_NUMA
+    fi
   fi
 fi
 define_bool CONFIG_PREEMPT n
...
arch/ppc64/kernel/eeh.c

@@ -59,15 +59,14 @@ unsigned long eeh_token(unsigned long phb, unsigned long bus, unsigned long devf
 	return ((IO_UNMAPPED_REGION_ID << 60) | (phb << 48UL) | ((bus & 0xff) << 40UL) | (devfn << 32UL) | (offset & 0xffffffff));
 }
 int eeh_get_state(unsigned long ea) {
 	return 0;
 }
 /* Check for an eeh failure at the given token address.
- * The given value has been read and it should be 1's (0xff, 0xffff or 0xffffffff).
+ * The given value has been read and it should be 1's (0xff, 0xffff or
+ * 0xffffffff).
  *
  * Probe to determine if an error actually occurred.  If not return val.
  * Otherwise panic.
...

@@ -113,7 +112,8 @@ unsigned long eeh_check_failure(void *token, unsigned long val)
 		return val;	/* good case */
 }
 void eeh_init(void) {
 	extern char cmd_line[];	/* Very early cmd line parse.  Cheap, but works. */
 	char *eeh_force_off = strstr(cmd_line, "eeh-force-off");
 	char *eeh_force_on = strstr(cmd_line, "eeh-force-on");
...

@@ -121,7 +121,7 @@ void eeh_init(void) {
 	ibm_set_eeh_option = rtas_token("ibm,set-eeh-option");
 	ibm_set_slot_reset = rtas_token("ibm,set-slot-reset");
 	ibm_read_slot_reset_state = rtas_token("ibm,read-slot-reset-state");
-	if (ibm_set_eeh_option != RTAS_UNKNOWN_SERVICE && naca->platform == PLATFORM_PSERIES_LPAR)
+	if (ibm_set_eeh_option != RTAS_UNKNOWN_SERVICE)
 		eeh_implemented = 1;
 	if (eeh_force_off > eeh_force_on) {
...

@@ -334,6 +334,7 @@ static int __init eehoff_parm(char *str)
 {
 	return eeh_parm(str, 0);
 }
 static int __init eehon_parm(char *str)
 {
 	return eeh_parm(str, 1);
...
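The eeh_token() context line above packs five fields into a single 64-bit token using the shift amounts visible in the diff (region ID at bit 60, PHB at bit 48, bus at bit 40, devfn at bit 32, offset in the low 32 bits). A minimal standalone sketch of that packing is below; the helper name and the region-ID constant are illustrative stand-ins, not the kernel's definitions.

#include <stdio.h>
#include <stdint.h>

/* Illustrative only: pack_eeh_token() mirrors the shifts shown in
 * eeh_token() above; IO_UNMAPPED_REGION_ID_GUESS is a stand-in value,
 * not the kernel's constant. */
#define IO_UNMAPPED_REGION_ID_GUESS 0xBULL

static uint64_t pack_eeh_token(uint64_t phb, uint64_t bus,
                               uint64_t devfn, uint64_t offset)
{
        return (IO_UNMAPPED_REGION_ID_GUESS << 60) |
               (phb << 48) |
               ((bus & 0xff) << 40) |
               (devfn << 32) |
               (offset & 0xffffffff);
}

int main(void)
{
        /* PHB 2, bus 0x10, devfn 0x08, register offset 0x100 */
        printf("token = 0x%016llx\n",
               (unsigned long long)pack_eeh_token(2, 0x10, 0x08, 0x100));
        return 0;
}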
arch/ppc64/kernel/entry.S

@@ -353,7 +353,7 @@ recheck:
 	li	r4,0
 	ori	r4,r4,MSR_EE|MSR_RI
 	andc	r10,r10,r4		/* clear MSR_EE and MSR_RI */
-	mtmsrd	r10			/* Update machine state */
+	mtmsrd	r10,1			/* Update machine state */
 #ifdef CONFIG_PPC_ISERIES
 #error fix iSeries soft disable
...

@@ -411,7 +411,7 @@ restore:
 do_work:
 	/* Enable interrupts */
 	ori	r10,r10,MSR_EE|MSR_RI
-	mtmsrd	r10
+	mtmsrd	r10,1
 	andi.	r0,r3,_TIF_NEED_RESCHED
 	beq	1f
...
arch/ppc64/kernel/head.S

@@ -575,7 +575,8 @@ stab_bolted_user_return:
 	bl	.do_stab_SI
 	b	1f
-2:	bl	.do_hash_page_DSI	/* Try to handle as hpte fault */
+2:	li	r5,0x300
+	bl	.do_hash_page_DSI	/* Try to handle as hpte fault */
 1:
 	ld	r4,_DAR(r1)
 	ld	r5,_DSISR(r1)
...

@@ -627,9 +628,8 @@ InstructionAccess_common:
 	bl	.do_stab_SI
 	b	1f
-2:	andis.	r0,r23,0x4000		/* no pte found? */
-	beq	1f			/* if so, try to put a PTE */
-	mr	r3,r22			/* into the hash table */
+2:	mr	r3,r22
+	li	r5,0x400
 	bl	.do_hash_page_ISI	/* Try to handle as hpte fault */
 1:
 	mr	r4,r22
...

@@ -804,6 +804,7 @@ _GLOBAL(do_hash_page_DSI)
 /*
  * r3 contains the faulting address
  * r4 contains the required access permissions
+ * r5 contains the trap number
  *
  * at return r3 = 0 for success
  */
...

@@ -1119,7 +1120,7 @@ _GLOBAL(save_remaining_regs)
 	rldimi	r22,r20,15,48	/* Insert desired EE value */
 #endif
-	mtmsrd	r22
+	mtmsrd	r22,1
 	blr
...
arch/ppc64/kernel/htab.c

@@ -45,9 +45,8 @@
 #include <asm/lmb.h>
 #include <asm/abs_addr.h>
 #include <asm/tlbflush.h>
-#ifdef CONFIG_PPC_EEH
 #include <asm/eeh.h>
-#endif
+#include <asm/tlb.h>
 /*
  * Note: pte --> Linux PTE
...

@@ -196,7 +195,7 @@ static inline unsigned long computeHptePP(unsigned long pte)
  * to be valid via Linux page tables, return 1. If handled return 0
  */
 int __hash_page(unsigned long ea, unsigned long access, unsigned long vsid,
-		pte_t *ptep)
+		pte_t *ptep, unsigned long trap)
 {
 	unsigned long va, vpn;
 	unsigned long newpp, prpn;
...

@@ -245,6 +244,24 @@ int __hash_page(unsigned long ea, unsigned long access, unsigned long vsid,
 	newpp = computeHptePP(pte_val(new_pte));
+#define PPC64_HWNOEXEC (1 << 2)
+
+	/* We do lazy icache flushing on POWER4 */
+	if (__is_processor(PV_POWER4) && pfn_valid(pte_pfn(new_pte))) {
+		struct page *page = pte_page(new_pte);
+
+		/* page is dirty */
+		if (!PageReserved(page) && !test_bit(PG_arch_1, &page->flags)) {
+			if (trap == 0x400) {
+				__flush_dcache_icache(page_address(page));
+				set_bit(PG_arch_1, &page->flags);
+			} else {
+				newpp |= PPC64_HWNOEXEC;
+			}
+		}
+	}
+
 	/* Check if pte already has an hpte (case 2) */
 	if (pte_val(old_pte) & _PAGE_HASHPTE) {
 		/* There MIGHT be an HPTE for this pte */
...

@@ -318,7 +335,7 @@ int __hash_page(unsigned long ea, unsigned long access, unsigned long vsid,
 	return 0;
 }
-int hash_page(unsigned long ea, unsigned long access)
+int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
 {
 	void *pgdir;
 	unsigned long vsid;
...

@@ -346,13 +363,11 @@ int hash_page(unsigned long ea, unsigned long access)
 		mm = &init_mm;
 		vsid = get_kernel_vsid(ea);
 		break;
-#ifdef CONFIG_PPC_EEH
 	case IO_UNMAPPED_REGION_ID:
 		udbg_printf("EEH Error ea = 0x%lx\n", ea);
 		PPCDBG_ENTER_DEBUGGER();
 		panic("EEH Error ea = 0x%lx\n", ea);
 		break;
-#endif
 	case KERNEL_REGION_ID:
 		/*
 		 * As htab_initialize is now, we shouldn't ever get here since
...

@@ -379,7 +394,7 @@ int hash_page(unsigned long ea, unsigned long access)
 	 */
 	spin_lock(&mm->page_table_lock);
 	ptep = find_linux_pte(pgdir, ea);
-	ret = __hash_page(ea, access, vsid, ptep);
+	ret = __hash_page(ea, access, vsid, ptep, trap);
 	spin_unlock(&mm->page_table_lock);
 	return ret;
...

@@ -419,12 +434,11 @@ void flush_hash_range(unsigned long context, unsigned long number, int local)
 		ppc_md.flush_hash_range(context, number, local);
 	} else {
 		int i;
-		struct tlb_batch_data *ptes =
-			&tlb_batch_array[smp_processor_id()][0];
-		for (i = 0; i < number; i++) {
-			flush_hash_page(context, ptes->addr, ptes->pte, local);
-			ptes++;
-		}
+		struct ppc64_tlb_batch *batch =
+			&ppc64_tlb_batch[smp_processor_id()];
+		for (i = 0; i < number; i++)
+			flush_hash_page(context, batch->addr[i], batch->pte[i],
+					local);
 	}
 }
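The 18-line block added to __hash_page() above is the substantive change in this file: on POWER4 the icache flush for a freshly dirtied page is deferred until the page actually faults on an instruction fetch (trap 0x400); on a data fault the HPTE is simply created no-exec. A minimal sketch of that decision is below; the struct, flag names and helper are illustrative stand-ins, not kernel definitions.

#include <stdbool.h>
#include <stdio.h>

/* Hedged sketch of the lazy icache-flush decision added to __hash_page()
 * above; every name here is an illustrative stand-in, not a kernel
 * definition. */
#define TRAP_ISI_SKETCH  0x400          /* instruction access fault */
#define HW_NOEXEC_SKETCH (1UL << 2)     /* "no execute" HPTE bit */

struct page_sketch {
        bool reserved;
        bool icache_clean;              /* plays the role of PG_arch_1 */
};

static void fake_flush(struct page_sketch *page)
{
        printf("flush dcache/icache for page %p\n", (void *)page);
}

/* Returns extra permission bits to OR into the new HPTE. */
static unsigned long lazy_icache_fixup(struct page_sketch *page,
                                       unsigned long trap)
{
        if (page->reserved || page->icache_clean)
                return 0;               /* nothing to do */

        if (trap == TRAP_ISI_SKETCH) {
                /* Page is about to be executed: flush now, remember it. */
                fake_flush(page);
                page->icache_clean = true;
                return 0;
        }

        /* Data access only: map no-exec and defer the flush until an
         * instruction fault actually happens on this page. */
        return HW_NOEXEC_SKETCH;
}

int main(void)
{
        struct page_sketch page = { .reserved = false, .icache_clean = false };

        printf("data fault   -> extra bits %#lx\n",
               lazy_icache_fixup(&page, 0x300));
        printf("instr fault  -> extra bits %#lx\n",
               lazy_icache_fixup(&page, TRAP_ISI_SKETCH));
        printf("second fault -> extra bits %#lx\n",
               lazy_icache_fixup(&page, 0x300));
        return 0;
}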
arch/ppc64/kernel/irq.c

@@ -474,6 +474,8 @@ void ppc_irq_dispatch_handler(struct pt_regs *regs, int irq)
 	int cpu = smp_processor_id();
 	irq_desc_t *desc = irq_desc + irq;
+	/* XXX This causes bad performance and lockups on XICS - Anton */
+	if (naca->interrupt_controller == IC_OPEN_PIC)
 		balance_irq(irq);
 	kstat.irqs[cpu][irq]++;
...
arch/ppc64/kernel/pSeries_htab.c

@@ -20,6 +20,7 @@
 #include <asm/mmu_context.h>
 #include <asm/pgtable.h>
 #include <asm/tlbflush.h>
+#include <asm/tlb.h>
 /*
  * Create a pte. Used during initialization only.
...

@@ -214,7 +215,7 @@ static inline void set_pp_bit(unsigned long pp, HPTE *addr)
 	__asm__ __volatile__(
 	"1:	ldarx	%0,0,%3\n\
-		rldimi	%0,%2,0,62\n\
+		rldimi	%0,%2,0,61\n\
 		stdcx.	%0,0,%3\n\
 		bne	1b"
 	: "=&r" (old), "=m" (*p)
...

@@ -265,8 +266,6 @@ static long pSeries_hpte_updatepp(unsigned long slot, unsigned long newpp,
 	unsigned long vpn, avpn;
 	unsigned long flags;
-	udbg_printf("updatepp\n");
 	if (large)
 		vpn = va >> LARGE_PAGE_SHIFT;
 	else
...

@@ -372,31 +371,32 @@ static void pSeries_flush_hash_range(unsigned long context,
 {
 	unsigned long vsid, vpn, va, hash, secondary, slot, flags, avpn;
 	int i, j;
-	unsigned long va_array[MAX_BATCH_FLUSH];
 	HPTE *hptep;
 	Hpte_dword0 dw0;
-	struct tlb_batch_data *ptes = &tlb_batch_array[smp_processor_id()][0];
+	struct ppc64_tlb_batch *batch = &ppc64_tlb_batch[smp_processor_id()];
 	/* XXX fix for large ptes */
 	unsigned long large = 0;
 	j = 0;
 	for (i = 0; i < number; i++) {
-		if ((ptes->addr >= USER_START) && (ptes->addr <= USER_END))
-			vsid = get_vsid(context, ptes->addr);
+		if ((batch->addr[i] >= USER_START) &&
+		    (batch->addr[i] <= USER_END))
+			vsid = get_vsid(context, batch->addr[i]);
 		else
-			vsid = get_kernel_vsid(ptes->addr);
-		va = (vsid << 28) | (ptes->addr & 0x0fffffff);
-		va_array[j] = va;
+			vsid = get_kernel_vsid(batch->addr[i]);
+		va = (vsid << 28) | (batch->addr[i] & 0x0fffffff);
+		batch->vaddr[j] = va;
 		if (large)
 			vpn = va >> LARGE_PAGE_SHIFT;
 		else
 			vpn = va >> PAGE_SHIFT;
 		hash = hpt_hash(vpn, large);
-		secondary = (pte_val(ptes->pte) & _PAGE_SECONDARY) >> 15;
+		secondary = (pte_val(batch->pte[i]) & _PAGE_SECONDARY) >> 15;
 		if (secondary)
 			hash = ~hash;
 		slot = (hash & htab_data.htab_hash_mask) * HPTES_PER_GROUP;
-		slot += (pte_val(ptes->pte) & _PAGE_GROUP_IX) >> 12;
+		slot += (pte_val(batch->pte[i]) & _PAGE_GROUP_IX) >> 12;
 		hptep = htab_data.htab + slot;
 		avpn = vpn >> 11;
...

@@ -405,8 +405,6 @@ static void pSeries_flush_hash_range(unsigned long context,
 		dw0 = hptep->dw0.dw0;
-		ptes++;
 		if ((dw0.avpn != avpn) || !dw0.v) {
 			pSeries_unlock_hpte(hptep);
 			udbg_printf("invalidate missed\n");
...

@@ -426,7 +424,7 @@ static void pSeries_flush_hash_range(unsigned long context,
 		asm volatile("\n\
 			clrldi	%0,%0,16\n\
 			tlbiel	%0"
-			: : "r" (va_array[i]) : "memory");
+			: : "r" (batch->vaddr[i]) : "memory");
 	}
 	asm volatile("ptesync":::"memory");
...

@@ -440,7 +438,7 @@ static void pSeries_flush_hash_range(unsigned long context,
 		asm volatile("\n\
 			clrldi	%0,%0,16\n\
 			tlbie	%0"
-			: : "r" (va_array[i]) : "memory");
+			: : "r" (batch->vaddr[i]) : "memory");
 	}
 	asm volatile("eieio; tlbsync; ptesync":::"memory");
...
arch/ppc64/kernel/pSeries_lpar.c

@@ -33,6 +33,7 @@
 #include <linux/pci.h>
 #include <asm/naca.h>
 #include <asm/tlbflush.h>
+#include <asm/tlb.h>
 /* Status return values */
 #define H_Success	0
...

@@ -646,11 +647,9 @@ static long pSeries_lpar_hpte_updatepp(unsigned long slot, unsigned long newpp,
 {
 	unsigned long lpar_rc;
 	unsigned long flags;
 	unsigned long vpn = va >> PAGE_SHIFT;
-	udbg_printf("updatepp\n");
-	flags = (newpp & 3) | H_AVPN;
+	flags = (newpp & 7) | H_AVPN;
 	lpar_rc = plpar_pte_protect(flags, slot, (vpn >> 4) & ~0x7fUL);
 	if (lpar_rc == H_Not_Found) {
...

@@ -775,15 +774,14 @@ void pSeries_lpar_flush_hash_range(unsigned long context, unsigned long number,
 				   int local)
 {
 	int i;
-	struct tlb_batch_data *ptes = &tlb_batch_array[smp_processor_id()][0];
 	unsigned long flags;
+	struct ppc64_tlb_batch *batch = &ppc64_tlb_batch[smp_processor_id()];
 	spin_lock_irqsave(&pSeries_lpar_tlbie_lock, flags);
-	for (i = 0; i < number; i++) {
-		flush_hash_page(context, ptes->addr, ptes->pte, local);
-		ptes++;
-	}
+	for (i = 0; i < number; i++)
+		flush_hash_page(context, batch->addr[i], batch->pte[i], local);
 	spin_unlock_irqrestore(&pSeries_lpar_tlbie_lock, flags);
 }
...
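All three flush paths touched above (flush_hash_range(), pSeries_flush_hash_range() and pSeries_lpar_flush_hash_range()) switch from walking a tlb_batch_data record with a bumped pointer to indexing parallel arrays in a per-CPU ppc64_tlb_batch. The real structure lives in the new include/asm-ppc64/tlb.h, which this page does not show, so the layout in the sketch below is only inferred from the accesses visible in the diff (batch->addr[i], batch->pte[i], batch->vaddr[i]); every name carries a _sketch suffix to mark it as an assumption.

#include <stdio.h>

/* Hedged sketch of the per-CPU batch indexing that replaces the old
 * tlb_batch_data cursor in the hunks above.  The struct layout is an
 * assumption inferred from the accesses visible in the diff, not the
 * definition in include/asm-ppc64/tlb.h. */
#define MAX_BATCH_SKETCH 64
#define NR_CPUS_SKETCH   4

typedef unsigned long pte_sketch_t;

struct tlb_batch_sketch {
        unsigned long addr[MAX_BATCH_SKETCH];   /* effective addresses */
        pte_sketch_t  pte[MAX_BATCH_SKETCH];    /* matching Linux PTEs */
        unsigned long vaddr[MAX_BATCH_SKETCH];  /* computed virtual addrs */
};

static struct tlb_batch_sketch tlb_batch_sketch[NR_CPUS_SKETCH];

static void flush_one(unsigned long addr, pte_sketch_t pte)
{
        printf("flush ea=%#lx pte=%#lx\n", addr, pte);
}

/* Mirrors the new loop shape: index the arrays instead of bumping a
 * pointer through interleaved records. */
static void flush_batch(int cpu, unsigned long number)
{
        struct tlb_batch_sketch *batch = &tlb_batch_sketch[cpu];
        unsigned long i;

        for (i = 0; i < number; i++)
                flush_one(batch->addr[i], batch->pte[i]);
}

int main(void)
{
        tlb_batch_sketch[0].addr[0] = 0x10000000UL;
        tlb_batch_sketch[0].pte[0]  = 0x1UL;
        flush_batch(0, 1);
        return 0;
}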
arch/ppc64/kernel/proc_pmc.c

@@ -100,9 +100,6 @@ void proc_ppc64_init(void)
 	if (!proc_ppc64_root) return;
 	spin_unlock(&proc_ppc64_lock);
-#ifdef CONFIG_PPC_EEH
-	eeh_init_proc(proc_ppc64_root);
-#endif
 	proc_ppc64_pmc_root = proc_mkdir("pmc", proc_ppc64_root);
...
arch/ppc64/kernel/signal.c

@@ -26,8 +26,6 @@
 #include <linux/unistd.h>
 #include <linux/stddef.h>
 #include <linux/elf.h>
-#include <linux/tty.h>
-#include <linux/binfmts.h>
 #include <asm/ppc32.h>
 #include <asm/sigcontext.h>
 #include <asm/ucontext.h>
...

@@ -59,9 +57,36 @@
  */
 #define MSR_USERCHANGE	(MSR_FE0 | MSR_FE1)
-int do_signal(sigset_t *oldset, struct pt_regs *regs);
-extern long sys_wait4(pid_t pid, unsigned int *stat_addr,
-		      int options, /*unsigned long*/ struct rusage *ru);
+/*
+ * When we have signals to deliver, we set up on the
+ * user stack, going down from the original stack pointer:
+ *	a sigregs struct
+ *	one or more sigcontext structs with
+ *	a gap of __SIGNAL_FRAMESIZE bytes
+ *
+ * Each of these things must be a multiple of 16 bytes in size.
+ *
+ */
+struct sigregs {
+	elf_gregset_t	gp_regs;
+	double		fp_regs[ELF_NFPREG];
+	unsigned int	tramp[2];
+	/* 64 bit API allows for 288 bytes below sp before
+	   decrementing it. */
+	int		abigap[72];
+};
+
+struct rt_sigframe {
+	unsigned long	_unused[2];
+	struct siginfo	*pinfo;
+	void		*puc;
+	struct siginfo	info;
+	struct ucontext	uc;
+};
+
+extern int do_signal(sigset_t *oldset, struct pt_regs *regs);
 /*
  * Atomically swap in the new signal mask, and wait for a signal.
...

@@ -127,7 +152,7 @@ long sys_rt_sigsuspend(sigset_t *unewset, size_t sigsetsize, int p3, int p4, int
 long sys_sigaltstack(const stack_t *uss, stack_t *uoss)
 {
 	struct pt_regs *regs = (struct pt_regs *) &uss;
 	return do_sigaltstack(uss, uoss, regs->gpr[1]);
 }
...

@@ -139,6 +164,7 @@ long sys_sigaction(int sig, const struct old_sigaction *act,
 	if (act) {
 		old_sigset_t mask;
 		if (verify_area(VERIFY_READ, act, sizeof(*act)) ||
 		    __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
 		    __get_user(new_ka.sa.sa_restorer, &act->sa_restorer))
...

@@ -148,8 +174,7 @@ long sys_sigaction(int sig, const struct old_sigaction *act,
 		siginitset(&new_ka.sa.sa_mask, mask);
 	}
-	ret = do_sigaction(sig, (act ? &new_ka : NULL), (oact ? &old_ka : NULL));
+	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
 	if (!ret && oact) {
 		if (verify_area(VERIFY_WRITE, oact, sizeof(*oact)) ||
 		    __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
...

@@ -162,35 +187,6 @@ long sys_sigaction(int sig, const struct old_sigaction *act,
 	return ret;
 }
-/*
- * When we have signals to deliver, we set up on the
- * user stack, going down from the original stack pointer:
- *	a sigregs struct
- *	one or more sigcontext structs with
- *	a gap of __SIGNAL_FRAMESIZE bytes
- *
- * Each of these things must be a multiple of 16 bytes in size.
- *
- */
-struct sigregs {
-	elf_gregset_t	gp_regs;
-	double		fp_regs[ELF_NFPREG];
-	unsigned int	tramp[2];
-	/* 64 bit API allows for 288 bytes below sp before
-	   decrementing it. */
-	int		abigap[72];
-};
-
-struct rt_sigframe {
-	unsigned long	_unused[2];
-	struct siginfo	*pinfo;
-	void		*puc;
-	struct siginfo	info;
-	struct ucontext	uc;
-};
-
 /*
  * When we have rt signals to deliver, we set up on the
  * user stack, going down from the original stack pointer:
...

@@ -231,7 +227,7 @@ int sys_rt_sigreturn(unsigned long r3, unsigned long r4, unsigned long r5,
 	 * preamble frame (where registers are stored)
 	 * see handle_signal()
 	 */
 	sr = (struct sigregs *) sigctx.regs;
 	if (copy_from_user(saved_regs, &sr->gp_regs, sizeof(sr->gp_regs)))
 		goto badframe;
 	saved_regs[PT_MSR] = (regs->msr & ~MSR_USERCHANGE)
...

@@ -251,11 +247,10 @@ int sys_rt_sigreturn(unsigned long r3, unsigned long r4, unsigned long r5,
 	do_exit(SIGSEGV);
 }
 static void
 setup_rt_frame(struct pt_regs *regs, struct sigregs *frame,
 	       signed long newsp)
 {
 	struct rt_sigframe *rt_sf = (struct rt_sigframe *) newsp;
 	/* Handler is *really* a pointer to the function descriptor for
 	 * the signal routine. The first entry in the function
 	 * descriptor is the entry address of signal and the second
...

@@ -277,11 +272,13 @@ setup_rt_frame(struct pt_regs *regs, struct sigregs *frame,
 	if (__copy_to_user(&frame->gp_regs, regs, GP_REGS_SIZE)
 	    || __copy_to_user(&frame->fp_regs, current->thread.fpr,
 			      ELF_NFPREG * sizeof(double))
-	    || __put_user(0x38000000UL + __NR_rt_sigreturn, &frame->tramp[0])	/* li r0, __NR_rt_sigreturn */
-	    || __put_user(0x44000002UL, &frame->tramp[1]))	/* sc */
+	    /* li r0, __NR_rt_sigreturn */
+	    || __put_user(0x38000000UL + __NR_rt_sigreturn, &frame->tramp[0])
+	    /* sc */
+	    || __put_user(0x44000002UL, &frame->tramp[1]))
 		goto badframe;
 	flush_icache_range((unsigned long) &frame->tramp[0],
 			   (unsigned long) &frame->tramp[2]);
 	current->thread.fpscr = 0;	/* turn off all fp exceptions */
 	/* Retrieve rt_sigframe from stack and
...

@@ -289,11 +286,11 @@ setup_rt_frame(struct pt_regs *regs, struct sigregs *frame,
 	 */
 	newsp -= __SIGNAL_FRAMESIZE;
 	if (get_user(temp_ptr, &rt_sf->uc.uc_mcontext.handler)) {
 		goto badframe;
 	}
 	funct_desc_ptr = (struct funct_descr_entry *) temp_ptr;
 	if (put_user(regs->gpr[1], (unsigned long *) newsp)
 	    || get_user(regs->nip, &funct_desc_ptr->entry)
...

@@ -304,8 +301,8 @@ setup_rt_frame(struct pt_regs *regs, struct sigregs *frame,
 		goto badframe;
 	regs->gpr[1] = newsp;
 	regs->gpr[6] = (unsigned long) rt_sf;
 	regs->link = (unsigned long) frame->tramp;
 	return;
...

@@ -342,11 +339,11 @@ long sys_sigreturn(unsigned long r3, unsigned long r4, unsigned long r5,
 	current->blocked = set;
 	recalc_sigpending();
 	spin_unlock_irq(&current->sigmask_lock);
 	if (regs->msr & MSR_FP)
 		giveup_fpu(current);
 	/* restore registers */
 	sr = (struct sigregs *) sigctx.regs;
 	if (copy_from_user(saved_regs, &sr->gp_regs, sizeof(sr->gp_regs)))
 		goto badframe;
 	saved_regs[PT_MSR] = (regs->msr & ~MSR_USERCHANGE)
...

@@ -367,8 +364,7 @@ long sys_sigreturn(unsigned long r3, unsigned long r4, unsigned long r5,
 /*
  * Set up a signal frame.
  */
 static void
 setup_frame(struct pt_regs *regs, struct sigregs *frame,
 	    unsigned long newsp)
 {
...

@@ -385,7 +381,7 @@ setup_frame(struct pt_regs *regs, struct sigregs *frame,
 	struct funct_descr_entry *funct_desc_ptr;
 	unsigned long temp_ptr;
 	struct sigcontext_struct *sc = (struct sigcontext_struct *) newsp;
 	if (verify_area(VERIFY_WRITE, frame, sizeof(*frame)))
 		goto badframe;
...

@@ -394,27 +390,29 @@ setup_frame(struct pt_regs *regs, struct sigregs *frame,
 	if (__copy_to_user(&frame->gp_regs, regs, GP_REGS_SIZE)
 	    || __copy_to_user(&frame->fp_regs, current->thread.fpr,
 			      ELF_NFPREG * sizeof(double))
-	    || __put_user(0x38000000UL + __NR_sigreturn, &frame->tramp[0])	/* li r0, __NR_sigreturn */
-	    || __put_user(0x44000002UL, &frame->tramp[1]))	/* sc */
+	    /* li r0, __NR_sigreturn */
+	    || __put_user(0x38000000UL + __NR_sigreturn, &frame->tramp[0])
+	    /* sc */
+	    || __put_user(0x44000002UL, &frame->tramp[1]))
 		goto badframe;
 	flush_icache_range((unsigned long) &frame->tramp[0],
 			   (unsigned long) &frame->tramp[2]);
 	current->thread.fpscr = 0;	/* turn off all fp exceptions */
 	newsp -= __SIGNAL_FRAMESIZE;
 	if (get_user(temp_ptr, &sc->handler))
 		goto badframe;
 	funct_desc_ptr = (struct funct_descr_entry *) temp_ptr;
 	if (put_user(regs->gpr[1], (unsigned long *) newsp)
 	    || get_user(regs->nip, &funct_desc_ptr->entry)
 	    || get_user(regs->gpr[2], &funct_desc_ptr->toc)
 	    || get_user(regs->gpr[3], &sc->signal))
 		goto badframe;
 	regs->gpr[1] = newsp;
 	regs->gpr[4] = (unsigned long) sc;
 	regs->link = (unsigned long) frame->tramp;
 	return;
...

@@ -429,8 +427,7 @@ setup_frame(struct pt_regs *regs, struct sigregs *frame,
 /*
  * OK, we're invoking a handler
  */
 static void
 handle_signal(unsigned long sig, siginfo_t *info, sigset_t *oldset,
 	      struct pt_regs *regs, unsigned long *newspp, unsigned long frame)
 {
 	struct sigcontext_struct *sc;
...

@@ -447,11 +444,12 @@ handle_signal(unsigned long sig, siginfo_t *info, sigset_t *oldset,
 	if (ka->sa.sa_flags & SA_SIGINFO) {
 		/* Put a Real Time Context onto stack */
 		*newspp -= sizeof(*rt_sf);
 		rt_sf = (struct rt_sigframe *) *newspp;
 		if (verify_area(VERIFY_WRITE, rt_sf, sizeof(*rt_sf)))
 			goto badframe;
 		if (__put_user((unsigned long) ka->sa.sa_handler, &rt_sf->uc.uc_mcontext.handler)
 		    || __put_user(&rt_sf->info, &rt_sf->pinfo)
 		    || __put_user(&rt_sf->uc, &rt_sf->puc)
 		    /* Put the siginfo */
...

@@ -462,8 +460,10 @@ handle_signal(unsigned long sig, siginfo_t *info, sigset_t *oldset,
 		    || __put_user(current->sas_ss_sp, &rt_sf->uc.uc_stack.ss_sp)
 		    || __put_user(sas_ss_flags(regs->gpr[1]),
 				  &rt_sf->uc.uc_stack.ss_flags)
-		    || __put_user(current->sas_ss_size, &rt_sf->uc.uc_stack.ss_size)
+		    || __put_user(current->sas_ss_size,
+				  &rt_sf->uc.uc_stack.ss_size)
 		    || __copy_to_user(&rt_sf->uc.uc_sigmask, oldset, sizeof(*oldset))
 		    /* mcontext.regs points to preamble register frame */
 		    || __put_user((struct pt_regs *) frame, &rt_sf->uc.uc_mcontext.regs)
 		    || __put_user(sig, &rt_sf->uc.uc_mcontext.signal))
...

@@ -471,11 +471,11 @@ handle_signal(unsigned long sig, siginfo_t *info, sigset_t *oldset,
 	} else {
 		/* Put a sigcontext on the stack */
 		*newspp -= sizeof(*sc);
 		sc = (struct sigcontext_struct *) *newspp;
 		if (verify_area(VERIFY_WRITE, sc, sizeof(*sc)))
 			goto badframe;
 		if (__put_user((unsigned long) ka->sa.sa_handler, &sc->handler)
 		    || __put_user(oldset->sig[0], &sc->oldmask)
 #if _NSIG_WORDS > 1
 		    || __put_user(oldset->sig[1], &sc->_unused[3])
...

@@ -512,6 +512,7 @@ handle_signal(unsigned long sig, siginfo_t *info, sigset_t *oldset,
 	 * mistake.
 	 */
 extern int do_signal32(sigset_t *oldset, struct pt_regs *regs);
 int do_signal(sigset_t *oldset, struct pt_regs *regs)
 {
 	siginfo_t info;
...

@@ -534,8 +535,8 @@ int do_signal(sigset_t *oldset, struct pt_regs *regs)
 	signr = get_signal_to_deliver(&info, regs);
 	if (signr > 0) {
 		ka = &current->sig->action[signr-1];
-		if ( (ka->sa.sa_flags & SA_ONSTACK)
-		     && (!on_sig_stack(regs->gpr[1])))
+		if ((ka->sa.sa_flags & SA_ONSTACK)
+		    && (!on_sig_stack(regs->gpr[1])))
 			newsp = (current->sas_ss_sp + current->sas_ss_size);
 		else
 			newsp = regs->gpr[1];
...

@@ -557,9 +558,10 @@ int do_signal(sigset_t *oldset, struct pt_regs *regs)
 		if (newsp == frame)
 			return 0;		/* no signals delivered */
+		/* Invoke correct stack setup routine */
 		if (ka->sa.sa_flags & SA_SIGINFO)
 			setup_rt_frame(regs, (struct sigregs *) frame, newsp);
 		else
 			setup_frame(regs, (struct sigregs *) frame, newsp);
 		return 1;
 }
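setup_frame() and setup_rt_frame() above read the registered handler through a struct funct_descr_entry, loading regs->nip from its entry field and regs->gpr[2] from its toc field: on 64-bit PowerPC ELF a function pointer designates a descriptor rather than code. A hedged standalone illustration follows; the struct name and the placeholder values below are not kernel definitions.

#include <stdio.h>

/* Hedged illustration of the indirection used by setup_frame() and
 * setup_rt_frame() above: the kernel reads the entry address into
 * regs->nip and the TOC value into regs->gpr[2] before returning to
 * userspace.  The struct and the values here are illustrative
 * stand-ins, not kernel definitions. */
struct func_descr_sketch {
        unsigned long entry;    /* first instruction of the handler */
        unsigned long toc;      /* TOC pointer the handler expects in r2 */
};

int main(void)
{
        struct func_descr_sketch desc = {
                .entry = 0x10001234UL,  /* placeholder code address */
                .toc   = 0x10080000UL,  /* placeholder TOC address */
        };

        unsigned long nip = desc.entry; /* what get_user(regs->nip, &entry) fetches */
        unsigned long r2  = desc.toc;   /* what get_user(regs->gpr[2], &toc) fetches */

        printf("nip=%#lx r2=%#lx\n", nip, r2);
        return 0;
}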
arch/ppc64/kernel/signal32.c

@@ -14,43 +14,19 @@
  * 2 of the License, or (at your option) any later version.
  */
-#include <asm/ptrace.h>
-#include <linux/kernel.h>
 #include <linux/sched.h>
-#include <linux/fs.h>
 #include <linux/mm.h>
-#include <linux/file.h>
-#include <linux/signal.h>
-#include <linux/utime.h>
-#include <linux/resource.h>
-#include <linux/times.h>
-#include <linux/utsname.h>
-#include <linux/timex.h>
 #include <linux/smp.h>
 #include <linux/smp_lock.h>
-#include <linux/sem.h>
-#include <linux/msg.h>
-#include <linux/shm.h>
-#include <linux/errno.h>
-#include <linux/slab.h>
-#include <linux/uio.h>
-#include <linux/nfs_fs.h>
-#include <linux/smb_fs.h>
-#include <linux/smb_mount.h>
-#include <linux/ncp_fs.h>
-#include <linux/module.h>
-#include <linux/poll.h>
-#include <linux/personality.h>
-#include <linux/stat.h>
-#include <linux/filter.h>
-#include <linux/tty.h>
-#include <linux/binfmts.h>
+#include <linux/kernel.h>
+#include <linux/signal.h>
 #include <linux/elf.h>
-#include <asm/types.h>
-#include <asm/ipc.h>
-#include <asm/uaccess.h>
 #include <asm/ppc32.h>
+#include <asm/uaccess.h>
 #include <asm/ppcdebug.h>
 #include <asm/unistd.h>
+#include <asm/cacheflush.h>
 #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
 /*
...

@@ -112,9 +88,6 @@ struct rt_sigframe_32 {
 };
-extern asmlinkage long sys_wait4(pid_t pid, unsigned int *stat_addr,
-				 int options, struct rusage *ru);
 /*
  * Start of nonRT signal support
...

@@ -133,7 +106,7 @@ extern asmlinkage long sys_wait4(pid_t pid,unsigned int * stat_addr,
  *	setup_frame32
  */
-asmlinkage long sys32_sigaction(int sig, struct old_sigaction32 *act,
+long sys32_sigaction(int sig, struct old_sigaction32 *act,
 		struct old_sigaction32 *oact)
 {
 	struct k_sigaction new_ka, old_ka;
...

@@ -145,32 +118,30 @@ asmlinkage long sys32_sigaction(int sig, struct old_sigaction32 *act,
 	if (act) {
 		old_sigset_t32 mask;
-		ret = get_user((long)new_ka.sa.sa_handler, &act->sa_handler);
-		ret |= __get_user((long)new_ka.sa.sa_restorer, &act->sa_restorer);
-		ret |= __get_user(new_ka.sa.sa_flags, &act->sa_flags);
-		ret |= __get_user(mask, &act->sa_mask);
-		if (ret)
-			return ret;
+		if (get_user((long)new_ka.sa.sa_handler, &act->sa_handler) ||
+		    __get_user((long)new_ka.sa.sa_restorer, &act->sa_restorer) ||
+		    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
+		    __get_user(mask, &act->sa_mask))
+			return -EFAULT;
 		siginitset(&new_ka.sa.sa_mask, mask);
 	}
 	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
 	if (!ret && oact) {
-		ret = put_user((long)old_ka.sa.sa_handler, &oact->sa_handler);
-		ret |= __put_user((long)old_ka.sa.sa_restorer, &oact->sa_restorer);
-		ret |= __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
-		ret |= __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
+		if (put_user((long)old_ka.sa.sa_handler, &oact->sa_handler) ||
+		    __put_user((long)old_ka.sa.sa_restorer, &oact->sa_restorer) ||
+		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
+		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
+			return -EFAULT;
 	}
 	return ret;
 }
-extern asmlinkage long sys_sigpending(old_sigset_t *set);
-asmlinkage long sys32_sigpending(old_sigset_t32 *set)
+extern long sys_sigpending(old_sigset_t *set);
+long sys32_sigpending(old_sigset_t32 *set)
 {
 	old_sigset_t s;
 	int ret;
...

@@ -185,9 +156,7 @@ asmlinkage long sys32_sigpending(old_sigset_t32 *set)
 }
-extern asmlinkage long sys_sigprocmask(int how, old_sigset_t *set,
+extern long sys_sigprocmask(int how, old_sigset_t *set,
 		old_sigset_t *oset);
 /*
...

@@ -197,7 +166,7 @@ extern asmlinkage long sys_sigprocmask(int how, old_sigset_t *set,
  * of a signed int (msr in 32-bit mode) and the register representation
  * of a signed int (msr in 64-bit mode) is performed.
  */
-asmlinkage long sys32_sigprocmask(u32 how, old_sigset_t32 *set,
+long sys32_sigprocmask(u32 how, old_sigset_t32 *set,
 		old_sigset_t32 *oset)
 {
 	old_sigset_t s;
...

@@ -252,23 +221,21 @@ long sys32_sigreturn(unsigned long r3, unsigned long r4, unsigned long r5,
 	 * Note that PPC32 puts the upper 32 bits of the sigmask in the
 	 * unused part of the signal stackframe
 	 */
 	set.sig[0] = sigctx.oldmask + ((long)(sigctx._unused[3]) << 32);
 	sigdelsetmask(&set, ~_BLOCKABLE);
 	spin_lock_irq(&current->sigmask_lock);
 	current->blocked = set;
 	recalc_sigpending();
 	spin_unlock_irq(&current->sigmask_lock);
-	/* Last stacked signal - restore registers */
-	sr = (struct sigregs32 *)(u64)sigctx.regs;
 	if (regs->msr & MSR_FP)
 		giveup_fpu(current);
+	/* Last stacked signal - restore registers */
+	sr = (struct sigregs32 *)(u64)sigctx.regs;
 	/*
 	 * copy the 32 bit register values off the user stack
 	 * into the 32 bit register area
 	 */
 	if (copy_from_user(saved_regs, &sr->gp_regs, sizeof(sr->gp_regs)))
 		goto badframe;
 	/*
 	 * The saved reg structure in the frame is an elf_grepset_t32,
...

@@ -323,7 +290,6 @@ long sys32_sigreturn(unsigned long r3, unsigned long r4, unsigned long r5,
 		goto badframe;
 	ret = regs->result;
 	return ret;
 badframe:
...

@@ -387,12 +353,13 @@ static void setup_frame32(struct pt_regs *regs, struct sigregs32 *frame,
 	 */
 	if (__copy_to_user(&frame->fp_regs, current->thread.fpr,
 			   ELF_NFPREG * sizeof(double))
-	    || __put_user(0x38000000U + __NR_sigreturn, &frame->tramp[0])	/* li r0, __NR_sigreturn */
-	    || __put_user(0x44000002U, &frame->tramp[1]))	/* sc */
+	    /* li r0, __NR_sigreturn */
+	    || __put_user(0x38000000U + __NR_sigreturn, &frame->tramp[0])
+	    /* sc */
+	    || __put_user(0x44000002U, &frame->tramp[1]))
 		goto badframe;
 	flush_icache_range((unsigned long) &frame->tramp[0],
 			   (unsigned long) &frame->tramp[2]);
 	current->thread.fpscr = 0;	/* turn off all fp exceptions */
 	newsp -= __SIGNAL_FRAMESIZE32;
...

@@ -438,7 +405,7 @@ static void setup_frame32(struct pt_regs *regs, struct sigregs32 *frame,
  *
  * Other routines
  *	setup_rt_frame32
- *	siginfo64to32
+ *	copy_siginfo_to_user32
  *	siginfo32to64
  */
...

@@ -451,50 +418,45 @@ long sys32_rt_sigreturn(unsigned long r3, unsigned long r4, unsigned long r5,
 		unsigned long r6, unsigned long r7, unsigned long r8,
 		struct pt_regs *regs)
 {
-	struct rt_sigframe_32 *rt_stack_frame;
+	struct rt_sigframe_32 *rt_sf;
 	struct sigcontext32_struct sigctx;
-	struct sigregs32 *signalregs;
+	struct sigregs32 *sr;
 	int ret;
 	elf_gregset_t32 saved_regs;	/* an array of 32 bit register values */
-	sigset_t signal_set;
-	stack_t stack;
+	sigset_t set;
+	stack_t st;
 	int i;
-	ret = 0;
 	/* Adjust the inputted reg1 to point to the first rt signal frame */
-	rt_stack_frame = (struct rt_sigframe_32 *)(regs->gpr[1] + __SIGNAL_FRAMESIZE32);
+	rt_sf = (struct rt_sigframe_32 *)(regs->gpr[1] + __SIGNAL_FRAMESIZE32);
 	/* Copy the information from the user stack */
-	if (copy_from_user(&sigctx, &rt_stack_frame->uc.uc_mcontext,
-			   sizeof(sigctx))
-	    || copy_from_user(&signal_set, &rt_stack_frame->uc.uc_sigmask,
-			      sizeof(signal_set))
-	    || copy_from_user(&stack, &rt_stack_frame->uc.uc_stack, sizeof(stack)))
+	if (copy_from_user(&sigctx, &rt_sf->uc.uc_mcontext, sizeof(sigctx))
+	    || copy_from_user(&set, &rt_sf->uc.uc_sigmask, sizeof(set))
+	    || copy_from_user(&st, &rt_sf->uc.uc_stack, sizeof(st)))
 		goto badframe;
 	/*
 	 * Unblock the signal that was processed
 	 * After a signal handler runs -
 	 * if the signal is blockable - the signal will be unblocked
-	 * ( sigkill and sigstop are not blockable)
+	 * (sigkill and sigstop are not blockable)
 	 */
-	sigdelsetmask(&signal_set, ~_BLOCKABLE);
+	sigdelsetmask(&set, ~_BLOCKABLE);
 	/* update the current based on the sigmask found in the rt_stackframe */
 	spin_lock_irq(&current->sigmask_lock);
-	current->blocked = signal_set;
+	current->blocked = set;
 	recalc_sigpending();
 	spin_unlock_irq(&current->sigmask_lock);
+	/* If currently owning the floating point - give them up */
+	if (regs->msr & MSR_FP)
+		giveup_fpu(current);
 	/*
 	 * Set to point to the next rt_sigframe - this is used to
 	 * determine whether this is the last signal to process
 	 */
-	signalregs = (struct sigregs32 *) (u64) sigctx.regs;
-	/* If currently owning the floating point - give them up */
-	if (regs->msr & MSR_FP)
-		giveup_fpu(current);
-	if (copy_from_user(saved_regs, &signalregs->gp_regs,
-			   sizeof(signalregs->gp_regs)))
+	sr = (struct sigregs32 *)(u64)sigctx.regs;
+	if (copy_from_user(saved_regs, &sr->gp_regs, sizeof(sr->gp_regs)))
 		goto badframe;
 	/*
 	 * The saved reg structure in the frame is an elf_grepset_t32,
...

@@ -544,7 +506,7 @@ long sys32_rt_sigreturn(unsigned long r3, unsigned long r4, unsigned long r5,
-asmlinkage long sys32_rt_sigaction(int sig, const struct sigaction32 *act,
+long sys32_rt_sigaction(int sig, const struct sigaction32 *act,
 		struct sigaction32 *oact, size_t sigsetsize)
 {
 	struct k_sigaction new_ka, old_ka;
...

@@ -599,7 +561,7 @@ asmlinkage long sys32_rt_sigaction(int sig, const struct sigaction32 *act,
 }
-extern asmlinkage long sys_rt_sigprocmask(int how, sigset_t *set,
+extern long sys_rt_sigprocmask(int how, sigset_t *set,
 		sigset_t *oset, size_t sigsetsize);
 /*
...

@@ -609,7 +571,7 @@ extern asmlinkage long sys_rt_sigprocmask(int how, sigset_t *set,
  * of a signed int (msr in 32-bit mode) and the register representation
  * of a signed int (msr in 64-bit mode) is performed.
  */
-asmlinkage long sys32_rt_sigprocmask(u32 how, sigset32_t *set,
+long sys32_rt_sigprocmask(u32 how, sigset32_t *set,
 		sigset32_t *oset, size_t sigsetsize)
 {
 	sigset_t s;
...

@@ -649,10 +611,10 @@ asmlinkage long sys32_rt_sigprocmask(u32 how, sigset32_t *set,
 }
-extern asmlinkage long sys_rt_sigpending(sigset_t *set, size_t sigsetsize);
-asmlinkage long sys32_rt_sigpending(sigset32_t *set,
+extern long sys_rt_sigpending(sigset_t *set, size_t sigsetsize);
+long sys32_rt_sigpending(sigset32_t *set,
 		__kernel_size_t32 sigsetsize)
 {
 	sigset_t s;
...

@@ -677,50 +639,54 @@ asmlinkage long sys32_rt_sigpending(sigset32_t *set,
 }
-siginfo_t32 *siginfo64to32(siginfo_t32 *d, siginfo_t *s)
+static int copy_siginfo_to_user32(siginfo_t32 *d, siginfo_t *s)
 {
-	memset(d, 0, sizeof(siginfo_t32));
-	d->si_signo = s->si_signo;
-	d->si_errno = s->si_errno;
-	/* XXX why dont we just implement copy_siginfo_to_user32? - Anton */
-	d->si_code = s->si_code & 0xffff;
+	int err;
+
+	if (!access_ok(VERIFY_WRITE, d, sizeof(*d)))
+		return -EFAULT;
+
+	err = __put_user(s->si_signo, &d->si_signo);
+	err |= __put_user(s->si_errno, &d->si_errno);
+	err |= __put_user((short)s->si_code, &d->si_code);
 	if (s->si_signo >= SIGRTMIN) {
-		d->si_pid = s->si_pid;
-		d->si_uid = s->si_uid;
-		d->si_int = s->si_int;
+		err |= __put_user(s->si_pid, &d->si_pid);
+		err |= __put_user(s->si_uid, &d->si_uid);
+		err |= __put_user(s->si_int, &d->si_int);
 	} else {
 		switch (s->si_signo) {
 		/* XXX: What about POSIX1.b timers */
 		case SIGCHLD:
-			d->si_pid = s->si_pid;
-			d->si_status = s->si_status;
-			d->si_utime = s->si_utime;
-			d->si_stime = s->si_stime;
+			err |= __put_user(s->si_pid, &d->si_pid);
+			err |= __put_user(s->si_status, &d->si_status);
+			err |= __put_user(s->si_utime, &d->si_utime);
+			err |= __put_user(s->si_stime, &d->si_stime);
 			break;
 		case SIGSEGV:
 		case SIGBUS:
 		case SIGFPE:
 		case SIGILL:
-			d->si_addr = (long)(s->si_addr);
+			err |= __put_user((long)(s->si_addr), &d->si_addr);
 			break;
 		case SIGPOLL:
-			d->si_band = s->si_band;
-			d->si_fd = s->si_fd;
+			err |= __put_user(s->si_band, &d->si_band);
+			err |= __put_user(s->si_fd, &d->si_fd);
 			break;
 		default:
-			d->si_pid = s->si_pid;
-			d->si_uid = s->si_uid;
+			err |= __put_user(s->si_pid, &d->si_pid);
+			err |= __put_user(s->si_uid, &d->si_uid);
 			break;
 		}
 	}
-	return d;
+	return err;
 }
-extern asmlinkage long sys_rt_sigtimedwait(const sigset_t *uthese,
+extern long sys_rt_sigtimedwait(const sigset_t *uthese,
 		siginfo_t *uinfo, const struct timespec *uts,
 		size_t sigsetsize);
-asmlinkage long sys32_rt_sigtimedwait(sigset32_t *uthese, siginfo_t32 *uinfo,
+long sys32_rt_sigtimedwait(sigset32_t *uthese, siginfo_t32 *uinfo,
 		struct timespec32 *uts, __kernel_size_t32 sigsetsize)
 {
 	sigset_t s;
...

@@ -729,7 +695,6 @@ asmlinkage long sys32_rt_sigtimedwait(sigset32_t *uthese, siginfo_t32 *uinfo,
 	int ret;
 	mm_segment_t old_fs = get_fs();
 	siginfo_t info;
-	siginfo_t32 info32;
 	if (copy_from_user(&s32, uthese, sizeof(sigset32_t)))
 		return -EFAULT;
...

@@ -753,8 +718,7 @@ asmlinkage long sys32_rt_sigtimedwait(sigset32_t *uthese, siginfo_t32 *uinfo,
 				  sigsetsize);
 	set_fs(old_fs);
 	if (ret >= 0 && uinfo) {
-		if (copy_to_user(uinfo, siginfo64to32(&info32, &info),
-				 sizeof(siginfo_t32)))
+		if (copy_siginfo_to_user32(uinfo, &info))
 			return -EFAULT;
 	}
 	return ret;
...

@@ -762,7 +726,7 @@ asmlinkage long sys32_rt_sigtimedwait(sigset32_t *uthese, siginfo_t32 *uinfo,
-siginfo_t * siginfo32to64(siginfo_t *d, siginfo_t32 *s)
+static siginfo_t * siginfo32to64(siginfo_t *d, siginfo_t32 *s)
 {
 	d->si_signo = s->si_signo;
 	d->si_errno = s->si_errno;
...

@@ -800,7 +764,7 @@ siginfo_t * siginfo32to64(siginfo_t *d, siginfo_t32 *s)
 }
-extern asmlinkage long sys_rt_sigqueueinfo(int pid, int sig, siginfo_t *uinfo);
+extern long sys_rt_sigqueueinfo(int pid, int sig, siginfo_t *uinfo);
 /*
  * Note: it is necessary to treat pid and sig as unsigned ints, with the
...

@@ -809,7 +773,7 @@ extern asmlinkage long sys_rt_sigqueueinfo(int pid, int sig, siginfo_t *uinfo);
  * (msr in 32-bit mode) and the register representation of a signed int
  * (msr in 64-bit mode) is performed.
  */
-asmlinkage long sys32_rt_sigqueueinfo(u32 pid, u32 sig, siginfo_t32 *uinfo)
+long sys32_rt_sigqueueinfo(u32 pid, u32 sig, siginfo_t32 *uinfo)
 {
 	siginfo_t info;
 	siginfo_t32 info32;
...

@@ -974,8 +938,7 @@ static void handle_signal32(unsigned long sig, siginfo_t *info,
 		unsigned int frame)
 {
 	struct sigcontext32_struct *sc;
-	struct rt_sigframe_32 *rt_stack_frame;
-	siginfo_t32 siginfo32bit;
+	struct rt_sigframe_32 *rt_sf;
 	struct k_sigaction *ka = &current->sig->action[sig-1];
 	if (regs->trap == 0x0C00		/* System Call! */
...

@@ -986,42 +949,35 @@ static void handle_signal32(unsigned long sig, siginfo_t *info,
 	/*
 	 * Set up the signal frame
-	 * Determine if an real time frame - siginfo required
+	 * Determine if a real time frame and a siginfo is required
 	 */
 	if (ka->sa.sa_flags & SA_SIGINFO) {
-		siginfo64to32(&siginfo32bit, info);
 		*newspp -= sizeof
(
*
rt_sf
);
*
newspp
-=
sizeof
(
*
rt_stack_frame
);
rt_sf
=
(
struct
rt_sigframe_32
*
)(
u64
)(
*
newspp
);
rt_stack_frame
=
(
struct
rt_sigframe_32
*
)(
u64
)(
*
newspp
);
if
(
verify_area
(
VERIFY_WRITE
,
rt_sf
,
sizeof
(
*
rt_sf
)))
if
(
verify_area
(
VERIFY_WRITE
,
rt_stack_frame
,
sizeof
(
*
rt_stack_frame
)))
goto
badframe
;
goto
badframe
;
if
(
__put_user
((
u32
)(
u64
)
ka
->
sa
.
sa_handler
,
if
(
__put_user
((
u32
)(
u64
)
ka
->
sa
.
sa_handler
,
&
rt_stack_frame
->
uc
.
uc_mcontext
.
handler
)
&
rt_sf
->
uc
.
uc_mcontext
.
handler
)
||
__put_user
((
u32
)(
u64
)
&
rt_stack_frame
->
info
,
||
__put_user
((
u32
)(
u64
)
&
rt_sf
->
info
,
&
rt_sf
->
pinfo
)
&
rt_stack_frame
->
pinfo
)
||
__put_user
((
u32
)(
u64
)
&
rt_sf
->
uc
,
&
rt_sf
->
puc
)
||
__put_user
((
u32
)(
u64
)
&
rt_stack_frame
->
uc
,
&
rt_stack_frame
->
puc
)
/* put the siginfo on the user stack */
/* put the siginfo on the user stack */
||
__copy_to_user
(
&
rt_stack_frame
->
info
,
&
siginfo32bit
,
||
copy_siginfo_to_user32
(
&
rt_sf
->
info
,
info
)
sizeof
(
siginfo32bit
))
/* set the ucontext on the user stack */
/* set the ucontext on the user stack */
||
__put_user
(
0
,
&
rt_stack_frame
->
uc
.
uc_flags
)
||
__put_user
(
0
,
&
rt_sf
->
uc
.
uc_flags
)
||
__put_user
(
0
,
&
rt_stack_frame
->
uc
.
uc_link
)
||
__put_user
(
0
,
&
rt_sf
->
uc
.
uc_link
)
||
__put_user
(
current
->
sas_ss_sp
,
||
__put_user
(
current
->
sas_ss_sp
,
&
rt_sf
->
uc
.
uc_stack
.
ss_sp
)
&
rt_stack_frame
->
uc
.
uc_stack
.
ss_sp
)
||
__put_user
(
sas_ss_flags
(
regs
->
gpr
[
1
]),
||
__put_user
(
sas_ss_flags
(
regs
->
gpr
[
1
]),
&
rt_s
tack_frame
->
uc
.
uc_stack
.
ss_flags
)
&
rt_s
f
->
uc
.
uc_stack
.
ss_flags
)
||
__put_user
(
current
->
sas_ss_size
,
||
__put_user
(
current
->
sas_ss_size
,
&
rt_s
tack_frame
->
uc
.
uc_stack
.
ss_size
)
&
rt_s
f
->
uc
.
uc_stack
.
ss_size
)
||
__copy_to_user
(
&
rt_s
tack_frame
->
uc
.
uc_sigmask
,
||
__copy_to_user
(
&
rt_s
f
->
uc
.
uc_sigmask
,
oldset
,
sizeof
(
*
oldset
))
oldset
,
sizeof
(
*
oldset
))
/* point the mcontext.regs to the pramble register frame */
/* point the mcontext.regs to the pramble register frame */
||
__put_user
(
frame
,
&
rt_s
tack_frame
->
uc
.
uc_mcontext
.
regs
)
||
__put_user
(
frame
,
&
rt_s
f
->
uc
.
uc_mcontext
.
regs
)
||
__put_user
(
sig
,
&
rt_s
tack_frame
->
uc
.
uc_mcontext
.
signal
))
||
__put_user
(
sig
,
&
rt_s
f
->
uc
.
uc_mcontext
.
signal
))
goto
badframe
;
goto
badframe
;
}
else
{
}
else
{
/* Put a
nother
sigcontext on the stack */
/* Put a sigcontext on the stack */
*
newspp
-=
sizeof
(
*
sc
);
*
newspp
-=
sizeof
(
*
sc
);
sc
=
(
struct
sigcontext32_struct
*
)(
u64
)
*
newspp
;
sc
=
(
struct
sigcontext32_struct
*
)(
u64
)
*
newspp
;
if
(
verify_area
(
VERIFY_WRITE
,
sc
,
sizeof
(
*
sc
)))
if
(
verify_area
(
VERIFY_WRITE
,
sc
,
sizeof
(
*
sc
)))
...
@@ -1048,7 +1004,6 @@ static void handle_signal32(unsigned long sig, siginfo_t *info,
...
@@ -1048,7 +1004,6 @@ static void handle_signal32(unsigned long sig, siginfo_t *info,
recalc_sigpending
();
recalc_sigpending
();
spin_unlock_irq
(
&
current
->
sigmask_lock
);
spin_unlock_irq
(
&
current
->
sigmask_lock
);
}
}
return
;
return
;
badframe:
badframe:
...
@@ -1068,7 +1023,7 @@ static void handle_signal32(unsigned long sig, siginfo_t *info,
...
@@ -1068,7 +1023,7 @@ static void handle_signal32(unsigned long sig, siginfo_t *info,
* sigaltatck sys32_sigaltstack
* sigaltatck sys32_sigaltstack
*/
*/
asmlinkage
int
sys32_sigaltstack
(
u32
newstack
,
u32
oldstack
,
int
p3
,
int
sys32_sigaltstack
(
u32
newstack
,
u32
oldstack
,
int
p3
,
int
p4
,
int
p6
,
int
p7
,
struct
pt_regs
*
regs
)
int
p4
,
int
p6
,
int
p7
,
struct
pt_regs
*
regs
)
{
{
stack_t
uss
,
uoss
;
stack_t
uss
,
uoss
;
...
@@ -1114,7 +1069,7 @@ asmlinkage int sys32_sigaltstack(u32 newstack, u32 oldstack, int p3,
...
@@ -1114,7 +1069,7 @@ asmlinkage int sys32_sigaltstack(u32 newstack, u32 oldstack, int p3,
/*
/*
* Start of do_signal32 routine
* Start of do_signal32 routine
*
*
* This routine gets control when a pe
m
ding signal needs to be processed
* This routine gets control when a pe
n
ding signal needs to be processed
* in the 32 bit target thread -
* in the 32 bit target thread -
*
*
* It handles both rt and non-rt signals
* It handles both rt and non-rt signals
...
@@ -1141,13 +1096,13 @@ int do_signal32(sigset_t *oldset, struct pt_regs *regs)
...
@@ -1141,13 +1096,13 @@ int do_signal32(sigset_t *oldset, struct pt_regs *regs)
signr
=
get_signal_to_deliver
(
&
info
,
regs
);
signr
=
get_signal_to_deliver
(
&
info
,
regs
);
if
(
signr
>
0
)
{
if
(
signr
>
0
)
{
ka
=
&
current
->
sig
->
action
[
signr
-
1
];
ka
=
&
current
->
sig
->
action
[
signr
-
1
];
if
((
ka
->
sa
.
sa_flags
&
SA_ONSTACK
)
if
((
ka
->
sa
.
sa_flags
&
SA_ONSTACK
)
&&
&&
(
!
on_sig_stack
(
regs
->
gpr
[
1
])))
(
!
on_sig_stack
(
regs
->
gpr
[
1
])))
newsp
=
(
current
->
sas_ss_sp
+
current
->
sas_ss_size
);
newsp
=
(
current
->
sas_ss_sp
+
current
->
sas_ss_size
);
else
else
newsp
=
regs
->
gpr
[
1
];
newsp
=
regs
->
gpr
[
1
];
newsp
=
frame
=
newsp
-
sizeof
(
struct
sigregs32
);
newsp
=
frame
=
newsp
-
sizeof
(
struct
sigregs32
);
/* Whee! Actually deliver the signal. */
/* Whee! Actually deliver the signal. */
handle_signal32
(
signr
,
&
info
,
oldset
,
regs
,
&
newsp
,
frame
);
handle_signal32
(
signr
,
&
info
,
oldset
,
regs
,
&
newsp
,
frame
);
}
}
...
@@ -1169,6 +1124,5 @@ int do_signal32(sigset_t *oldset, struct pt_regs *regs)
...
@@ -1169,6 +1124,5 @@ int do_signal32(sigset_t *oldset, struct pt_regs *regs)
setup_rt_frame32
(
regs
,
(
struct
sigregs32
*
)(
u64
)
frame
,
newsp
);
setup_rt_frame32
(
regs
,
(
struct
sigregs32
*
)(
u64
)
frame
,
newsp
);
else
else
setup_frame32
(
regs
,
(
struct
sigregs32
*
)(
u64
)
frame
,
newsp
);
setup_frame32
(
regs
,
(
struct
sigregs32
*
)(
u64
)
frame
,
newsp
);
return
1
;
return
1
;
}
}
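The signal32.c change above swaps siginfo64to32(), which filled a kernel-side siginfo_t32 and then copied it out in one go, for copy_siginfo_to_user32(), which writes each field straight to the user buffer with __put_user() and ORs the return codes together so that a single check at the end catches any fault. A small user-space sketch of that error-accumulation idiom follows; put_field(), struct dest and copy_all() are made-up stand-ins for illustration, not kernel interfaces.

#include <stdio.h>

struct dest { int a, b, c; };

/* stand-in for __put_user(): returns 0 on success, non-zero on fault */
static int put_field(int val, int *slot)
{
	if (!slot)
		return -1;
	*slot = val;
	return 0;
}

static int copy_all(struct dest *d, int a, int b, int c)
{
	int err;

	/* accumulate failures; any non-zero result survives to the end */
	err  = put_field(a, d ? &d->a : NULL);
	err |= put_field(b, d ? &d->b : NULL);
	err |= put_field(c, d ? &d->c : NULL);

	return err ? -1 : 0;
}

int main(void)
{
	struct dest d;

	printf("ok copy:  %d\n", copy_all(&d, 1, 2, 3));    /* 0  */
	printf("bad copy: %d\n", copy_all(NULL, 1, 2, 3));  /* -1 */
	return 0;
}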
arch/ppc64/kernel/smp.c
View file @
9bc8ec89
...
@@ -595,12 +595,8 @@ void __init smp_boot_cpus(void)
 	}
 
 	/*
-	 * XXX very rough. On POWER4 we optimise tlb flushes for
-	 * tasks that only run on one cpu so we increase decay ticks.
+	 * XXX very rough.
 	 */
-	if (__is_processor(PV_POWER4))
-		cache_decay_ticks = HZ/50;
-	else
-		cache_decay_ticks = HZ/100;
+	cache_decay_ticks = HZ/100;
 
 	/* Probe arch for CPUs */
...
arch/ppc64/mm/init.c
View file @
9bc8ec89
 /*
  *
  *  PowerPC version
  *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
  *
...
@@ -62,8 +60,6 @@
 #include <asm/ppcdebug.h>
 
-#define PGTOKB(pages)	(((pages) * PAGE_SIZE) >> 10)
-
 #ifdef CONFIG_PPC_ISERIES
 #include <asm/iSeries/iSeries_dma.h>
 #endif
...
@@ -78,13 +74,10 @@ extern pgd_t swapper_pg_dir[];
 extern char __init_begin, __init_end;
 extern char __chrp_begin, __chrp_end;
 extern char __openfirmware_begin, __openfirmware_end;
-extern struct _of_tce_table of_tce_table[];
 extern char _start[], _end[];
 extern char _stext[], etext[];
 extern struct task_struct *current_set[NR_CPUS];
 
-void mm_init_ppc64(void);
-
 extern pgd_t ioremap_dir[];
 pgd_t * ioremap_pgd = (pgd_t *)&ioremap_dir;
...
@@ -120,13 +113,10 @@ void show_mem(void)
 			reserved++;
 		else if (PageSwapCache(mem_map+i))
 			cached++;
-		else if (!atomic_read(&mem_map[i].count))
-			free++;
-		else
-			shared += atomic_read(&mem_map[i].count) - 1;
+		else if (page_count(mem_map+i))
+			shared += page_count(mem_map+i) - 1;
 	}
 	printk("%d pages of RAM\n", total);
-	printk("%d free pages\n", free);
 	printk("%d reserved pages\n", reserved);
 	printk("%d pages shared\n", shared);
 	printk("%d pages swap cached\n", cached);
...
@@ -302,7 +292,7 @@ flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
 	}
 }
 
-struct tlb_batch_data tlb_batch_array[NR_CPUS][MAX_BATCH_FLUSH];
+struct ppc64_tlb_batch ppc64_tlb_batch[NR_CPUS];
 
 void
 __flush_tlb_range(struct mm_struct *mm, unsigned long start, unsigned long end)
...
@@ -312,81 +302,69 @@ __flush_tlb_range(struct mm_struct *mm, unsigned long start, unsigned long end)
 	pte_t *ptep;
 	pte_t pte;
 	unsigned long pgd_end, pmd_end;
-	unsigned long context;
-	int i = 0;
-	struct tlb_batch_data *ptes = &tlb_batch_array[smp_processor_id()][0];
+	unsigned long context = 0;
+	struct ppc64_tlb_batch *batch = &ppc64_tlb_batch[smp_processor_id()];
+	unsigned long i = 0;
 	int local = 0;
 
-	if (start >= end)
-		panic("flush_tlb_range: start (%016lx) greater than end (%016lx)\n", start, end);
-
-	if (REGION_ID(start) != REGION_ID(end))
-		panic("flush_tlb_range: start (%016lx) and end (%016lx) not in same region\n", start, end);
-
-	context = 0;
-
 	switch (REGION_ID(start)) {
 	case VMALLOC_REGION_ID:
 		pgd = pgd_offset_k(start);
 		break;
 	case IO_REGION_ID:
 		pgd = pgd_offset_i(start);
 		break;
 	case USER_REGION_ID:
 		pgd = pgd_offset(mm, start);
 		context = mm->context;
 
 		/* XXX are there races with checking cpu_vm_mask? - Anton */
-		if (mm->cpu_vm_mask == (1 << smp_processor_id())) {
+		if (mm->cpu_vm_mask == (1 << smp_processor_id()))
 			local = 1;
-		}
 
 		break;
 	default:
 		panic("flush_tlb_range: invalid region for start (%016lx) and end (%016lx)\n", start, end);
 	}
 
 	do {
 		pgd_end = (start + PGDIR_SIZE) & PGDIR_MASK;
 		if (pgd_end > end)
 			pgd_end = end;
 		if (!pgd_none(*pgd)) {
 			pmd = pmd_offset(pgd, start);
 			do {
 				pmd_end = (start + PMD_SIZE) & PMD_MASK;
 				if (pmd_end > end)
 					pmd_end = end;
 				if (!pmd_none(*pmd)) {
 					ptep = pte_offset_kernel(pmd, start);
 					do {
 						if (pte_val(*ptep) & _PAGE_HASHPTE) {
 							pte = __pte(pte_update(ptep, _PAGE_HPTEFLAGS, 0));
 							if (pte_val(pte) & _PAGE_HASHPTE) {
-								ptes->pte = pte;
-								ptes->addr = start;
-								ptes++;
+								batch->pte[i] = pte;
+								batch->addr[i] = start;
 								i++;
-								if (i == MAX_BATCH_FLUSH) {
-									flush_hash_range(context, MAX_BATCH_FLUSH, local);
+								if (i == PPC64_TLB_BATCH_NR) {
+									flush_hash_range(context, i, local);
 									i = 0;
-									ptes = &tlb_batch_array[smp_processor_id()][0];
 								}
 							}
 						}
 						start += PAGE_SIZE;
 						++ptep;
 					} while (start < pmd_end);
-				} else {
+				} else
 					start = pmd_end;
-				}
 				++pmd;
 			} while (start < pgd_end);
-		} else {
+		} else
 			start = pgd_end;
-		}
 		++pgd;
 	} while (start < end);
 
 	if (i)
 		flush_hash_range(context, i, local);
...
@@ -463,7 +441,6 @@ void __init do_init_bootmem(void)
 	unsigned long start, bootmap_pages;
 	unsigned long total_pages = lmb_end_of_DRAM() >> PAGE_SHIFT;
 
-	PPCDBG(PPCDBG_MMINIT, "do_init_bootmem: start\n");
 	/*
 	 * Find an area to use for the bootmem bitmap.  Calculate the size of
 	 * bitmap required as (Total Memory) / PAGE_SIZE / BITS_PER_BYTE.
...
@@ -472,21 +449,16 @@ void __init do_init_bootmem(void)
 	bootmap_pages = bootmem_bootmap_pages(total_pages);
 
 	start = (unsigned long)__a2p(lmb_alloc(bootmap_pages << PAGE_SHIFT, PAGE_SIZE));
 	if (start == 0) {
 		udbg_printf("do_init_bootmem: failed to allocate a bitmap.\n");
 		udbg_printf("\tbootmap_pages = 0x%lx.\n", bootmap_pages);
 		PPCDBG_ENTER_DEBUGGER();
 	}
 
-	PPCDBG(PPCDBG_MMINIT, "\tstart = 0x%lx\n", start);
-	PPCDBG(PPCDBG_MMINIT, "\tbootmap_pages = 0x%lx\n", bootmap_pages);
-	PPCDBG(PPCDBG_MMINIT, "\tphysicalMemorySize = 0x%lx\n", naca->physicalMemorySize);
-
 	boot_mapsize = init_bootmem(start >> PAGE_SHIFT, total_pages);
 
-	PPCDBG(PPCDBG_MMINIT, "\tboot_mapsize = 0x%lx\n", boot_mapsize);
-
 	/* add all physical memory to the bootmem map */
 	for (i=0; i < lmb.memory.cnt; i++) {
 		unsigned long physbase, size;
 		unsigned long type = lmb.memory.region[i].type;
...
@@ -497,19 +469,14 @@ void __init do_init_bootmem(void)
 		size = lmb.memory.region[i].size;
 		free_bootmem(physbase, size);
 	}
 
 	/* reserve the sections we're already using */
 	for (i=0; i < lmb.reserved.cnt; i++) {
 		unsigned long physbase = lmb.reserved.region[i].physbase;
 		unsigned long size = lmb.reserved.region[i].size;
-#if 0 /* PPPBBB */
-		if ( (physbase == 0) && (size < (16<<20)) ) {
-			size = 16 << 20;
-		}
-#endif
 		reserve_bootmem(physbase, size);
 	}
-
-	PPCDBG(PPCDBG_MMINIT, "do_init_bootmem: end\n");
 }
 
 /*
...
@@ -522,7 +489,7 @@ void __init paging_init(void)
 	/*
 	 * All pages are DMA-able so we put them all in the DMA zone.
 	 */
-	zones_size[0] = lmb_end_of_DRAM() >> PAGE_SHIFT;
+	zones_size[ZONE_DMA] = lmb_end_of_DRAM() >> PAGE_SHIFT;
 	for (i = 1; i < MAX_NR_ZONES; i++)
 		zones_size[i] = 0;
 	free_area_init(zones_size);
...
@@ -554,14 +521,6 @@ void __init mem_init(void)
 	totalram_pages += free_all_bootmem();
 
-	ifppcdebug(PPCDBG_MMINIT) {
-		udbg_printf("mem_init: totalram_pages = 0x%lx\n", totalram_pages);
-		udbg_printf("mem_init: va_rtas_base = 0x%lx\n", va_rtas_base);
-		udbg_printf("mem_init: va_rtas_end = 0x%lx\n", PAGE_ALIGN(va_rtas_base+rtas.size));
-		udbg_printf("mem_init: pinned start = 0x%lx\n", __va(0));
-		udbg_printf("mem_init: pinned end = 0x%lx\n", PAGE_ALIGN(klimit));
-	}
-
 	if (sysmap_size)
 		for (addr = (unsigned long)sysmap;
 		     addr < PAGE_ALIGN((unsigned long)sysmap+sysmap_size) ;
...
@@ -613,6 +572,12 @@ void flush_dcache_page(struct page *page)
 void flush_icache_page(struct vm_area_struct *vma, struct page *page)
 {
+	if (__is_processor(PV_POWER4))
+		return;
+
+	if ((vma->vm_flags & VM_EXEC) == 0)
+		return;
+
 	if (page->mapping && !PageReserved(page)
 	    && !test_bit(PG_arch_1, &page->flags)) {
 		__flush_dcache_icache(page_address(page));
...
@@ -620,14 +585,34 @@ void flush_icache_page(struct vm_area_struct *vma, struct page *page)
 	}
 }
 
-void clear_user_page(void *page, unsigned long vaddr)
+void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
 {
 	clear_page(page);
-	__flush_dcache_icache(page);
+
+	/* XXX we shouldnt have to do this, but glibc requires it */
+	if (__is_processor(PV_POWER4))
+		clear_bit(PG_arch_1, &pg->flags);
+	else
+		__flush_dcache_icache(page);
 }
 
-void copy_user_page(void *vto, void *vfrom, unsigned long vaddr)
+void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
+		    struct page *pg)
 {
 	copy_page(vto, vfrom);
-	__flush_dcache_icache(vto);
+
+	/*
+	 * Unfortunately we havent always marked our GOT and PLT sections
+	 * as executable, so we need to flush all file regions - Anton
+	 */
+#if 0
+	if (!vma->vm_file && ((vma->vm_flags & VM_EXEC) == 0))
+		return;
+#endif
+
+	if (__is_processor(PV_POWER4))
+		clear_bit(PG_arch_1, &pg->flags);
+	else
+		__flush_dcache_icache(vto);
 }
...
@@ -642,7 +627,7 @@ void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
 extern pte_t *find_linux_pte(pgd_t *pgdir, unsigned long ea);
 int __hash_page(unsigned long ea, unsigned long access, unsigned long vsid,
-		pte_t *ptep);
+		pte_t *ptep, unsigned long trap);
 
 /*
  * This is called at the end of handling a user page fault, when the
...
@@ -670,5 +655,6 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long ea,
 	ptep = find_linux_pte(pgdir, ea);
 	vsid = get_vsid(vma->vm_mm->context, ea);
 
-	__hash_page(ea, pte_val(pte) & (_PAGE_USER|_PAGE_RW), vsid, ptep);
+	__hash_page(ea, pte_val(pte) & (_PAGE_USER|_PAGE_RW), vsid, ptep,
+		    0x300);
 }
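__flush_tlb_range() above now feeds the per-cpu ppc64_tlb_batch introduced in include/asm-ppc64/tlb.h: hashed PTEs are collected until the batch holds PPC64_TLB_BATCH_NR entries, flushed in one flush_hash_range() call, and any leftover entries are flushed after the page-table walk completes. A stand-alone sketch of that batching shape, with BATCH_NR, struct batch, queue_flush() and flush_range() as illustrative stand-ins rather than kernel interfaces:

#include <stdio.h>

#define BATCH_NR 4	/* the kernel uses PPC64_TLB_BATCH_NR (192) */

struct batch {
	unsigned long addr[BATCH_NR];
	unsigned long index;
};

/* stand-in for flush_hash_range(): invalidate 'count' queued entries */
static void flush_range(struct batch *b, unsigned long count)
{
	printf("flushing %lu entries, first=0x%lx\n", count, b->addr[0]);
	b->index = 0;
}

static void queue_flush(struct batch *b, unsigned long addr)
{
	b->addr[b->index++] = addr;
	if (b->index == BATCH_NR)	/* batch full: flush eagerly */
		flush_range(b, BATCH_NR);
}

int main(void)
{
	struct batch b = { .index = 0 };
	unsigned long addr;

	/* queue ten page addresses; flushes fire at 4 and 8 entries */
	for (addr = 0x1000; addr < 0x1000 + 10 * 0x1000; addr += 0x1000)
		queue_flush(&b, addr);

	if (b.index)			/* flush whatever is left over */
		flush_range(&b, b.index);
	return 0;
}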
arch/ppc64/xmon/xmon.c
View file @
9bc8ec89
...
@@ -123,11 +123,7 @@ static void mem_translate(void);
 static void mem_check(void);
 static void mem_find_real(void);
 static void mem_find_vsid(void);
-static void mem_check_pagetable_vsids(void);
-static void mem_map_check_slab(void);
-static void mem_map_lock_pages(void);
-static void mem_check_dup_rpn(void);
 static void debug_trace(void);
 
 extern int print_insn_big_powerpc(FILE *, unsigned long, unsigned long);
...
@@ -642,27 +638,15 @@ cmds(struct pt_regs *excp)
 			case 'c':
 				mem_check();
 				break;
-			case 'j':
-				mem_map_check_slab();
-				break;
 			case 'f':
 				mem_find_real();
 				break;
 			case 'e':
 				mem_find_vsid();
 				break;
-			case 'r':
-				mem_check_dup_rpn();
-				break;
 			case 'i':
 				show_mem();
 				break;
-			case 'o':
-				mem_check_pagetable_vsids();
-				break;
-			case 'q':
-				mem_map_lock_pages();
-				break;
 			default:
 				termch = cmd;
 				memex();
...
@@ -2458,249 +2442,6 @@ void mem_find_vsid()
 	printf("\nDone -------------------\n");
 }
 
-void mem_map_check_slab()
-{
-	int i, slab_count;
-
-	i = max_mapnr;
-	slab_count = 0;
-
-	while (i-- > 0) {
-		if (PageSlab(mem_map+i)) {
-			printf(" slab entry - mem_map entry =%p \n", mem_map+i);
-			slab_count++;
-		}
-	}
-	printf(" count of pages for slab = %d \n", slab_count);
-}
-
-void mem_map_lock_pages()
-{
-	int i, lock_count;
-
-	i = max_mapnr;
-	lock_count = 0;
-
-	while (i-- > 0) {
-		if (PageLocked(mem_map+i)) {
-			printf(" locked entry - mem_map entry =%p \n", mem_map+i);
-			lock_count++;
-		}
-	}
-	printf(" count of locked pages = %d \n", lock_count);
-}
-
-void mem_check_dup_rpn()
-{
-	unsigned long htab_size_bytes;
-	unsigned long htab_end;
-	unsigned long last_rpn;
-	HPTE *hpte1, *hpte2;
-	int dup_count;
-	struct task_struct *p;
-	unsigned long kernel_vsid_c0, kernel_vsid_c1, kernel_vsid_c2, kernel_vsid_c3;
-	unsigned long kernel_vsid_c4, kernel_vsid_c5, kernel_vsid_d, kernel_vsid_e;
-	unsigned long kernel_vsid_f;
-	unsigned long vsid0, vsid1, vsidB, vsid2;
-
-	htab_size_bytes = htab_data.htab_num_ptegs * 128; // 128B / PTEG
-	htab_end = (unsigned long)htab_data.htab + htab_size_bytes;
-	// last_rpn = (naca->physicalMemorySize-1) >> PAGE_SHIFT;
-	last_rpn = 0xfffff;
-
-	printf("\nHardware Page Table Check\n-------------------\n");
-	printf("htab base : %.16lx\n", htab_data.htab);
-	printf("htab size : %.16lx\n", htab_size_bytes);
-
-	for (hpte1 = htab_data.htab; hpte1 < (HPTE *)htab_end; hpte1++) {
-		if (hpte1->dw0.dw0.v != 0) {
-			if (hpte1->dw1.dw1.rpn <= last_rpn) {
-				dup_count = 0;
-				for (hpte2 = hpte1+1; hpte2 < (HPTE *)htab_end; hpte2++) {
-					if (hpte2->dw0.dw0.v != 0) {
-						if (hpte1->dw1.dw1.rpn == hpte2->dw1.dw1.rpn)
-							dup_count++;
-					}
-				}
-				if (dup_count > 5) {
-					printf(" Duplicate rpn: %.13lx \n", (hpte1->dw1.dw1.rpn));
-					printf(" mem map array entry %p count = %d \n",
-					       (mem_map+(hpte1->dw1.dw1.rpn)), (mem_map+(hpte1->dw1.dw1.rpn))->count);
-					for (hpte2 = hpte1+1; hpte2 < (HPTE *)htab_end; hpte2++) {
-						if (hpte2->dw0.dw0.v != 0) {
-							if (hpte1->dw1.dw1.rpn == hpte2->dw1.dw1.rpn)
-								printf(" hpte2: %16.16lx *hpte2: %16.16lx %16.16lx\n",
-								       hpte2, hpte2->dw0.dword0, hpte2->dw1.dword1);
-						}
-					}
-				}
-			} else {
-				printf(" Bogus rpn: %.13lx \n", (hpte1->dw1.dw1.rpn));
-				printf(" hpte: %16.16lx *hpte: %16.16lx %16.16lx\n",
-				       hpte1, hpte1->dw0.dword0, hpte1->dw1.dword1);
-			}
-		}
-		if (xmon_interrupted())
-			return;
-	}
-
-	// print the kernel vsids
-	kernel_vsid_c0 = get_kernel_vsid(0xC000000000000000);
-	kernel_vsid_c1 = get_kernel_vsid(0xC000000010000000);
-	kernel_vsid_c2 = get_kernel_vsid(0xC000000020000000);
-	kernel_vsid_c3 = get_kernel_vsid(0xC000000030000000);
-	kernel_vsid_c4 = get_kernel_vsid(0xC000000040000000);
-	kernel_vsid_c5 = get_kernel_vsid(0xC000000050000000);
-	kernel_vsid_d  = get_kernel_vsid(0xD000000000000000);
-	kernel_vsid_e  = get_kernel_vsid(0xE000000000000000);
-	kernel_vsid_f  = get_kernel_vsid(0xF000000000000000);
-
-	printf(" kernel vsid - seg c0 = %lx\n", kernel_vsid_c0);
-	printf(" kernel vsid - seg c1 = %lx\n", kernel_vsid_c1);
-	printf(" kernel vsid - seg c2 = %lx\n", kernel_vsid_c2);
-	printf(" kernel vsid - seg c3 = %lx\n", kernel_vsid_c3);
-	printf(" kernel vsid - seg c4 = %lx\n", kernel_vsid_c4);
-	printf(" kernel vsid - seg c5 = %lx\n", kernel_vsid_c5);
-	printf(" kernel vsid - seg d = %lx\n", kernel_vsid_d);
-	printf(" kernel vsid - seg e = %lx\n", kernel_vsid_e);
-	printf(" kernel vsid - seg f = %lx\n", kernel_vsid_f);
-
-	// print a list of valid vsids for the tasks
-	read_lock(&tasklist_lock);
-	for_each_task(p)
-		if (p->mm) {
-			struct mm_struct *mm = p->mm;
-			printf(" task = %p mm = %lx pgd %lx\n", p, mm, mm->pgd);
-			vsid0 = get_vsid(mm->context, 0);
-			vsid1 = get_vsid(mm->context, 0x10000000);
-			vsid2 = get_vsid(mm->context, 0x20000000);
-			vsidB = get_vsid(mm->context, 0xB0000000);
-			printf(" context = %lx vsid seg 0 = %lx\n", mm->context, vsid0);
-			printf("                 vsid seg 1 = %lx\n", vsid1);
-			printf("                 vsid seg 2 = %lx\n", vsid2);
-			printf("                 vsid seg 2 = %lx\n", vsidB);
-			printf("\n");
-		};
-	read_unlock(&tasklist_lock);
-
-	printf("\nDone -------------------\n");
-}
-
-void mem_check_pagetable_vsids()
-{
-	unsigned long htab_size_bytes;
-	unsigned long htab_end;
-	unsigned long last_rpn;
-	struct task_struct *p;
-	unsigned long valid_table_count, invalid_table_count, bogus_rpn_count;
-	int found;
-	unsigned long user_address_table_count, kernel_page_table_count;
-	unsigned long pt_vsid;
-	HPTE *hpte1;
-
-	htab_size_bytes = htab_data.htab_num_ptegs * 128; // 128B / PTEG
-	htab_end = (unsigned long)htab_data.htab + htab_size_bytes;
-	// last_rpn = (naca->physicalMemorySize-1) >> PAGE_SHIFT;
-	last_rpn = 0xfffff;
-
-	printf("\nHardware Page Table Check\n-------------------\n");
-	printf("htab base : %.16lx\n", htab_data.htab);
-	printf("htab size : %.16lx\n", htab_size_bytes);
-
-	valid_table_count = 0;
-	invalid_table_count = 0;
-	bogus_rpn_count = 0;
-	user_address_table_count = 0;
-	kernel_page_table_count = 0;
-
-	for (hpte1 = htab_data.htab; hpte1 < (HPTE *)htab_end; hpte1++) {
-		if (hpte1->dw0.dw0.v != 0) {
-			valid_table_count++;
-			if (hpte1->dw1.dw1.rpn <= last_rpn) {
-				pt_vsid = (hpte1->dw0.dw0.avpn) >> 5;
-				if ((pt_vsid == get_kernel_vsid(0xC000000000000000)) |
-				    (pt_vsid == get_kernel_vsid(0xC000000010000000)) |
-				    (pt_vsid == get_kernel_vsid(0xC000000020000000)) |
-				    (pt_vsid == get_kernel_vsid(0xC000000030000000)) |
-				    (pt_vsid == get_kernel_vsid(0xC000000040000000)) |
-				    (pt_vsid == get_kernel_vsid(0xC000000050000000)) |
-				    (pt_vsid == get_kernel_vsid(0xD000000000000000)) |
-				    (pt_vsid == get_kernel_vsid(0xE000000000000000)) |
-				    (pt_vsid == get_kernel_vsid(0xF000000000000000))) {
-					kernel_page_table_count++;
-				} else {
-					read_lock(&tasklist_lock);
-					found = 0;
-					for_each_task(p) {
-						if (p->mm && (found == 0)) {
-							struct mm_struct *mm = p->mm;
-							if ((pt_vsid == get_vsid(mm->context, 0)) |
-							    (pt_vsid == get_vsid(mm->context, 0x10000000)) |
-							    (pt_vsid == get_vsid(mm->context, 0x20000000)) |
-							    (pt_vsid == get_vsid(mm->context, 0x30000000)) |
-							    (pt_vsid == get_vsid(mm->context, 0x40000000)) |
-							    (pt_vsid == get_vsid(mm->context, 0x50000000)) |
-							    (pt_vsid == get_vsid(mm->context, 0x60000000)) |
-							    (pt_vsid == get_vsid(mm->context, 0x70000000)) |
-							    (pt_vsid == get_vsid(mm->context, 0x80000000)) |
-							    (pt_vsid == get_vsid(mm->context, 0x90000000)) |
-							    (pt_vsid == get_vsid(mm->context, 0xA0000000)) |
-							    (pt_vsid == get_vsid(mm->context, 0xB0000000))) {
-								user_address_table_count++;
-								found = 1;
-							}
-						}
-					}
-					read_unlock(&tasklist_lock);
-					if (found == 0) {
-						printf(" vsid not found vsid = %lx, hpte = %p \n",
-						       pt_vsid, hpte1);
-						printf(" rpn in entry = %lx \n", hpte1->dw1.dw1.rpn);
-						printf(" mem map address = %lx \n",
-						       mem_map + (hpte1->dw1.dw1.rpn));
-					} else {
-						// found
-					}
-				}
-				// good rpn
-			} else {
-				bogus_rpn_count++;
-			}
-		} else {
-			invalid_table_count++;
-		}
-	}
-
-	printf(" page table valid counts - valid entries = %lx invalid entries = %lx \n",
-	       valid_table_count, invalid_table_count);
-	printf(" bogus rpn entries ( probably io) = %lx \n", bogus_rpn_count);
-	printf(" page table counts - kernel entries = %lx user entries = %lx \n",
-	       kernel_page_table_count, user_address_table_count);
-	printf("\nDone -------------------\n");
-}
-
 static void debug_trace(void) {
 	unsigned long val, cmd, on;
...
include/asm-ppc64/bitops.h
View file @
9bc8ec89
...
@@ -259,6 +259,12 @@ static __inline__ int ffs(int x)
 	return __ilog2(i & -i) + 1;
 }
 
+/*
+ * fls: find last (most-significant) bit set.
+ * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
+ */
+#define fls(x) generic_fls(x)
+
 /*
 * hweightN: returns the hamming weight (i.e. the number
 * of bits set) of a N-bit word
...
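The new fls() definition simply maps to the kernel's generic_fls(); the documented contract is fls(0) = 0, fls(1) = 1 and fls(0x80000000) = 32. A quick user-space check of that contract, with generic_fls() re-implemented locally as fls_demo() for illustration:

#include <stdio.h>

/* portable equivalent of the kernel's generic_fls(): position of the
 * most-significant set bit, counted from 1, or 0 if no bit is set */
static int fls_demo(unsigned int x)
{
	int r = 0;

	while (x) {
		x >>= 1;
		r++;
	}
	return r;
}

int main(void)
{
	printf("fls(0)          = %d\n", fls_demo(0));           /* 0  */
	printf("fls(1)          = %d\n", fls_demo(1));           /* 1  */
	printf("fls(0x80000000) = %d\n", fls_demo(0x80000000));  /* 32 */
	return 0;
}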
include/asm-ppc64/mmzone.h
0 → 100644
View file @
9bc8ec89
/*
* Written by Kanoj Sarcar (kanoj@sgi.com) Aug 99
*
* PowerPC64 port:
* Copyright (C) 2002 Anton Blanchard, IBM Corp.
*/
#ifndef _ASM_MMZONE_H_
#define _ASM_MMZONE_H_
#include <linux/config.h>
typedef struct plat_pglist_data {
	pg_data_t	gendata;
} plat_pg_data_t;
/*
* Following are macros that are specific to this numa platform.
*/
extern plat_pg_data_t plat_node_data[];
#define MAX_NUMNODES 4
/* XXX grab this from the device tree - Anton */
#define PHYSADDR_TO_NID(pa) ((pa) >> 36)
#define PLAT_NODE_DATA(n) (&plat_node_data[(n)])
#define PLAT_NODE_DATA_STARTNR(n) \
(PLAT_NODE_DATA(n)->gendata.node_start_mapnr)
#define PLAT_NODE_DATA_SIZE(n) (PLAT_NODE_DATA(n)->gendata.node_size)
#define PLAT_NODE_DATA_LOCALNR(p, n) \
(((p) - PLAT_NODE_DATA(n)->gendata.node_start_paddr) >> PAGE_SHIFT)
#ifdef CONFIG_DISCONTIGMEM
/*
* Following are macros that each numa implmentation must define.
*/
/*
* Given a kernel address, find the home node of the underlying memory.
*/
#define KVADDR_TO_NID(kaddr) PHYSADDR_TO_NID(__pa(kaddr))
/*
* Return a pointer to the node data for node n.
*/
#define NODE_DATA(n) (&((PLAT_NODE_DATA(n))->gendata))
/*
* NODE_MEM_MAP gives the kaddr for the mem_map of the node.
*/
#define NODE_MEM_MAP(nid) (NODE_DATA(nid)->node_mem_map)
/*
* Given a kaddr, ADDR_TO_MAPBASE finds the owning node of the memory
* and returns the mem_map of that node.
*/
#define ADDR_TO_MAPBASE(kaddr) \
NODE_MEM_MAP(KVADDR_TO_NID((unsigned long)(kaddr)))
/*
* Given a kaddr, LOCAL_BASE_ADDR finds the owning node of the memory
* and returns the kaddr corresponding to first physical page in the
* node's mem_map.
*/
#define LOCAL_BASE_ADDR(kaddr) \
((unsigned long)__va(NODE_DATA(KVADDR_TO_NID(kaddr))->node_start_paddr))
#define LOCAL_MAP_NR(kvaddr) \
(((unsigned long)(kvaddr)-LOCAL_BASE_ADDR(kvaddr)) >> PAGE_SHIFT)
#if 0
/* XXX fix - Anton */
#define kern_addr_valid(kaddr) test_bit(LOCAL_MAP_NR(kaddr), \
NODE_DATA(KVADDR_TO_NID(kaddr))->valid_addr_bitmap)
#endif
#define discontigmem_pfn_to_page(pfn) \
({ \
unsigned long kaddr = (unsigned long)__va(pfn << PAGE_SHIFT); \
(ADDR_TO_MAPBASE(kaddr) + LOCAL_MAP_NR(kaddr)); \
})
#ifdef CONFIG_NUMA
/* XXX grab this from the device tree - Anton */
#define cputonode(cpu) ((cpu) >> 3)
#define numa_node_id() cputonode(smp_processor_id())
#endif /* CONFIG_NUMA */
#endif /* CONFIG_DISCONTIGMEM */
#endif /* _ASM_MMZONE_H_ */
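Until the values come from the device tree, the node mapping in mmzone.h is pure arithmetic: PHYSADDR_TO_NID(pa) is pa >> 36, so each node owns a 64GB slice of physical address space, and cputonode(cpu) is cpu >> 3, grouping cpus eight to a node. A small sketch of that arithmetic with made-up sample addresses (the macro names mirror the header but are redefined locally for the demo):

#include <stdio.h>

/* 64GB of physical address space per node */
#define PHYSADDR_TO_NID(pa)	((unsigned long long)(pa) >> 36)
/* 8 cpus per node */
#define CPU_TO_NODE(cpu)	((cpu) >> 3)

int main(void)
{
	unsigned long long pa[] = {
		0x0ULL,			/* node 0 */
		0xfffffffffULL,		/* last byte of node 0 */
		0x1000000000ULL,	/* first byte of node 1 */
		0x2000000000ULL,	/* node 2 */
	};
	size_t i;
	int cpu;

	for (i = 0; i < sizeof(pa) / sizeof(pa[0]); i++)
		printf("pa 0x%011llx -> node %llu\n", pa[i], PHYSADDR_TO_NID(pa[i]));

	for (cpu = 0; cpu < 32; cpu += 8)
		printf("cpu %2d -> node %d\n", cpu, CPU_TO_NODE(cpu));

	return 0;
}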
include/asm-ppc64/page.h
View file @
9bc8ec89
...
@@ -70,8 +70,8 @@ static __inline__ void clear_page(void *addr)
 extern void copy_page(void *to, void *from);
 struct page;
-extern void clear_user_page(void *page, unsigned long vaddr);
-extern void copy_user_page(void *to, void *from, unsigned long vaddr);
+extern void clear_user_page(void *page, unsigned long vaddr, struct page *pg);
+extern void copy_user_page(void *to, void *from, unsigned long vaddr, struct page *p);
 
 #ifdef STRICT_MM_TYPECHECKS
 /*
...
@@ -215,8 +215,15 @@ static inline int get_order(unsigned long size)
 #define __a2p(x)	((void *) absolute_to_phys(x))
 #define __a2v(x)	((void *) __va(absolute_to_phys(x)))
 
+#ifdef CONFIG_DISCONTIGMEM
+#define page_to_pfn(page)	\
+	((page) - page_zone(page)->zone_mem_map + \
+	 (page_zone(page)->zone_start_paddr >> PAGE_SHIFT))
+#define pfn_to_page(pfn)	discontigmem_pfn_to_page(pfn)
+#else
 #define pfn_to_page(pfn)	(mem_map + (pfn))
-#define page_to_pfn(pfn)	((unsigned long)((pfn) - mem_map))
+#define page_to_pfn(page)	((unsigned long)((page) - mem_map))
+#endif
 #define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
 
 #define pfn_valid(pfn)		((pfn) < max_mapnr)
...
include/asm-ppc64/pgalloc.h
View file @
9bc8ec89
...
@@ -53,6 +53,8 @@ pmd_free(pmd_t *pmd)
 	free_page((unsigned long)pmd);
 }
 
+#define pmd_free_tlb(tlb, pmd)		pmd_free(pmd)
+
 #define pmd_populate_kernel(mm, pmd, pte) pmd_set(pmd, pte)
 #define pmd_populate(mm, pmd, pte_page) \
	pmd_populate_kernel(mm, pmd, page_address(pte_page))
...
@@ -86,6 +88,7 @@ pte_free_kernel(pte_t *pte)
 }
 
 #define pte_free(pte_page)	pte_free_kernel(page_address(pte_page))
+#define pte_free_tlb(tlb, pte)	pte_free(pte)
 
 #define check_pgt_cache()	do { } while (0)
...
include/asm-ppc64/tlb.h
View file @
9bc8ec89
/*
* TLB shootdown specifics for PPC64
*
* Copyright (C) 2002 Anton Blanchard, IBM Corp.
* Copyright (C) 2002 Paul Mackerras, IBM Corp.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#ifndef _PPC64_TLB_H
#define _PPC64_TLB_H
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/page.h>
#include <asm/mmu.h>
struct free_pte_ctx;
static inline void tlb_flush(struct free_pte_ctx *tlb);

/* Get the generic bits... */
#include <asm-generic/tlb.h>

/* Nothing needed here in fact... */
#define tlb_start_vma(tlb, vma)	do { } while (0)
#define tlb_end_vma(tlb, vma)	do { } while (0)

/* Should make this at least as large as the generic batch size, but it
 * takes up too much space */
#define PPC64_TLB_BATCH_NR 192

struct ppc64_tlb_batch {
	unsigned long index;
	pte_t pte[PPC64_TLB_BATCH_NR];
	unsigned long addr[PPC64_TLB_BATCH_NR];
	unsigned long vaddr[PPC64_TLB_BATCH_NR];
};
extern struct ppc64_tlb_batch ppc64_tlb_batch[NR_CPUS];

static inline void tlb_remove_tlb_entry(mmu_gather_t *tlb, pte_t *ptep,
					unsigned long address)
{
	int cpu = smp_processor_id();
	struct ppc64_tlb_batch *batch = &ppc64_tlb_batch[cpu];
	unsigned long i = batch->index;
	pte_t pte;

	if (pte_val(*ptep) & _PAGE_HASHPTE) {
		pte = __pte(pte_update(ptep, _PAGE_HPTEFLAGS, 0));
		if (pte_val(pte) & _PAGE_HASHPTE) {
			int local = 0;

			if (tlb->mm->cpu_vm_mask == (1 << cpu))
				local = 1;

			batch->pte[i] = pte;
			batch->addr[i] = address;
			i++;

			if (i == PPC64_TLB_BATCH_NR) {
				flush_hash_range(tlb->mm->context, i, local);
				i = 0;
			}
		}
	}

	batch->index = i;
}

static inline void tlb_flush(struct free_pte_ctx *tlb)
{
	int cpu = smp_processor_id();
	struct ppc64_tlb_batch *batch = &ppc64_tlb_batch[cpu];
	int local = 0;

	if (tlb->mm->cpu_vm_mask == (1 << smp_processor_id()))
		local = 1;

	flush_hash_range(tlb->mm->context, batch->index, local);
	batch->index = 0;
}

#endif /* _PPC64_TLB_H */
include/asm-ppc64/tlbflush.h
View file @
9bc8ec89
...
@@ -35,12 +35,4 @@ extern void flush_hash_page(unsigned long context, unsigned long ea, pte_t pte,
 		     int local);
 void flush_hash_range(unsigned long context, unsigned long number, int local);
 
-/* TLB flush batching */
-#define MAX_BATCH_FLUSH 128
-struct tlb_batch_data {
-	pte_t pte;
-	unsigned long addr;
-};
-extern struct tlb_batch_data tlb_batch_array[NR_CPUS][MAX_BATCH_FLUSH];
-
 #endif /* _PPC64_TLBFLUSH_H */