Skip to content
Projects
Groups
Snippets
Help
Loading...
Help
Support
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
L
linux
Project overview
Project overview
Details
Activity
Releases
Repository
Repository
Files
Commits
Branches
Tags
Contributors
Graph
Compare
Issues
0
Issues
0
List
Boards
Labels
Milestones
Merge Requests
0
Merge Requests
0
Analytics
Analytics
Repository
Value Stream
Wiki
Wiki
Snippets
Snippets
Members
Members
Collapse sidebar
Close sidebar
Activity
Graph
Create a new issue
Commits
Issue Boards
Open sidebar
Kirill Smelkov
linux
Commits
2cb54cac
Commit
2cb54cac
authored
Aug 03, 2002
by
Anton Blanchard
Browse files
Options
Browse Files
Download
Plain Diff
Merge samba.org:/scratch/anton/linux-2.5
into samba.org:/scratch/anton/linux-2.5_work
parents
cd03e0b9
7a3182b1
Changes
21
Show whitespace changes
Inline
Side-by-side
Showing
21 changed files
with
393 additions
and
350 deletions
+393
-350
arch/ppc64/defconfig
arch/ppc64/defconfig
+19
-3
arch/ppc64/kernel/i8259.c
arch/ppc64/kernel/i8259.c
+2
-1
arch/ppc64/kernel/irq.c
arch/ppc64/kernel/irq.c
+6
-13
arch/ppc64/kernel/misc.S
arch/ppc64/kernel/misc.S
+10
-7
arch/ppc64/kernel/open_pic.c
arch/ppc64/kernel/open_pic.c
+1
-1
arch/ppc64/kernel/pSeries_lpar.c
arch/ppc64/kernel/pSeries_lpar.c
+7
-14
arch/ppc64/kernel/prom.c
arch/ppc64/kernel/prom.c
+1
-0
arch/ppc64/kernel/smp.c
arch/ppc64/kernel/smp.c
+171
-229
arch/ppc64/kernel/sys_ppc32.c
arch/ppc64/kernel/sys_ppc32.c
+16
-14
arch/ppc64/kernel/time.c
arch/ppc64/kernel/time.c
+0
-3
arch/ppc64/kernel/traps.c
arch/ppc64/kernel/traps.c
+2
-3
arch/ppc64/kernel/xics.c
arch/ppc64/kernel/xics.c
+1
-1
include/asm-ppc64/hardirq.h
include/asm-ppc64/hardirq.h
+75
-10
include/asm-ppc64/machdep.h
include/asm-ppc64/machdep.h
+16
-8
include/asm-ppc64/mmzone.h
include/asm-ppc64/mmzone.h
+5
-2
include/asm-ppc64/rwsem.h
include/asm-ppc64/rwsem.h
+39
-1
include/asm-ppc64/smp.h
include/asm-ppc64/smp.h
+0
-1
include/asm-ppc64/softirq.h
include/asm-ppc64/softirq.h
+8
-16
include/asm-ppc64/system.h
include/asm-ppc64/system.h
+6
-14
include/asm-ppc64/thread_info.h
include/asm-ppc64/thread_info.h
+3
-0
include/asm-ppc64/unistd.h
include/asm-ppc64/unistd.h
+5
-9
No files found.
arch/ppc64/defconfig
View file @
2cb54cac
...
...
@@ -487,10 +487,26 @@ CONFIG_VIOPATH=y
#
CONFIG_VT=y
CONFIG_VT_CONSOLE=y
CONFIG_SERIAL=y
CONFIG_SERIAL_CONSOLE=y
# CONFIG_SERIAL_EXTENDED is not set
# CONFIG_SERIAL_NONSTANDARD is not set
#
# Serial drivers
#
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
# CONFIG_SERIAL_8250_CS is not set
# CONFIG_SERIAL_8250_EXTENDED is not set
# CONFIG_SERIAL_8250_MANY_PORTS is not set
# CONFIG_SERIAL_8250_SHARE_IRQ is not set
# CONFIG_SERIAL_8250_DETECT_IRQ is not set
# CONFIG_SERIAL_8250_MULTIPORT is not set
# CONFIG_SERIAL_8250_RSA is not set
#
# Non-8250 serial port support
#
CONFIG_SERIAL_CORE=y
CONFIG_SERIAL_CORE_CONSOLE=y
CONFIG_UNIX98_PTYS=y
CONFIG_UNIX98_PTY_COUNT=256
CONFIG_HVC_CONSOLE=y
...
...
arch/ppc64/kernel/i8259.c
View file @
2cb54cac
...
...
@@ -123,7 +123,8 @@ static void i8259_unmask_irq(unsigned int irq_nr)
static
void
i8259_end_irq
(
unsigned
int
irq
)
{
if
(
!
(
irq_desc
[
irq
].
status
&
(
IRQ_DISABLED
|
IRQ_INPROGRESS
)))
if
(
!
(
irq_desc
[
irq
].
status
&
(
IRQ_DISABLED
|
IRQ_INPROGRESS
))
&&
irq_desc
[
irq
].
action
)
i8259_unmask_irq
(
irq
);
}
...
...
arch/ppc64/kernel/irq.c
View file @
2cb54cac
...
...
@@ -169,10 +169,8 @@ setup_irq(unsigned int irq, struct irqaction * new)
inline
void
synchronize_irq
(
unsigned
int
irq
)
{
while
(
irq_desc
[
irq
].
status
&
IRQ_INPROGRESS
)
{
barrier
();
while
(
irq_desc
[
irq
].
status
&
IRQ_INPROGRESS
)
cpu_relax
();
}
}
#endif
/* CONFIG_SMP */
...
...
@@ -500,7 +498,7 @@ void ppc_irq_dispatch_handler(struct pt_regs *regs, int irq)
* use the action we have.
*/
action
=
NULL
;
if
(
!
(
status
&
(
IRQ_DISABLED
|
IRQ_INPROGRESS
)))
{
if
(
likely
(
!
(
status
&
(
IRQ_DISABLED
|
IRQ_INPROGRESS
)
)))
{
action
=
desc
->
action
;
if
(
!
action
||
!
action
->
handler
)
{
ppc_spurious_interrupts
++
;
...
...
@@ -527,10 +525,9 @@ void ppc_irq_dispatch_handler(struct pt_regs *regs, int irq)
a different instance of this same irq, the other processor
will take care of it.
*/
if
(
!
action
)
if
(
unlikely
(
!
action
)
)
goto
out
;
/*
* Edge triggered interrupts need to remember
* pending events.
...
...
@@ -546,12 +543,12 @@ void ppc_irq_dispatch_handler(struct pt_regs *regs, int irq)
handle_irq_event
(
irq
,
regs
,
action
);
spin_lock
(
&
desc
->
lock
);
if
(
!
(
desc
->
status
&
IRQ_PENDING
))
if
(
likely
(
!
(
desc
->
status
&
IRQ_PENDING
)
))
break
;
desc
->
status
&=
~
IRQ_PENDING
;
}
desc
->
status
&=
~
IRQ_INPROGRESS
;
out:
desc
->
status
&=
~
IRQ_INPROGRESS
;
/*
* The ->end() handler has to deal with interrupts which got
* disabled while the handler was running.
...
...
@@ -567,7 +564,6 @@ void ppc_irq_dispatch_handler(struct pt_regs *regs, int irq)
int
do_IRQ
(
struct
pt_regs
*
regs
)
{
int
cpu
=
smp_processor_id
();
int
irq
,
first
=
1
;
#ifdef CONFIG_PPC_ISERIES
struct
paca_struct
*
lpaca
;
...
...
@@ -615,9 +611,6 @@ int do_IRQ(struct pt_regs *regs)
}
#endif
if
(
softirq_pending
(
cpu
))
do_softirq
();
return
1
;
/* lets ret_from_int know we can do checks */
}
...
...
arch/ppc64/kernel/misc.S
View file @
2cb54cac
...
...
@@ -683,8 +683,8 @@ _GLOBAL(sys_call_table32)
.
llong
.
sys32_rt_sigtimedwait
.
llong
.
sys32_rt_sigqueueinfo
.
llong
.
sys32_rt_sigsuspend
.
llong
.
sys32_pread
.
llong
.
sys32_pwrite
/*
180
*/
.
llong
.
sys32_pread
64
.
llong
.
sys32_pwrite
64
/*
180
*/
.
llong
.
sys_chown
.
llong
.
sys_getcwd
.
llong
.
sys_capget
...
...
@@ -695,7 +695,7 @@ _GLOBAL(sys_call_table32)
.
llong
.
sys_ni_syscall
/*
streams2
*/
.
llong
.
sys32_vfork
.
llong
.
sys32_getrlimit
/*
190
*/
.
llong
.
sys
_ni_syscall
/*
191
*/
/*
Unused
*/
.
llong
.
sys
32_readahead
.
llong
.
sys_ni_syscall
/*
192
-
reserved
-
mmap2
*/
.
llong
.
sys32_truncate64
/*
193
-
truncate64
*/
.
llong
.
sys32_ftruncate64
/*
194
-
ftruncate64
*/
...
...
@@ -726,11 +726,12 @@ _GLOBAL(sys_call_table32)
.
llong
.
sys_lremovexattr
.
llong
.
sys_fremovexattr
/*
220
*/
.
llong
.
sys_futex
.
llong
.
sys_ni_syscall
/*
reserved
for
tux
*/
.
llong
.
sys32_sched_setaffinity
.
llong
.
sys32_sched_getaffinity
.
llong
.
sys_ni_syscall
/*
reserved
for
security
*/
.
llong
.
sys_ni_syscall
/*
225
-
reserved
for
tux
*/
.
rept
NR_syscalls
-
22
4
.
rept
NR_syscalls
-
22
5
.
llong
.
sys_ni_syscall
.
endr
#endif
...
...
@@ -928,6 +929,7 @@ _GLOBAL(sys_call_table)
.
llong
.
sys_ni_syscall
/*
streams2
*/
.
llong
.
sys_vfork
.
llong
.
sys_getrlimit
/*
190
*/
.
llong
.
sys_readahead
.
llong
.
sys_ni_syscall
/*
191
*/
/*
Unused
*/
.
llong
.
sys_ni_syscall
/*
192
-
reserved
-
mmap2
*/
.
llong
.
sys_ni_syscall
/*
193
-
reserved
-
truncate64
*/
...
...
@@ -959,10 +961,11 @@ _GLOBAL(sys_call_table)
.
llong
.
sys_lremovexattr
.
llong
.
sys_fremovexattr
/*
220
*/
.
llong
.
sys_futex
.
llong
.
sys_ni_syscall
/*
reserved
for
tux
*/
.
llong
.
sys_sched_setaffinity
.
llong
.
sys_sched_getaffinity
.
llong
.
sys_ni_syscall
/*
reserved
for
security
*/
.
llong
.
sys_ni_syscall
/*
reserved
for
tux
*/
.
rept
NR_syscalls
-
22
4
.
rept
NR_syscalls
-
22
5
.
llong
.
sys_ni_syscall
.
endr
arch/ppc64/kernel/open_pic.c
View file @
2cb54cac
...
...
@@ -576,7 +576,7 @@ void openpic_request_IPIs(void)
*/
static
spinlock_t
openpic_setup_lock
__initdata
=
SPIN_LOCK_UNLOCKED
;
void
__init
do_openpic_setup_cpu
(
void
)
void
__
dev
init
do_openpic_setup_cpu
(
void
)
{
#ifdef CONFIG_IRQ_ALL_CPUS
int
i
;
...
...
arch/ppc64/kernel/pSeries_lpar.c
View file @
2cb54cac
...
...
@@ -155,7 +155,7 @@ long plpar_pte_protect(unsigned long flags,
unsigned
long
ptex
,
unsigned
long
avpn
)
{
return
plpar_hcall_norets
(
H_PROTECT
,
flags
,
ptex
);
return
plpar_hcall_norets
(
H_PROTECT
,
flags
,
ptex
,
avpn
);
}
long
plpar_tce_get
(
unsigned
long
liobn
,
...
...
@@ -552,6 +552,7 @@ static long pSeries_lpar_insert_hpte(unsigned long hpte_group,
int
secondary
,
unsigned
long
hpteflags
,
int
bolted
,
int
large
)
{
/* XXX fix for large page */
unsigned
long
avpn
=
vpn
>>
11
;
unsigned
long
arpn
=
physRpn_to_absRpn
(
prpn
);
unsigned
long
lpar_rc
;
...
...
@@ -651,11 +652,10 @@ static long pSeries_lpar_hpte_updatepp(unsigned long slot, unsigned long newpp,
unsigned
long
va
,
int
large
)
{
unsigned
long
lpar_rc
;
unsigned
long
flags
;
flags
=
(
newpp
&
7
)
|
H_AVPN
;
unsigned
long
vpn
=
va
>>
PAGE_SHIFT
;
unsigned
long
flags
=
(
newpp
&
7
)
|
H_AVPN
;
unsigned
long
avpn
=
va
>>
23
;
lpar_rc
=
plpar_pte_protect
(
flags
,
slot
,
(
vpn
>>
4
)
&
~
0x7fUL
);
lpar_rc
=
plpar_pte_protect
(
flags
,
slot
,
(
avpn
<<
7
)
);
if
(
lpar_rc
==
H_Not_Found
)
{
udbg_printf
(
"updatepp missed
\n
"
);
...
...
@@ -748,18 +748,11 @@ static void pSeries_lpar_hpte_updateboltedpp(unsigned long newpp,
static
void
pSeries_lpar_hpte_invalidate
(
unsigned
long
slot
,
unsigned
long
va
,
int
large
,
int
local
)
{
unsigned
long
vpn
,
avpn
;
unsigned
long
avpn
=
va
>>
23
;
unsigned
long
lpar_rc
;
unsigned
long
dummy1
,
dummy2
;
if
(
large
)
vpn
=
va
>>
LARGE_PAGE_SHIFT
;
else
vpn
=
va
>>
PAGE_SHIFT
;
avpn
=
vpn
>>
11
;
lpar_rc
=
plpar_pte_remove
(
H_AVPN
,
slot
,
(
vpn
>>
4
)
&
~
0x7fUL
,
&
dummy1
,
lpar_rc
=
plpar_pte_remove
(
H_AVPN
,
slot
,
(
avpn
<<
7
),
&
dummy1
,
&
dummy2
);
if
(
lpar_rc
==
H_Not_Found
)
{
...
...
arch/ppc64/kernel/prom.c
View file @
2cb54cac
...
...
@@ -144,6 +144,7 @@ static interpret_func interpret_root_props;
#define FB_MAX 8
#endif
static
int
ppc64_is_smp
;
struct
prom_t
prom
=
{
0
,
/* entry */
...
...
arch/ppc64/kernel/smp.c
View file @
2cb54cac
...
...
@@ -25,8 +25,6 @@
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/delay.h>
#define __KERNEL_SYSCALLS__
#include <linux/unistd.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/cache.h>
...
...
@@ -53,22 +51,19 @@
#include <asm/machdep.h>
int
smp_threads_ready
=
0
;
volatile
int
smp_commenced
=
0
;
int
smp_tb_synchronized
=
0
;
spinlock_t
kernel_flag
__cacheline_aligned
=
SPIN_LOCK_UNLOCKED
;
unsigned
long
cache_decay_ticks
;
static
int
max_cpus
__initdata
=
NR_CPUS
;
/* initialised so it doesnt end up in bss */
unsigned
long
cpu_online_map
=
0
;
int
boot_cpuid
=
0
;
int
ppc64_is_smp
=
0
;
volatile
unsigned
long
cpu_callin_map
[
NR_CPUS
]
=
{
0
,};
static
struct
smp_ops_t
*
smp_ops
;
volatile
unsigned
long
cpu_callin_map
[
NR_CPUS
];
extern
unsigned
char
stab_array
[];
int
start_secondary
(
void
*
);
extern
int
cpu_idle
(
void
*
unused
);
void
smp_call_function_interrupt
(
void
);
void
smp_message_pass
(
int
target
,
int
msg
,
unsigned
long
data
,
int
wait
);
...
...
@@ -86,7 +81,7 @@ struct xics_ipi_struct {
struct
xics_ipi_struct
xics_ipi_message
[
NR_CPUS
]
__cacheline_aligned
;
#define smp_message_pass(t,m,d,w)
ppc_md.smp_
message_pass((t),(m),(d),(w))
#define smp_message_pass(t,m,d,w)
smp_ops->
message_pass((t),(m),(d),(w))
static
inline
void
set_tb
(
unsigned
int
upper
,
unsigned
int
lower
)
{
...
...
@@ -106,7 +101,6 @@ void iSeries_smp_message_recv( struct pt_regs * regs )
for
(
msg
=
0
;
msg
<
4
;
++
msg
)
if
(
test_and_clear_bit
(
msg
,
&
iSeries_smp_message
[
cpu
]
)
)
smp_message_recv
(
msg
,
regs
);
}
static
void
smp_iSeries_message_pass
(
int
target
,
int
msg
,
unsigned
long
data
,
int
wait
)
...
...
@@ -127,6 +121,7 @@ static void smp_iSeries_message_pass(int target, int msg, unsigned long data, in
}
}
#ifdef CONFIG_PPC_ISERIES
static
int
smp_iSeries_numProcs
(
void
)
{
unsigned
np
,
i
;
...
...
@@ -141,24 +136,23 @@ static int smp_iSeries_numProcs(void)
}
return
np
;
}
#endif
static
void
smp_iSeries_probe
(
void
)
static
int
smp_iSeries_probe
(
void
)
{
unsigned
i
;
unsigned
np
;
struct
ItLpPaca
*
lpPaca
;
unsigned
np
=
0
;
struct
ItLpPaca
*
lpPaca
;
np
=
0
;
for
(
i
=
0
;
i
<
MAX_PACAS
;
++
i
)
{
lpPaca
=
paca
[
i
].
xLpPacaPtr
;
if
(
lpPaca
->
xDynProcStatus
<
2
)
{
if
(
lpPaca
->
xDynProcStatus
<
2
)
{
paca
[
i
].
active
=
1
;
++
np
;
paca
[
i
].
next_jiffy_update_tb
=
paca
[
0
].
next_jiffy_update_tb
;
}
}
smp_tb_synchronized
=
1
;
return
np
;
}
static
void
smp_iSeries_kick_cpu
(
int
nr
)
...
...
@@ -194,10 +188,11 @@ static void smp_iSeries_setup_cpu(int nr)
/* This is called very early. */
void
smp_init_iSeries
(
void
)
{
ppc_md
.
smp_message_pass
=
smp_iSeries_message_pass
;
ppc_md
.
smp_probe
=
smp_iSeries_probe
;
ppc_md
.
smp_kick_cpu
=
smp_iSeries_kick_cpu
;
ppc_md
.
smp_setup_cpu
=
smp_iSeries_setup_cpu
;
smp_ops
=
&
ppc_md
.
smp_ops
;
smp_ops
->
message_pass
=
smp_iSeries_message_pass
;
smp_ops
->
probe
=
smp_iSeries_probe
;
smp_ops
->
kick_cpu
=
smp_iSeries_kick_cpu
;
smp_ops
->
setup_cpu
=
smp_iSeries_setup_cpu
;
#ifdef CONFIG_PPC_ISERIES
#warning fix for iseries
naca
->
processorCount
=
smp_iSeries_numProcs
();
...
...
@@ -229,10 +224,20 @@ smp_openpic_message_pass(int target, int msg, unsigned long data, int wait)
}
}
static
void
smp_chrp_probe
(
void
)
static
int
smp_chrp_probe
(
void
)
{
if
(
ppc64_is_smp
)
int
i
;
int
nr_cpus
=
0
;
for
(
i
=
0
;
i
<
NR_CPUS
;
i
++
)
{
if
(
cpu_possible
(
i
))
nr_cpus
++
;
}
if
(
nr_cpus
>
1
)
openpic_request_IPIs
();
return
nr_cpus
;
}
static
void
...
...
@@ -252,78 +257,32 @@ smp_kick_cpu(int nr)
/* The processor is currently spinning, waiting
* for the xProcStart field to become non-zero
* After we set xProcStart, the processor will
* continue on to secondary_start
in iSeries_head.S
* continue on to secondary_start
*/
paca
[
nr
].
xProcStart
=
1
;
}
extern
struct
gettimeofday_struct
do_gtod
;
static
void
smp_space_timers
()
static
void
smp_space_timers
(
unsigned
int
max_cpus
)
{
int
i
;
unsigned
long
offset
=
tb_ticks_per_jiffy
/
NR_CPUS
;
unsigned
long
offset
=
tb_ticks_per_jiffy
/
max_cpus
;
unsigned
long
previous_tb
=
paca
[
boot_cpuid
].
next_jiffy_update_tb
;
for
(
i
=
1
;
i
<
NR_CPUS
;
++
i
)
for
(
i
=
0
;
i
<
NR_CPUS
;
i
++
)
{
if
(
cpu_possible
(
i
)
&&
i
!=
boot_cpuid
)
{
paca
[
i
].
next_jiffy_update_tb
=
paca
[
i
-
1
].
next_jiffy_update_tb
+
offset
;
}
static
void
smp_chrp_setup_cpu
(
int
cpu_nr
)
{
static
atomic_t
ready
=
ATOMIC_INIT
(
1
);
static
volatile
int
frozen
=
0
;
if
(
naca
->
platform
==
PLATFORM_PSERIES_LPAR
)
{
/* timebases already synced under the hypervisor. */
paca
[
cpu_nr
].
next_jiffy_update_tb
=
tb_last_stamp
=
get_tb
();
if
(
cpu_nr
==
boot_cpuid
)
{
do_gtod
.
tb_orig_stamp
=
tb_last_stamp
;
/* Should update do_gtod.stamp_xsec.
* For now we leave it which means the time can be some
* number of msecs off until someone does a settimeofday()
*/
}
smp_tb_synchronized
=
1
;
}
else
{
if
(
cpu_nr
==
boot_cpuid
)
{
/* wait for all the others */
while
(
atomic_read
(
&
ready
)
<
num_online_cpus
())
barrier
();
atomic_set
(
&
ready
,
1
);
/* freeze the timebase */
rtas_call
(
rtas_token
(
"freeze-time-base"
),
0
,
1
,
NULL
);
mb
();
frozen
=
1
;
set_tb
(
0
,
0
);
paca
[
boot_cpuid
].
next_jiffy_update_tb
=
0
;
smp_space_timers
();
while
(
atomic_read
(
&
ready
)
<
num_online_cpus
())
barrier
();
/* thaw the timebase again */
rtas_call
(
rtas_token
(
"thaw-time-base"
),
0
,
1
,
NULL
);
mb
();
frozen
=
0
;
tb_last_stamp
=
get_tb
();
do_gtod
.
tb_orig_stamp
=
tb_last_stamp
;
smp_tb_synchronized
=
1
;
}
else
{
atomic_inc
(
&
ready
);
while
(
!
frozen
)
barrier
();
set_tb
(
0
,
0
);
mb
();
atomic_inc
(
&
ready
);
while
(
frozen
)
barrier
();
previous_tb
+
offset
;
previous_tb
=
paca
[
i
].
next_jiffy_update_tb
;
}
}
}
static
void
__devinit
pSeries_setup_cpu
(
int
cpu
)
{
if
(
OpenPIC_Addr
)
{
do_openpic_setup_cpu
();
}
else
{
if
(
cpu
_nr
!=
boot_cpuid
)
if
(
cpu
!=
boot_cpuid
)
xics_setup_cpu
();
}
}
...
...
@@ -347,26 +306,65 @@ smp_xics_message_pass(int target, int msg, unsigned long data, int wait)
}
}
static
void
smp_xics_probe
(
void
)
static
int
smp_xics_probe
(
void
)
{
int
i
;
int
nr_cpus
=
0
;
for
(
i
=
0
;
i
<
NR_CPUS
;
i
++
)
{
if
(
cpu_possible
(
i
))
nr_cpus
++
;
}
return
nr_cpus
;
}
static
spinlock_t
timebase_lock
=
SPIN_LOCK_UNLOCKED
;
static
unsigned
long
timebase
=
0
;
static
void
__devinit
pSeries_give_timebase
(
void
)
{
spin_lock
(
&
timebase_lock
);
rtas_call
(
rtas_token
(
"freeze-time-base"
),
0
,
1
,
NULL
);
timebase
=
get_tb
();
spin_unlock
(
&
timebase_lock
);
while
(
timebase
)
barrier
();
rtas_call
(
rtas_token
(
"thaw-time-base"
),
0
,
1
,
NULL
);
}
static
void
__devinit
pSeries_take_timebase
(
void
)
{
while
(
!
timebase
)
barrier
();
spin_lock
(
&
timebase_lock
);
set_tb
(
timebase
,
timebase
>>
32
);
timebase
=
0
;
spin_unlock
(
&
timebase_lock
);
}
/* This is called very early */
void
smp_init_pSeries
(
void
)
void
__init
smp_init_pSeries
(
void
)
{
if
(
naca
->
interrupt_controller
==
IC_OPEN_PIC
)
{
ppc_md
.
smp_message_pass
=
smp_openpic_message_pass
;
ppc_md
.
smp_probe
=
smp_chrp_probe
;
ppc_md
.
smp_kick_cpu
=
smp_kick_cpu
;
ppc_md
.
smp_setup_cpu
=
smp_chrp_setup_cpu
;
smp_ops
=
&
ppc_md
.
smp_ops
;
if
(
naca
->
interrupt_controller
==
IC_OPEN_PIC
)
{
smp_ops
->
message_pass
=
smp_openpic_message_pass
;
smp_ops
->
probe
=
smp_chrp_probe
;
}
else
{
ppc_md
.
smp_message_pass
=
smp_xics_message_pass
;
ppc_md
.
smp_probe
=
smp_xics_probe
;
ppc_md
.
smp_kick_cpu
=
smp_kick_cpu
;
ppc_md
.
smp_setup_cpu
=
smp_chrp_setup_cpu
;
smp_ops
->
message_pass
=
smp_xics_message_pass
;
smp_ops
->
probe
=
smp_xics_probe
;
}
if
(
naca
->
platform
==
PLATFORM_PSERIES
)
{
smp_ops
->
give_timebase
=
pSeries_give_timebase
;
smp_ops
->
take_timebase
=
pSeries_take_timebase
;
}
}
smp_ops
->
kick_cpu
=
smp_kick_cpu
;
smp_ops
->
setup_cpu
=
pSeries_setup_cpu
;
}
void
smp_local_timer_interrupt
(
struct
pt_regs
*
regs
)
{
...
...
@@ -470,7 +468,6 @@ static struct call_data_struct {
*/
int
smp_call_function
(
void
(
*
func
)
(
void
*
info
),
void
*
info
,
int
nonatomic
,
int
wait
)
{
struct
call_data_struct
data
;
int
ret
=
-
1
,
cpus
=
num_online_cpus
()
-
1
;
...
...
@@ -553,38 +550,41 @@ void smp_call_function_interrupt(void)
atomic_inc
(
&
call_data
->
finished
);
}
extern
unsigned
long
decr_overclock
;
extern
struct
gettimeofday_struct
do_gtod
;
struct
thread_
struct
*
current_set
[
NR_CPUS
]
=
{
&
init_thread_union
,
0
}
;
struct
thread_
info
*
current_set
[
NR_CPUS
]
;
void
__init
smp_boot_cpus
(
vo
id
)
static
void
__devinit
smp_store_cpu_info
(
int
id
)
{
int
i
,
cpu_nr
=
0
;
struct
task_struct
*
p
;
printk
(
"Entering SMP Mode...
\n
"
);
paca
[
id
].
pvr
=
_get_PVR
();
}
smp_store_cpu_info
(
boot_cpuid
);
cpu_callin_map
[
boot_cpuid
]
=
1
;
void
__init
smp_prepare_cpus
(
unsigned
int
max_cpus
)
{
int
i
;
/* XXX buggy - Anton */
current_thread_info
()
->
cpu
=
0
;
/* Fixup boot cpu */
smp_store_cpu_info
(
smp_processor_id
());
cpu_callin_map
[
smp_processor_id
()]
=
1
;
for
(
i
=
0
;
i
<
NR_CPUS
;
i
++
)
{
paca
[
i
].
prof_counter
=
1
;
paca
[
i
].
prof_multiplier
=
1
;
if
(
i
!=
boot_cpuid
)
{
void
*
tmp
;
/*
* the boot cpu segment table is statically
* initialized to real address 0x5000. The
* Other processor's tables are created and
* initialized here.
*/
paca
[
i
].
xStab_data
.
virt
=
(
unsigned
long
)
&
stab_array
[
PAGE_SIZE
*
(
i
-
1
)];
memset
((
void
*
)
paca
[
i
].
xStab_data
.
virt
,
0
,
PAGE_SIZE
);
paca
[
i
].
xStab_data
.
real
=
__v2a
(
paca
[
i
].
xStab_data
.
virt
);
paca
[
i
].
default_decr
=
tb_ticks_per_jiffy
/
decr_overclock
;
tmp
=
&
stab_array
[
PAGE_SIZE
*
(
i
-
1
)];
memset
(
tmp
,
0
,
PAGE_SIZE
);
paca
[
i
].
xStab_data
.
virt
=
(
unsigned
long
)
tmp
;
paca
[
i
].
xStab_data
.
real
=
(
unsigned
long
)
__v2a
(
tmp
);
paca
[
i
].
default_decr
=
tb_ticks_per_jiffy
/
decr_overclock
;
}
}
...
...
@@ -593,153 +593,95 @@ void __init smp_boot_cpus(void)
*/
cache_decay_ticks
=
HZ
/
100
;
ppc_md
.
smp_probe
();
for
(
i
=
0
;
i
<
NR_CPUS
;
i
++
)
{
if
(
paca
[
i
].
active
)
cpu_nr
++
;
}
printk
(
"Probe found %d CPUs
\n
"
,
cpu_nr
);
#ifndef CONFIG_PPC_ISERIES
paca
[
boot_cpuid
].
next_jiffy_update_tb
=
tb_last_stamp
=
get_tb
();
#ifdef CONFIG_ISERIES
smp_space_timers
();
/*
* Should update do_gtod.stamp_xsec.
* For now we leave it which means the time can be some
* number of msecs off until someone does a settimeofday()
*/
do_gtod
.
tb_orig_stamp
=
tb_last_stamp
;
#endif
printk
(
"Waiting for %d CPUs
\n
"
,
cpu_nr
-
1
);
max_cpus
=
smp_ops
->
probe
();
smp_space_timers
(
max_cpus
);
}
for
(
i
=
1
;
i
<
NR_CPUS
;
i
++
)
{
int
c
;
int
__cpu_up
(
unsigned
int
cpu
)
{
struct
pt_regs
regs
;
if
(
!
paca
[
i
].
active
)
continue
;
if
(
i
==
boot_cpuid
)
continue
;
if
(
num_online_cpus
()
>=
max_cpus
)
break
;
struct
task_struct
*
p
;
int
c
;
/* create a process for the processor */
/* we don't care about the values in regs since we'll
never reschedule the forked task. */
/* We DO care about one bit in the pt_regs we
pass to do_fork. That is the MSR_FP bit in
regs.msr. If that bit is on, then do_fork
(via copy_thread) will call giveup_fpu.
giveup_fpu will get a pointer to our (current's)
last register savearea via current->thread.regs
and using that pointer will turn off the MSR_FP,
MSR_FE0 and MSR_FE1 bits. At this point, this
pointer is pointing to some arbitrary point within
our stack */
/* only regs.msr is actually used, and 0 is OK for it */
memset
(
&
regs
,
0
,
sizeof
(
struct
pt_regs
));
p
=
do_fork
(
CLONE_VM
|
CLONE_IDLETASK
,
0
,
&
regs
,
0
);
if
(
IS_ERR
(
p
))
panic
(
"failed fork for CPU %d"
,
i
);
init_idle
(
p
,
i
);
panic
(
"failed fork for CPU %u: %li"
,
cpu
,
PTR_ERR
(
p
));
init_idle
(
p
,
cpu
);
unhash_process
(
p
);
paca
[
i
].
xCurrent
=
(
u64
)
p
;
current_set
[
i
]
=
p
->
thread_info
;
paca
[
cpu
].
xCurrent
=
(
u64
)
p
;
current_set
[
cpu
]
=
p
->
thread_info
;
/* wake up cpus */
ppc_md
.
smp_kick_cpu
(
i
);
smp_ops
->
kick_cpu
(
cpu
);
/*
* wait to see if the cpu made a callin (is actually up).
* use this value that I found through experimentation.
* -- Cort
*/
for
(
c
=
5000
;
c
&&
!
cpu_callin_map
[
i
]
;
c
--
)
{
for
(
c
=
5000
;
c
&&
!
cpu_callin_map
[
cpu
];
c
--
)
udelay
(
100
);
}
if
(
cpu_callin_map
[
i
]
)
{
printk
(
"Processor %d found.
\n
"
,
i
);
/* this sync's the decr's -- Cort */
}
else
{
printk
(
"Processor %d is stuck.
\n
"
,
i
);
}
if
(
!
cpu_callin_map
[
cpu
])
{
printk
(
"Processor %u is stuck.
\n
"
,
cpu
);
return
-
ENOENT
;
}
/* Setup boot cpu last (important) */
ppc_md
.
smp_setup_cpu
(
boot_cpuid
);
printk
(
"Processor %u found.
\n
"
,
cpu
);
if
(
num_online_cpus
()
<
2
)
{
tb_last_stamp
=
get_tb
();
smp_tb_synchronized
=
1
;
}
if
(
smp_ops
->
give_timebase
)
smp_ops
->
give_timebase
();
set_bit
(
cpu
,
&
cpu_online_map
)
;
return
0
;
}
void
__init
smp_commence
(
void
)
/* Activate a secondary processor. */
int
__devinit
start_secondary
(
void
*
unused
)
{
/*
* Lets the callin's below out of their loop.
*/
PPCDBG
(
PPCDBG_SMP
,
"smp_commence: start
\n
"
);
wmb
();
smp_commenced
=
1
;
}
unsigned
int
cpu
=
smp_processor_id
();
void
__init
smp_callin
(
void
)
{
int
cpu
=
smp_processor_id
();
atomic_inc
(
&
init_mm
.
mm_count
);
current
->
active_mm
=
&
init_mm
;
smp_store_cpu_info
(
cpu
);
set_dec
(
paca
[
cpu
].
default_decr
);
set_bit
(
smp_processor_id
(),
&
cpu_online_map
);
smp_mb
();
cpu_callin_map
[
cpu
]
=
1
;
ppc_md
.
smp_setup_cpu
(
cpu
);
smp_ops
->
setup_cpu
(
cpu
);
if
(
smp_ops
->
take_timebase
)
smp_ops
->
take_timebase
();
while
(
!
smp_commenced
)
{
barrier
();
}
/* XXX required? */
local_irq_enable
();
}
/* intel needs this */
void
__init
initialize_secondary
(
void
)
{
}
/* Activate a secondary processor. */
int
start_secondary
(
void
*
unused
)
{
atomic_inc
(
&
init_mm
.
mm_count
);
current
->
active_mm
=
&
init_mm
;
smp_callin
();
return
cpu_idle
(
NULL
);
}
void
__init
smp_setup
(
char
*
str
,
int
*
ints
)
{
}
int
setup_profiling_timer
(
unsigned
int
multiplier
)
{
return
0
;
}
/* this function is called for each processor
*/
void
__init
smp_store_cpu_info
(
int
id
)
void
smp_cpus_done
(
unsigned
int
max_cpus
)
{
paca
[
id
].
pvr
=
_get_PVR
();
}
smp_ops
->
setup_cpu
(
boot_cpuid
);
static
int
__init
maxcpus
(
char
*
str
)
{
get_option
(
&
str
,
&
max_cpus
);
return
1
;
/* XXX fix this, xics currently relies on it - Anton */
smp_threads_ready
=
1
;
}
__setup
(
"maxcpus="
,
maxcpus
);
arch/ppc64/kernel/sys_ppc32.c
View file @
2cb54cac
...
...
@@ -4438,30 +4438,32 @@ asmlinkage int sys32_vm86(u32 a1, u32 a2, u32 a3, u32 a4)
return
sys_vm86
((
int
)
a1
,
(
int
)
a2
,
(
int
)
a3
,
(
int
)
a4
);
}
extern
ssize_t
sys_pread64
(
unsigned
int
fd
,
char
*
buf
,
size_t
count
,
loff_t
pos
);
extern
asmlinkage
ssize_t
sys_pread
(
unsigned
int
fd
,
char
*
buf
,
size_t
count
,
loff_t
pos
);
extern
asmlinkage
ssize_t
sys_pwrite
(
unsigned
int
fd
,
const
char
*
buf
,
size_t
count
,
loff_t
pos
);
extern
ssize_t
sys_pwrite64
(
unsigned
int
fd
,
const
char
*
buf
,
size_t
count
,
loff_t
pos
);
typedef
__kernel_ssize_t32
ssize_t32
;
asmlinkage
ssize_t32
sys32_pread
(
unsigned
int
fd
,
char
*
ubuf
,
__kernel_size_t32
count
,
u32
reg6
,
u32
poshi
,
u32
poslo
)
ssize_t32
sys32_pread64
(
unsigned
int
fd
,
char
*
ubuf
,
__kernel_size_t32
count
,
u32
reg6
,
u32
poshi
,
u32
poslo
)
{
return
sys_pread
(
fd
,
ubuf
,
count
,
((
loff_t
)
AA
(
poshi
)
<<
32
)
|
AA
(
poslo
));
return
sys_pread
64
(
fd
,
ubuf
,
count
,
((
loff_t
)
AA
(
poshi
)
<<
32
)
|
AA
(
poslo
));
}
asmlinkage
ssize_t32
sys32_pwrite
(
unsigned
int
fd
,
char
*
ubuf
,
__kernel_size_t32
count
,
u32
reg6
,
u32
poshi
,
u32
poslo
)
ssize_t32
sys32_pwrite64
(
unsigned
int
fd
,
char
*
ubuf
,
__kernel_size_t32
count
,
u32
reg6
,
u32
poshi
,
u32
poslo
)
{
return
sys_pwrite
(
fd
,
ubuf
,
count
,
((
loff_t
)
AA
(
poshi
)
<<
32
)
|
AA
(
poslo
));
return
sys_pwrite
64
(
fd
,
ubuf
,
count
,
((
loff_t
)
AA
(
poshi
)
<<
32
)
|
AA
(
poslo
));
}
extern
ssize_t
sys_readahead
(
int
fd
,
loff_t
offset
,
size_t
count
);
ssize_t32
sys32_readahead
(
int
fd
,
u32
offhi
,
u32
offlo
,
s32
count
)
{
return
sys_readahead
(
fd
,
((
loff_t
)
AA
(
offhi
)
<<
32
)
|
AA
(
offlo
),
count
);
}
extern
asmlinkage
long
sys_truncate
(
const
char
*
path
,
unsigned
long
length
);
extern
asmlinkage
long
sys_ftruncate
(
unsigned
int
fd
,
unsigned
long
length
);
...
...
arch/ppc64/kernel/time.c
View file @
2cb54cac
...
...
@@ -293,9 +293,6 @@ int timer_interrupt(struct pt_regs * regs)
irq_exit
();
if
(
softirq_pending
(
cpu
))
do_softirq
();
return
1
;
}
...
...
arch/ppc64/kernel/traps.c
View file @
2cb54cac
...
...
@@ -128,13 +128,12 @@ void
SystemResetException
(
struct
pt_regs
*
regs
)
{
if
(
fwnmi_active
)
{
char
*
msg
;
unsigned
long
*
r3
=
__va
(
regs
->
gpr
[
3
]);
/* for FWNMI debug */
struct
rtas_error_log
*
errlog
;
msg
=
"FWNMI is active with save area at %016lx
\n
"
;
udbg_printf
(
msg
,
r3
);
printk
(
msg
,
r3
);
udbg_printf
(
"FWNMI is active with save area at %016lx
\n
"
,
r3
);
errlog
=
FWNMI_get_errinfo
(
regs
);
FWNMI_release_errinfo
();
}
if
(
debugger
)
...
...
arch/ppc64/kernel/xics.c
View file @
2cb54cac
...
...
@@ -373,7 +373,7 @@ xics_init_IRQ( void )
if
(
naca
->
platform
==
PLATFORM_PSERIES
)
{
#ifdef CONFIG_SMP
for
(
i
=
0
;
i
<
NR_CPUS
;
++
i
)
{
if
(
!
paca
[
i
].
active
)
if
(
!
cpu_possible
(
i
)
)
continue
;
xics_info
.
per_cpu
[
i
]
=
__ioremap
((
ulong
)
inodes
[
i
].
addr
,
...
...
include/asm-ppc64/hardirq.h
View file @
2cb54cac
...
...
@@ -13,30 +13,86 @@
#include <linux/preempt.h>
typedef
struct
{
unsigned
long
__softirq_pending
;
unsigned
long
__syscall_count
;
unsigned
int
__softirq_pending
;
unsigned
int
__syscall_count
;
struct
task_struct
*
__ksoftirqd_task
;
unsigned
long
idle_timestamp
;
}
____cacheline_aligned
irq_cpustat_t
;
#include <linux/irq_cpustat.h>
/* Standard mappings for irq_cpustat_t above */
#define IRQ_OFFSET 64
/*
* We put the hardirq and softirq counter into the preemption
* counter. The bitmask has the following meaning:
*
* - bits 0-7 are the preemption count (max preemption depth: 256)
* - bits 8-15 are the softirq count (max # of softirqs: 256)
* - bits 16-24 are the hardirq count (max # of hardirqs: 512)
*
* - ( bit 26 is the PREEMPT_ACTIVE flag. )
*
* PREEMPT_MASK: 0x000000ff
* SOFTIRQ_MASK: 0x0000ff00
* HARDIRQ_MASK: 0x01ff0000
*/
#define PREEMPT_BITS 8
#define SOFTIRQ_BITS 8
#define HARDIRQ_BITS 9
#define PREEMPT_SHIFT 0
#define SOFTIRQ_SHIFT (PREEMPT_SHIFT + PREEMPT_BITS)
#define HARDIRQ_SHIFT (SOFTIRQ_SHIFT + SOFTIRQ_BITS)
#define __HARDIRQ_MASK(x) ((1UL << (x))-1)
#define PREEMPT_MASK (__HARDIRQ_MASK(PREEMPT_BITS) << PREEMPT_SHIFT)
#define SOFTIRQ_MASK (__HARDIRQ_MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT)
#define HARDIRQ_MASK (__HARDIRQ_MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT)
#define hardirq_count() (preempt_count() & HARDIRQ_MASK)
#define softirq_count() (preempt_count() & SOFTIRQ_MASK)
#define irq_count() (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK))
#define PREEMPT_OFFSET (1UL << PREEMPT_SHIFT)
#define SOFTIRQ_OFFSET (1UL << SOFTIRQ_SHIFT)
#define HARDIRQ_OFFSET (1UL << HARDIRQ_SHIFT)
/*
* Are we in an interrupt context? Either doing bottom half
* or hardware interrupt processing?
* The hardirq mask has to be large enough to have
* space for potentially all IRQ sources in the system
* nesting on a single CPU:
*/
#define in_interrupt() \
((preempt_count() & ~PREEMPT_ACTIVE) >= IRQ_OFFSET)
#if (1 << HARDIRQ_BITS) < NR_IRQS
# error HARDIRQ_BITS is too low!
#endif
/*
* Are we doing bottom half or hardware interrupt processing?
* Are we in a softirq context? Interrupt context?
*/
#define in_irq() (hardirq_count())
#define in_softirq() (softirq_count())
#define in_interrupt() (irq_count())
#define in_irq in_interrupt
#define hardirq_trylock() (!in_interrupt())
#define hardirq_endlock() do { } while (0)
#define irq_enter() (preempt_count() += IRQ_OFFSET)
#define irq_exit() (preempt_count() -= IRQ_OFFSET)
#define irq_enter() (preempt_count() += HARDIRQ_OFFSET)
#if CONFIG_PREEMPT
# define IRQ_EXIT_OFFSET (HARDIRQ_OFFSET-1)
#else
# define IRQ_EXIT_OFFSET HARDIRQ_OFFSET
#endif
#define irq_exit() \
do { \
preempt_count() -= IRQ_EXIT_OFFSET; \
if (!in_interrupt() && softirq_pending(smp_processor_id())) \
do_softirq(); \
preempt_enable_no_resched(); \
} while (0)
#ifndef CONFIG_SMP
# define synchronize_irq(irq) barrier()
...
...
@@ -45,4 +101,13 @@ typedef struct {
#endif
/* CONFIG_SMP */
#endif
/* __KERNEL__ */
#define show_stack(SP) \
do { \
if (SP) \
print_backtrace(SP); \
else \
print_backtrace(_get_SP()); \
} while (0)
#endif
/* __ASM_HARDIRQ_H */
include/asm-ppc64/machdep.h
View file @
2cb54cac
...
...
@@ -20,6 +20,17 @@ struct device_node;
struct
TceTable
;
struct
rtc_time
;
#ifdef CONFIG_SMP
struct
smp_ops_t
{
void
(
*
message_pass
)(
int
target
,
int
msg
,
unsigned
long
data
,
int
wait
);
int
(
*
probe
)(
void
);
void
(
*
kick_cpu
)(
int
nr
);
void
(
*
setup_cpu
)(
int
nr
);
void
(
*
take_timebase
)(
void
);
void
(
*
give_timebase
)(
void
);
};
#endif
struct
machdep_calls
{
/* High use functions in the first cachelines, low use functions
* follow. DRENG collect profile data.
...
...
@@ -58,14 +69,6 @@ struct machdep_calls {
void
(
*
tce_free_one
)(
struct
TceTable
*
tbl
,
long
tcenum
);
void
(
*
smp_message_pass
)(
int
target
,
int
msg
,
unsigned
long
data
,
int
wait
);
void
(
*
smp_probe
)(
void
);
void
(
*
smp_kick_cpu
)(
int
nr
);
void
(
*
smp_setup_cpu
)(
int
nr
);
void
(
*
setup_arch
)(
void
);
/* Optional, may be NULL. */
void
(
*
setup_residual
)(
struct
seq_file
*
m
,
int
cpu_id
);
...
...
@@ -145,6 +148,11 @@ struct machdep_calls {
/* this is for modules, since _machine can be a define -- Cort */
int
ppc_machine
;
#ifdef CONFIG_SMP
/* functions for dealing with other cpus */
struct
smp_ops_t
smp_ops
;
#endif
/* CONFIG_SMP */
};
extern
struct
machdep_calls
ppc_md
;
...
...
include/asm-ppc64/mmzone.h
View file @
2cb54cac
...
...
@@ -22,7 +22,10 @@ extern plat_pg_data_t plat_node_data[];
#define MAX_NUMNODES 4
/* XXX grab this from the device tree - Anton */
#define PHYSADDR_TO_NID(pa) ((pa) >> 36)
#define MEMORY_ZONE_BITS 33
#define CPU_SHIFT_BITS 1
#define PHYSADDR_TO_NID(pa) ((pa) >> MEMORY_ZONE_BITS)
#define PLAT_NODE_DATA(n) (&plat_node_data[(n)])
#define PLAT_NODE_DATA_STARTNR(n) \
(PLAT_NODE_DATA(n)->gendata.node_start_mapnr)
...
...
@@ -84,7 +87,7 @@ extern plat_pg_data_t plat_node_data[];
#ifdef CONFIG_NUMA
/* XXX grab this from the device tree - Anton */
#define cputonode(cpu) ((cpu) >>
3
)
#define cputonode(cpu) ((cpu) >>
CPU_SHIFT_BITS
)
#define numa_node_id() cputonode(smp_processor_id())
...
...
include/asm-ppc64/rwsem.h
View file @
2cb54cac
...
...
@@ -57,6 +57,7 @@ struct rw_semaphore {
extern
struct
rw_semaphore
*
rwsem_down_read_failed
(
struct
rw_semaphore
*
sem
);
extern
struct
rw_semaphore
*
rwsem_down_write_failed
(
struct
rw_semaphore
*
sem
);
extern
struct
rw_semaphore
*
rwsem_wake
(
struct
rw_semaphore
*
sem
);
extern
struct
rw_semaphore
*
rwsem_downgrade_wake
(
struct
rw_semaphore
*
sem
);
static
inline
void
init_rwsem
(
struct
rw_semaphore
*
sem
)
{
...
...
@@ -73,12 +74,26 @@ static inline void init_rwsem(struct rw_semaphore *sem)
*/
/*
 * Lock for reading.
 *
 * NOTE(review): the extracted text fused the old (`>= 0`) and new (`> 0`)
 * diff lines into duplicated conditions; this is the post-commit version.
 * A positive count after the increment means no writer holds or is waiting
 * on the semaphore, so the read lock was acquired on the fast path.
 */
static inline void __down_read(struct rw_semaphore *sem)
{
	if (atomic_inc_return((atomic_t *)(&sem->count)) > 0)
		smp_wmb();	/* acquired: order subsequent reads after the lock */
	else
		rwsem_down_read_failed(sem);	/* contended slow path */
}
/*
 * Try to lock for reading without blocking.
 * Returns 1 on success, 0 if a writer is active or waiting.
 */
static inline int __down_read_trylock(struct rw_semaphore *sem)
{
	int old;

	/* Retry while no writer is indicated (count not negative). */
	for (old = sem->count; old >= 0; old = sem->count) {
		if (cmpxchg(&sem->count, old, old + RWSEM_ACTIVE_READ_BIAS) == old) {
			smp_wmb();
			return 1;
		}
	}
	return 0;
}
/*
* lock for writing
*/
...
...
@@ -94,6 +109,16 @@ static inline void __down_write(struct rw_semaphore *sem)
rwsem_down_write_failed
(
sem
);
}
/*
 * Try to lock for writing without blocking.
 * Succeeds only when the semaphore is completely unlocked;
 * returns 1 on success, 0 otherwise.
 */
static inline int __down_write_trylock(struct rw_semaphore *sem)
{
	int old = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE,
			  RWSEM_ACTIVE_WRITE_BIAS);

	smp_wmb();
	return old == RWSEM_UNLOCKED_VALUE;
}
/*
* unlock after reading
*/
...
...
@@ -126,6 +151,19 @@ static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem)
atomic_add
(
delta
,
(
atomic_t
*
)(
&
sem
->
count
));
}
/*
* downgrade write lock to read lock
*/
/*
 * Downgrade a held write lock to a read lock.
 * If the result is still negative there are queued waiters
 * (readers) that can now be woken.
 */
static inline void __downgrade_write(struct rw_semaphore *sem)
{
	int count;

	smp_wmb();	/* order prior writes before releasing write ownership */
	count = atomic_add_return(-RWSEM_WAITING_BIAS, (atomic_t *)(&sem->count));
	if (count < 0)
		rwsem_downgrade_wake(sem);
}
/*
* implement exchange and add functionality
*/
...
...
include/asm-ppc64/smp.h
View file @
2cb54cac
...
...
@@ -30,7 +30,6 @@
extern
unsigned
long
cpu_online_map
;
extern
void
smp_message_pass
(
int
target
,
int
msg
,
unsigned
long
data
,
int
wait
);
extern
void
smp_store_cpu_info
(
int
id
);
extern
void
smp_send_tlb_invalidate
(
int
);
extern
void
smp_send_xmon_break
(
int
cpu
);
struct
pt_regs
;
...
...
include/asm-ppc64/softirq.h
View file @
2cb54cac
...
...
@@ -12,24 +12,16 @@
#include <asm/hardirq.h>
/*
 * Bottom-half (softirq) enable/disable.
 *
 * NOTE(review): the extracted diff fused the pre-commit (IRQ_OFFSET,
 * nested if/else body) and post-commit (SOFTIRQ_OFFSET) lines; this is
 * the reconstructed post-commit version, which accounts softirq disable
 * separately from hardirqs in preempt_count().
 */
#define local_bh_disable() \
do { preempt_count() += SOFTIRQ_OFFSET; barrier(); } while (0)

#define __local_bh_enable() \
do { barrier(); preempt_count() -= SOFTIRQ_OFFSET; } while (0)

/*
 * Re-enable bottom halves and run any pending softirqs, unless we are
 * still inside some interrupt context; then give preemption a chance.
 */
#define local_bh_enable() \
do { \
	__local_bh_enable(); \
	if (unlikely(!in_interrupt() && softirq_pending(smp_processor_id()))) \
		do_softirq(); \
	preempt_check_resched(); \
} while (0)

#define in_softirq() in_interrupt()
#endif
/* __ASM_SOFTIRQ_H */
include/asm-ppc64/system.h
View file @
2cb54cac
...
...
@@ -88,26 +88,18 @@ struct task_struct;
extern
void
__switch_to
(
struct
task_struct
*
,
struct
task_struct
*
);
#define switch_to(prev, next, last) __switch_to((prev), (next))
#define prepare_arch_schedule(prev) do { } while(0)
#define finish_arch_schedule(prev) do { } while(0)
#define prepare_arch_switch(rq) do { } while(0)
#define finish_arch_switch(rq) spin_unlock_irq(&(rq)->lock)
struct
thread_struct
;
extern
void
_switch
(
struct
thread_struct
*
prev
,
struct
thread_struct
*
next
);
struct
pt_regs
;
extern
void
dump_regs
(
struct
pt_regs
*
);
#ifndef CONFIG_SMP
#define cli() local_irq_disable()
#define sti() local_irq_enable()
#define save_flags(flags) local_save_flags(flags)
#define restore_flags(flags) local_irq_restore(flags)
#define save_and_cli(flags) local_irq_save(flags)
#endif
/* !CONFIG_SMP */
/*
 * True when external interrupts are disabled on this CPU: reads the MSR
 * via local_save_flags() and tests the External-interrupt Enable bit
 * (MSR_EE clear => disabled).  Uses a GCC statement expression.
 */
#define irqs_disabled() \
({ \
unsigned long flags; \
local_save_flags(flags); \
!(flags & MSR_EE); \
})
static
__inline__
int
__is_processor
(
unsigned
long
pv
)
{
...
...
include/asm-ppc64/thread_info.h
View file @
2cb54cac
...
...
@@ -27,6 +27,8 @@ struct thread_info {
/*
* macros/functions for gaining access to the thread information structure
*
* preempt_count needs to be 1 initially, until the scheduler is functional.
*/
#define INIT_THREAD_INFO(tsk) \
{ \
...
...
@@ -34,6 +36,7 @@ struct thread_info {
exec_domain: &default_exec_domain, \
flags: 0, \
cpu: 0, \
preempt_count: 1, \
}
#define init_thread_info (init_thread_union.thread_info)
...
...
include/asm-ppc64/unistd.h
View file @
2cb54cac
...
...
@@ -200,6 +200,7 @@
#define __NR_putpmsg 188
/* some people actually want streams */
#define __NR_vfork 189
#define __NR_ugetrlimit 190
/* SuS compliant getrlimit */
#define __NR_readahead 191
#define __NR_mmap2 192
#define __NR_truncate64 193
#define __NR_ftruncate64 194
...
...
@@ -230,15 +231,10 @@
#define __NR_lremovexattr 219
#define __NR_fremovexattr 220
#define __NR_futex 221
#define __NR_tux 222
#define __NR_sched_setaffinity 223
#define __NR_sched_getaffinity 224
#if 0
/* Remind paulus to add these into ppc32 */
__NR_security
__NR_readahead
#endif
#define __NR_sched_setaffinity 222
#define __NR_sched_getaffinity 223
#define __NR_security 224
#define __NR_tuxcall 225
#define __NR(n) #n
...
...
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment