Commit 72747577 authored Oct 02, 2004 by Linus Torvalds
Merge bk://linux-ntfs.bkbits.net/ntfs-2.6
into ppc970.osdl.org:/home/torvalds/v2.6/linux

parents 4eeb1b30 958e19c3
Showing 52 changed files with 798 additions and 554 deletions
Documentation/ioctl-number.txt             +1    -0
Documentation/kernel-parameters.txt        +12   -3
Documentation/scsi/scsi_mid_low_api.txt    +0    -4
Documentation/sysctl/vm.txt                +1    -1
arch/i386/kernel/kprobes.c                 +5    -5
arch/i386/kernel/traps.c                   +6    -6
arch/i386/mm/fault.c                       +1    -1
arch/m32r/kernel/entry.S                   +10   -0
arch/m32r/kernel/irq.c                     +5    -3
arch/m32r/kernel/setup.c                   +1    -1
arch/m32r/kernel/setup_m32700ut.c          +1    -3
arch/m32r/kernel/setup_mappi.c             +1    -5
arch/m32r/kernel/setup_mappi2.c            +0    -4
arch/m32r/kernel/setup_oaks32r.c           +0    -4
arch/m32r/kernel/setup_opsput.c            +0    -2
arch/m32r/kernel/setup_usrv.c              +0    -4
arch/m32r/kernel/signal.c                  +2    -9
arch/m32r/kernel/smp.c                     +2    -1
arch/mips/vr41xx/common/icu.c              +132  -82
arch/mips/vr41xx/common/vrc4173.c          +90   -0
arch/ppc64/kernel/process.c                +1    -1
arch/ppc64/kernel/sys_ppc32.c              +1    -1
arch/sparc64/kernel/kprobes.c              +6    -6
arch/sparc64/kernel/traps.c                +14   -14
arch/sparc64/mm/fault.c                    +2    -2
arch/x86_64/kernel/nmi.c                   +2    -1
arch/x86_64/kernel/traps.c                 +10   -6
drivers/block/ioctl.c                      +2    -1
drivers/char/random.c                      +4    -1
drivers/cpufreq/cpufreq_ondemand.c         +31   -11
drivers/firmware/Kconfig                   +3    -2
drivers/macintosh/therm_adt746x.c          +14   -21
drivers/mtd/maps/ixp4xx.c                  +20   -4
drivers/net/wan/pc300_tty.c                +9    -10
fs/jffs2/super.c                           +3    -2
include/asm-m32r/bitops.h                  +80   -76
include/asm-m32r/hardirq.h                 +5    -25
include/asm-m32r/m32102.h                  +4    -3
include/asm-m32r/m32r.h                    +3    -4
include/asm-m32r/m32r_mp_fpga.h            +4    -4
include/asm-m32r/semaphore.h               +33   -85
include/asm-m32r/spinlock.h                +105  -94
include/asm-mips/vr41xx/vrc4173.h          +106  -0
include/asm-ppc64/eeh.h                    +16   -31
include/asm-x86_64/msi.h                   +2    -1
include/asm-x86_64/smp.h                   +8    -0
include/linux/notifier.h                   +4    -0
include/linux/timex.h                      +0    -2
kernel/kprobes.c                           +3    -0
kernel/power/swsusp.c                      +7    -0
kernel/timer.c                             +7    -6
mm/vmscan.c                                +19   -2
Documentation/ioctl-number.txt

@@ -117,6 +117,7 @@ Code  Seq#  Include File            Comments
                    <mailto:natalia@nikhefk.nikhef.nl>
 'c'   00-7F  linux/comstats.h        conflict!
 'c'   00-7F  linux/coda.h            conflict!
+'d'   00-FF  linux/char/drm/drm/h    conflict!
 'd'   00-1F  linux/devfs_fs.h        conflict!
 'd'   00-DF  linux/video_decoder.h   conflict!
 'd'   F0-FF  linux/digi1.h
Documentation/kernel-parameters.txt

@@ -97,9 +97,6 @@ running once the system is up.
             See header of drivers/scsi/53c7xx.c.
             See also Documentation/scsi/ncr53c7xx.txt.
 
-    98busmouse.irq= [HW,MOUSE] PC-9801 Bus Mouse Driver
-            Format: <irq>, default is 13
-
     acpi=       [HW,ACPI] Advanced Configuration and Power Interface
             Format: { force | off | ht | strict }
             force -- enable ACPI if default was off

@@ -533,6 +530,18 @@ running once the system is up.
     isapnp=     [ISAPNP]
             Format: <RDP>, <reset>, <pci_scan>, <verbosity>
 
+    isolcpus=   [KNL,SMP] Isolate CPUs from the general scheduler.
+            Format: <cpu number>, ..., <cpu number>
+            This option can be used to specify one or more CPUs
+            to isolate from the general SMP balancing and scheduling
+            algorithms. The only way to move a process onto or off
+            an "isolated" CPU is via the CPU affinity syscalls.
+            This option is the preferred way to isolate CPUs. The
+            alternative - manually setting the CPU mask of all tasks
+            in the system can cause problems and suboptimal load
+            balancer performance.
+
     isp16=      [HW,CD]
             Format: <io>,<irq>,<dma>,<setup>
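The isolcpus= text added above is easier to follow with a concrete example. A minimal userspace sketch of the affinity syscall it refers to is below; CPU number 2 (and a boot line such as isolcpus=2,3) are illustrative assumptions, not part of this patch:

    #define _GNU_SOURCE
    #include <sched.h>
    #include <stdio.h>

    int main(void)
    {
        cpu_set_t set;

        CPU_ZERO(&set);
        CPU_SET(2, &set);   /* CPU 2: assumed to be listed in isolcpus= */
        /* The affinity syscalls are the only way a task lands on an isolated CPU. */
        if (sched_setaffinity(0, sizeof(set), &set) != 0)
            perror("sched_setaffinity");
        return 0;
    }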
Documentation/scsi/scsi_mid_low_api.txt

@@ -1091,10 +1091,6 @@ Details:
 *      mid level does not recognize it, then the LLD that controls
 *      the device receives the ioctl. According to recent Unix standards
 *      unsupported ioctl() 'cmd' numbers should return -ENOTTY.
- *      However the mid level returns -EINVAL for unrecognized 'cmd'
- *      numbers when this function is not supplied by the driver.
- *      Unfortunately some applications expect -EINVAL and react badly
- *      when -ENOTTY is returned; stick with -EINVAL.
 *
 *      Optionally defined in: LLD
 **/
Documentation/sysctl/vm.txt

@@ -47,7 +47,7 @@ of free memory left when userspace requests more memory.
 When this flag is 1, the kernel pretends there is always enough
 memory until it actually runs out.
 
-When this flag is 2, the kernel uses a "strict overcommit"
+When this flag is 2, the kernel uses a "never overcommit"
 policy that attempts to prevent any overcommit of memory.
 
 This feature can be very useful because there are a lot of
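For reference, the mode whose description is reworded here is the one selected at run time with vm.overcommit_memory, e.g. "echo 2 > /proc/sys/vm/overcommit_memory"; only the documentation wording changes, not the behaviour.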
arch/i386/kernel/kprobes.c

@@ -267,26 +267,26 @@ int kprobe_exceptions_notify(struct notifier_block *self, unsigned long val,
 	switch (val) {
 	case DIE_INT3:
 		if (kprobe_handler(args->regs))
-			return NOTIFY_OK;
+			return NOTIFY_STOP;
 		break;
 	case DIE_DEBUG:
 		if (post_kprobe_handler(args->regs))
-			return NOTIFY_OK;
+			return NOTIFY_STOP;
 		break;
 	case DIE_GPF:
 		if (kprobe_running() &&
 		    kprobe_fault_handler(args->regs, args->trapnr))
-			return NOTIFY_OK;
+			return NOTIFY_STOP;
 		break;
 	case DIE_PAGE_FAULT:
 		if (kprobe_running() &&
 		    kprobe_fault_handler(args->regs, args->trapnr))
-			return NOTIFY_OK;
+			return NOTIFY_STOP;
 		break;
 	default:
 		break;
 	}
-	return NOTIFY_BAD;
+	return NOTIFY_DONE;
 }
 
 int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
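All of the notify_die() call sites in this merge move to the same convention, so a compact sketch may help: a handler on the die chain returns NOTIFY_STOP when it consumed the event (the trap path then returns early) and NOTIFY_DONE when it is not interested. The callback below is only an illustration built from names visible in the hunk above, not code from the patch; include/linux/notifier.h (+4 -0 in the file list) presumably carries the NOTIFY_STOP definition itself.

    #include <linux/notifier.h>
    #include <asm/kdebug.h>

    /* Illustrative die-chain callback, assuming the i386 die_args layout. */
    static int example_die_notify(struct notifier_block *self, unsigned long val,
                                  void *data)
    {
        struct die_args *args = data;

        if (val == DIE_INT3 && kprobe_handler(args->regs))
            return NOTIFY_STOP;     /* handled: stop the chain, trap code bails out */

        return NOTIFY_DONE;         /* not ours: let other notifiers run */
    }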
arch/i386/kernel/traps.c

@@ -459,7 +459,7 @@ asmlinkage void do_##name(struct pt_regs * regs, long error_code) \
 	info.si_code = sicode; \
 	info.si_addr = (void __user *)siaddr; \
 	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
-						== NOTIFY_OK) \
+						== NOTIFY_STOP) \
 		return; \
 	do_trap(trapnr, signr, str, 1, regs, error_code, &info); \
 }

@@ -528,7 +528,7 @@ asmlinkage void do_general_protection(struct pt_regs * regs, long error_code)
 gp_in_kernel:
 	if (!fixup_exception(regs)) {
 		if (notify_die(DIE_GPF, "general protection fault", regs,
-				error_code, 13, SIGSEGV) == NOTIFY_OK);
+				error_code, 13, SIGSEGV) == NOTIFY_STOP);
 			return;
 		die("general protection fault", regs, error_code);
 	}

@@ -602,7 +602,7 @@ static void default_do_nmi(struct pt_regs * regs)
 	if (!(reason & 0xc0)) {
 		if (notify_die(DIE_NMI_IPI, "nmi_ipi", regs, reason, 0, SIGINT)
-							== NOTIFY_BAD)
+							== NOTIFY_STOP)
 			return;
 #ifdef CONFIG_X86_LOCAL_APIC
 		/*

@@ -617,7 +617,7 @@ static void default_do_nmi(struct pt_regs * regs)
 		unknown_nmi_error(reason, regs);
 		return;
 	}
-	if (notify_die(DIE_NMI, "nmi", regs, reason, 0, SIGINT) == NOTIFY_BAD)
+	if (notify_die(DIE_NMI, "nmi", regs, reason, 0, SIGINT) == NOTIFY_STOP)
 		return;
 	if (reason & 0x80)
 		mem_parity_error(reason, regs);

@@ -666,7 +666,7 @@ void unset_nmi_callback(void)
 asmlinkage int do_int3(struct pt_regs *regs, long error_code)
 {
 	if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP)
-			== NOTIFY_OK)
+			== NOTIFY_STOP)
 		return 1;
 	/* This is an interrupt gate, because kprobes wants interrupts
 	disabled. Normal trap handlers don't. */

@@ -707,7 +707,7 @@ asmlinkage void do_debug(struct pt_regs * regs, long error_code)
 	__asm__ __volatile__("movl %%db6,%0" : "=r" (condition));
 
 	if (notify_die(DIE_DEBUG, "debug", regs, condition, error_code,
-					SIGTRAP) == NOTIFY_OK)
+					SIGTRAP) == NOTIFY_STOP)
 		return;
 	/* It's safe to allow irq's after DR6 has been saved */
 	if (regs->eflags & X86_EFLAGS_IF)
arch/i386/mm/fault.c

@@ -227,7 +227,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code)
 	__asm__("movl %%cr2,%0":"=r" (address));
 	if (notify_die(DIE_PAGE_FAULT, "page fault", regs, error_code, 14,
-					SIGSEGV) == NOTIFY_OK)
+					SIGSEGV) == NOTIFY_STOP)
 		return;
 	/* It's safe to allow irq's after cr2 has been saved */
 	if (regs->eflags & (X86_EFLAGS_IF|VM_MASK))
arch/m32r/kernel/entry.S

@@ -992,6 +992,16 @@ ENTRY(sys_call_table)
 	.long sys_mq_notify
 	.long sys_mq_getsetattr
 	.long sys_ni_syscall		/* reserved for kexec */
+	.long sys_waitid
+	.long sys_perfctr_info
+	.long sys_vperfctr_open
+	.long sys_vperfctr_control
+	.long sys_vperfctr_unlink
+	.long sys_vperfctr_iresume
+	.long sys_vperfctr_read		/* 290 */
+	.long sys_add_key
+	.long sys_request_key
+	.long sys_keyctl
 
 syscall_table_size=(.-sys_call_table)
arch/m32r/kernel/irq.c

@@ -187,15 +187,17 @@ int handle_IRQ_event(unsigned int irq,
 		struct pt_regs *regs, struct irqaction *action)
 {
 	int status = 1;	/* Force the "do bottom halves" bit */
-	int retval = 0;
+	int ret, retval = 0;
 
 	if (!(action->flags & SA_INTERRUPT))
 		local_irq_enable();
 
 	do {
-		status |= action->flags;
-		retval |= action->handler(irq, action->dev_id, regs);
+		ret = action->handler(irq, action->dev_id, regs);
+		if (ret == IRQ_HANDLED)
+			status |= action->flags;
+		retval |= ret;
 		action = action->next;
 	} while (action);
 
 	if (status & SA_SAMPLE_RANDOM)
 		add_interrupt_randomness(irq);
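The handle_IRQ_event() change above only folds an action's flags into status when that handler actually claimed the interrupt. For context, this is what a handler on a shared line reports back; the function and the dev_id layout below are hypothetical, only the return-value convention is the point:

    #include <linux/interrupt.h>

    /* Hypothetical handler for a device sharing its interrupt line. */
    static irqreturn_t demo_interrupt(int irq, void *dev_id, struct pt_regs *regs)
    {
        int *pending = dev_id;          /* assumed driver-private state */

        if (!*pending)
            return IRQ_NONE;            /* raised by another device on the line */

        *pending = 0;                   /* service the device */
        return IRQ_HANDLED;             /* lets the core count our SA_* flags */
    }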
arch/m32r/kernel/setup.c

 /*
  * linux/arch/m32r/kernel/setup.c
  *
- * Setup routines for MITSUBISHI M32R
+ * Setup routines for Renesas M32R
  *
  * Copyright (c) 2001, 2002  Hiroyuki Kondo, Hirokazu Takata,
  *                           Hitoshi Yamamoto
arch/m32r/kernel/setup_m32700ut.c

 /*
  * linux/arch/m32r/kernel/setup_m32700ut.c
  *
- * Setup routines for MITSUBISHI M32700UT Board
+ * Setup routines for Renesas M32700UT Board
  *
  * Copyright (c) 2002  Hiroyuki Kondo, Hirokazu Takata,
  *                     Hitoshi Yamamoto, Takeo Takahashi

@@ -9,8 +9,6 @@
  * This file is subject to the terms and conditions of the GNU General
  * Public License. See the file "COPYING" in the main directory of this
  * archive for more details.
- *
- * $Id: setup_m32700ut.c,v 1.6 2003/11/27 10:18:49 takeo Exp $
  */
 
 #include <linux/config.h>
arch/m32r/kernel/setup_mappi.c

 /*
  * linux/arch/m32r/kernel/setup_mappi.c
  *
- * Setup routines for MITSUBISHI MAPPI Board
+ * Setup routines for Renesas MAPPI Board
  *
  * Copyright (c) 2001, 2002  Hiroyuki Kondo, Hirokazu Takata,
  *                           Hitoshi Yamamoto
  */
 
-static char *rcsid =
-"$Id$";
-static void use_rcsid(void) {rcsid = rcsid; use_rcsid();}
-
 #include <linux/config.h>
 #include <linux/irq.h>
 #include <linux/kernel.h>
arch/m32r/kernel/setup_mappi2.c

@@ -7,10 +7,6 @@
  * Hitoshi Yamamoto, Mamoru Sakugawa
  */
 
-static char *rcsid =
-"$Id$";
-static void use_rcsid(void) {rcsid = rcsid; use_rcsid();}
-
 #include <linux/config.h>
 #include <linux/irq.h>
 #include <linux/kernel.h>
arch/m32r/kernel/setup_oaks32r.c

@@ -7,10 +7,6 @@
  * Hitoshi Yamamoto, Mamoru Sakugawa
  */
 
-static char *rcsid =
-"$Id: setup_oaks32r.c,v 1.1 2004/03/31 05:06:18 sakugawa Exp $";
-static void use_rcsid(void) {rcsid = rcsid; use_rcsid();}
-
 #include <linux/config.h>
 #include <linux/irq.h>
 #include <linux/kernel.h>
arch/m32r/kernel/setup_opsput.c

@@ -10,8 +10,6 @@
  * This file is subject to the terms and conditions of the GNU General
  * Public License. See the file "COPYING" in the main directory of this
  * archive for more details.
- *
- * $Id: setup_opsput.c,v 1.1 2004/07/27 06:54:20 sakugawa Exp $
  */
 
 #include <linux/config.h>
arch/m32r/kernel/setup_usrv.c

@@ -7,10 +7,6 @@
  * Hitoshi Yamamoto
  */
 
-static char *rcsid =
-"$Id$";
-static void use_rcsid(void) {rcsid = rcsid; use_rcsid();}
-
 #include <linux/config.h>
 #include <linux/irq.h>
 #include <linux/kernel.h>
arch/m32r/kernel/signal.c

@@ -404,9 +404,7 @@ static void setup_frame(int sig, struct k_sigaction *ka,
 	return;
 
 give_sigsegv:
-	if (sig == SIGSEGV)
-		ka->sa.sa_handler = SIG_DFL;
-	force_sig(SIGSEGV, current);
+	force_sigsegv(sig, current);
 }
 
 static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,

@@ -482,9 +480,7 @@ static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
 	return;
 
 give_sigsegv:
-	if (sig == SIGSEGV)
-		ka->sa.sa_handler = SIG_DFL;
-	force_sig(SIGSEGV, current);
+	force_sigsegv(sig, current);
 }
 
 /*

@@ -528,9 +524,6 @@ handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info,
 	else
 		setup_frame(sig, ka, oldset, regs);
 
-	if (ka->sa.sa_flags & SA_ONESHOT)
-		ka->sa.sa_handler = SIG_DFL;
-
 	if (!(ka->sa.sa_flags & SA_NODEFER)) {
 		spin_lock_irq(&current->sighand->siglock);
 		sigorsets(&current->blocked, &current->blocked, &ka->sa.sa_mask);
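The three removed blocks open-coded what the generic force_sigsegv() helper now does for the m32r code. Roughly, and only as a sketch derived from the removed lines (not a copy of the real helper, which also takes the siglock):

    #include <linux/sched.h>
    #include <linux/signal.h>

    /* Sketch: effect of force_sigsegv(sig, current) as called above. */
    static void force_sigsegv_sketch(int sig, struct task_struct *p)
    {
        if (sig == SIGSEGV)             /* reset a possibly faulty SIGSEGV handler */
            p->sighand->action[SIGSEGV - 1].sa.sa_handler = SIG_DFL;
        force_sig(SIGSEGV, p);          /* then deliver the signal for real */
    }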
arch/m32r/kernel/smp.c

@@ -441,9 +441,10 @@ static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
 	 */
 	send_IPI_mask(cpumask, INVALIDATE_TLB_IPI, 0);
 
-	while (!cpus_empty(flush_cpumask))
+	while (!cpus_empty(flush_cpumask)) {
 		/* nothing. lockup detection does not belong here */
 		mb();
+	}
 
 	flush_mm = NULL;
 	flush_vma = NULL;
arch/mips/vr41xx/common/icu.c

@@ -165,218 +165,268 @@ void vr41xx_enable_piuint(uint16_t mask)
 {
 	irq_desc_t *desc = irq_desc + PIU_IRQ;
 	unsigned long flags;
+	uint16_t val;
 
-	spin_lock_irqsave(&desc->lock, flags);
-	set_icu1(MPIUINTREG, mask);
-	spin_unlock_irqrestore(&desc->lock, flags);
+	if (current_cpu_data.cputype == CPU_VR4111 ||
+	    current_cpu_data.cputype == CPU_VR4121) {
+		spin_lock_irqsave(&desc->lock, flags);
+		val = read_icu1(MPIUINTREG);
+		val |= mask;
+		write_icu1(val, MPIUINTREG);
+		spin_unlock_irqrestore(&desc->lock, flags);
+	}
 }
 
+EXPORT_SYMBOL(vr41xx_enable_piuint);
+
 void vr41xx_disable_piuint(uint16_t mask)
 {
 	irq_desc_t *desc = irq_desc + PIU_IRQ;
 	unsigned long flags;
+	uint16_t val;
 
-	spin_lock_irqsave(&desc->lock, flags);
-	clear_icu1(MPIUINTREG, mask);
-	spin_unlock_irqrestore(&desc->lock, flags);
+	if (current_cpu_data.cputype == CPU_VR4111 ||
+	    current_cpu_data.cputype == CPU_VR4121) {
+		spin_lock_irqsave(&desc->lock, flags);
+		val = read_icu1(MPIUINTREG);
+		val &= ~mask;
+		write_icu1(val, MPIUINTREG);
+		spin_unlock_irqrestore(&desc->lock, flags);
+	}
 }
 
+EXPORT_SYMBOL(vr41xx_disable_piuint);
+
 void vr41xx_enable_aiuint(uint16_t mask)
 {
 	irq_desc_t *desc = irq_desc + AIU_IRQ;
 	unsigned long flags;
+	uint16_t val;
 
-	spin_lock_irqsave(&desc->lock, flags);
-	set_icu1(MAIUINTREG, mask);
-	spin_unlock_irqrestore(&desc->lock, flags);
+	if (current_cpu_data.cputype == CPU_VR4111 ||
+	    current_cpu_data.cputype == CPU_VR4121) {
+		spin_lock_irqsave(&desc->lock, flags);
+		val = read_icu1(MAIUINTREG);
+		val |= mask;
+		write_icu1(val, MAIUINTREG);
+		spin_unlock_irqrestore(&desc->lock, flags);
+	}
 }
 
+EXPORT_SYMBOL(vr41xx_enable_aiuint);
+
 void vr41xx_disable_aiuint(uint16_t mask)
 {
 	irq_desc_t *desc = irq_desc + AIU_IRQ;
 	unsigned long flags;
+	uint16_t val;
 
-	spin_lock_irqsave(&desc->lock, flags);
-	clear_icu1(MAIUINTREG, mask);
-	spin_unlock_irqrestore(&desc->lock, flags);
+	if (current_cpu_data.cputype == CPU_VR4111 ||
+	    current_cpu_data.cputype == CPU_VR4121) {
+		spin_lock_irqsave(&desc->lock, flags);
+		val = read_icu1(MAIUINTREG);
+		val &= ~mask;
+		write_icu1(val, MAIUINTREG);
+		spin_unlock_irqrestore(&desc->lock, flags);
+	}
 }
 
+EXPORT_SYMBOL(vr41xx_disable_aiuint);
+
 void vr41xx_enable_kiuint(uint16_t mask)
 {
 	irq_desc_t *desc = irq_desc + KIU_IRQ;
 	unsigned long flags;
+	uint16_t val;
 
-	spin_lock_irqsave(&desc->lock, flags);
-	set_icu1(MKIUINTREG, mask);
-	spin_unlock_irqrestore(&desc->lock, flags);
+	if (current_cpu_data.cputype == CPU_VR4111 ||
+	    current_cpu_data.cputype == CPU_VR4121) {
+		spin_lock_irqsave(&desc->lock, flags);
+		val = read_icu1(MKIUINTREG);
+		val |= mask;
+		write_icu1(val, MKIUINTREG);
+		spin_unlock_irqrestore(&desc->lock, flags);
+	}
 }
 
+EXPORT_SYMBOL(vr41xx_enable_kiuint);
+
 void vr41xx_disable_kiuint(uint16_t mask)
 {
 	irq_desc_t *desc = irq_desc + KIU_IRQ;
 	unsigned long flags;
+	uint16_t val;
 
-	spin_lock_irqsave(&desc->lock, flags);
-	clear_icu1(MKIUINTREG, mask);
-	spin_unlock_irqrestore(&desc->lock, flags);
+	if (current_cpu_data.cputype == CPU_VR4111 ||
+	    current_cpu_data.cputype == CPU_VR4121) {
+		spin_lock_irqsave(&desc->lock, flags);
+		val = read_icu1(MKIUINTREG);
+		val &= ~mask;
+		write_icu1(val, MKIUINTREG);
+		spin_unlock_irqrestore(&desc->lock, flags);
+	}
 }
 
+EXPORT_SYMBOL(vr41xx_disable_kiuint);
+
 void vr41xx_enable_dsiuint(uint16_t mask)
 {
 	irq_desc_t *desc = irq_desc + DSIU_IRQ;
 	unsigned long flags;
+	uint16_t val;
 
 	spin_lock_irqsave(&desc->lock, flags);
-	set_icu1(MDSIUINTREG, mask);
+	val = read_icu1(MDSIUINTREG);
+	val |= mask;
+	write_icu1(val, MDSIUINTREG);
 	spin_unlock_irqrestore(&desc->lock, flags);
 }
 
+EXPORT_SYMBOL(vr41xx_enable_dsiuint);
+
 void vr41xx_disable_dsiuint(uint16_t mask)
 {
 	irq_desc_t *desc = irq_desc + DSIU_IRQ;
 	unsigned long flags;
+	uint16_t val;
 
 	spin_lock_irqsave(&desc->lock, flags);
-	clear_icu1(MDSIUINTREG, mask);
+	val = read_icu1(MDSIUINTREG);
+	val &= ~mask;
+	write_icu1(val, MDSIUINTREG);
 	spin_unlock_irqrestore(&desc->lock, flags);
 }
 
+EXPORT_SYMBOL(vr41xx_disable_dsiuint);
+
 void vr41xx_enable_firint(uint16_t mask)
 {
 	irq_desc_t *desc = irq_desc + FIR_IRQ;
 	unsigned long flags;
+	uint16_t val;
 
 	spin_lock_irqsave(&desc->lock, flags);
-	set_icu2(MFIRINTREG, mask);
+	val = read_icu2(MFIRINTREG);
+	val |= mask;
+	write_icu2(val, MFIRINTREG);
 	spin_unlock_irqrestore(&desc->lock, flags);
 }
 
+EXPORT_SYMBOL(vr41xx_enable_firint);
+
 void vr41xx_disable_firint(uint16_t mask)
 {
 	irq_desc_t *desc = irq_desc + FIR_IRQ;
 	unsigned long flags;
+	uint16_t val;
 
 	spin_lock_irqsave(&desc->lock, flags);
-	clear_icu2(MFIRINTREG, mask);
+	val = read_icu2(MFIRINTREG);
+	val &= ~mask;
+	write_icu2(val, MFIRINTREG);
 	spin_unlock_irqrestore(&desc->lock, flags);
 }
 
+EXPORT_SYMBOL(vr41xx_disable_firint);
+
 void vr41xx_enable_pciint(void)
 {
 	irq_desc_t *desc = irq_desc + PCI_IRQ;
 	unsigned long flags;
 
-	spin_lock_irqsave(&desc->lock, flags);
-	write_icu2(PCIINT0, MPCIINTREG);
-	spin_unlock_irqrestore(&desc->lock, flags);
+	if (current_cpu_data.cputype == CPU_VR4122 ||
+	    current_cpu_data.cputype == CPU_VR4131 ||
+	    current_cpu_data.cputype == CPU_VR4133) {
+		spin_lock_irqsave(&desc->lock, flags);
+		write_icu2(PCIINT0, MPCIINTREG);
+		spin_unlock_irqrestore(&desc->lock, flags);
+	}
 }
 
+EXPORT_SYMBOL(vr41xx_enable_pciint);
+
 void vr41xx_disable_pciint(void)
 {
 	irq_desc_t *desc = irq_desc + PCI_IRQ;
 	unsigned long flags;
 
-	spin_lock_irqsave(&desc->lock, flags);
-	write_icu2(0, MPCIINTREG);
-	spin_unlock_irqrestore(&desc->lock, flags);
+	if (current_cpu_data.cputype == CPU_VR4122 ||
+	    current_cpu_data.cputype == CPU_VR4131 ||
+	    current_cpu_data.cputype == CPU_VR4133) {
+		spin_lock_irqsave(&desc->lock, flags);
+		write_icu2(0, MPCIINTREG);
+		spin_unlock_irqrestore(&desc->lock, flags);
+	}
 }
 
+EXPORT_SYMBOL(vr41xx_disable_pciint);
+
 void vr41xx_enable_scuint(void)
 {
 	irq_desc_t *desc = irq_desc + SCU_IRQ;
 	unsigned long flags;
 
-	spin_lock_irqsave(&desc->lock, flags);
-	write_icu2(SCUINT0, MSCUINTREG);
-	spin_unlock_irqrestore(&desc->lock, flags);
+	if (current_cpu_data.cputype == CPU_VR4122 ||
+	    current_cpu_data.cputype == CPU_VR4131 ||
+	    current_cpu_data.cputype == CPU_VR4133) {
+		spin_lock_irqsave(&desc->lock, flags);
+		write_icu2(SCUINT0, MSCUINTREG);
+		spin_unlock_irqrestore(&desc->lock, flags);
+	}
 }
 
+EXPORT_SYMBOL(vr41xx_enable_scuint);
+
 void vr41xx_disable_scuint(void)
 {
 	irq_desc_t *desc = irq_desc + SCU_IRQ;
 	unsigned long flags;
 
-	spin_lock_irqsave(&desc->lock, flags);
-	write_icu2(0, MSCUINTREG);
-	spin_unlock_irqrestore(&desc->lock, flags);
+	if (current_cpu_data.cputype == CPU_VR4122 ||
+	    current_cpu_data.cputype == CPU_VR4131 ||
+	    current_cpu_data.cputype == CPU_VR4133) {
+		spin_lock_irqsave(&desc->lock, flags);
+		write_icu2(0, MSCUINTREG);
+		spin_unlock_irqrestore(&desc->lock, flags);
+	}
 }
 
+EXPORT_SYMBOL(vr41xx_disable_scuint);
+
 void vr41xx_enable_csiint(uint16_t mask)
 {
 	irq_desc_t *desc = irq_desc + CSI_IRQ;
 	unsigned long flags;
+	uint16_t val;
 
-	spin_lock_irqsave(&desc->lock, flags);
-	set_icu2(MCSIINTREG, mask);
-	spin_unlock_irqrestore(&desc->lock, flags);
+	if (current_cpu_data.cputype == CPU_VR4122 ||
+	    current_cpu_data.cputype == CPU_VR4131 ||
+	    current_cpu_data.cputype == CPU_VR4133) {
+		spin_lock_irqsave(&desc->lock, flags);
+		val = read_icu2(MCSIINTREG);
+		val |= mask;
+		write_icu2(val, MCSIINTREG);
+		spin_unlock_irqrestore(&desc->lock, flags);
+	}
 }
 
+EXPORT_SYMBOL(vr41xx_enable_csiint);
+
 void vr41xx_disable_csiint(uint16_t mask)
 {
 	irq_desc_t *desc = irq_desc + CSI_IRQ;
 	unsigned long flags;
+	uint16_t val;
 
-	spin_lock_irqsave(&desc->lock, flags);
-	clear_icu2(MCSIINTREG, mask);
-	spin_unlock_irqrestore(&desc->lock, flags);
+	if (current_cpu_data.cputype == CPU_VR4122 ||
+	    current_cpu_data.cputype == CPU_VR4131 ||
+	    current_cpu_data.cputype == CPU_VR4133) {
+		spin_lock_irqsave(&desc->lock, flags);
+		val = read_icu2(MCSIINTREG);
+		val &= ~mask;
+		write_icu2(val, MCSIINTREG);
+		spin_unlock_irqrestore(&desc->lock, flags);
+	}
 }
 
+EXPORT_SYMBOL(vr41xx_disable_csiint);
+
 void vr41xx_enable_bcuint(void)
 {
 	irq_desc_t *desc = irq_desc + BCU_IRQ;
 	unsigned long flags;
 
-	spin_lock_irqsave(&desc->lock, flags);
-	write_icu2(BCUINTR, MBCUINTREG);
-	spin_unlock_irqrestore(&desc->lock, flags);
+	if (current_cpu_data.cputype == CPU_VR4122 ||
+	    current_cpu_data.cputype == CPU_VR4131 ||
+	    current_cpu_data.cputype == CPU_VR4133) {
+		spin_lock_irqsave(&desc->lock, flags);
+		write_icu2(BCUINTR, MBCUINTREG);
+		spin_unlock_irqrestore(&desc->lock, flags);
+	}
 }
 
+EXPORT_SYMBOL(vr41xx_enable_bcuint);
+
 void vr41xx_disable_bcuint(void)
 {
 	irq_desc_t *desc = irq_desc + BCU_IRQ;
 	unsigned long flags;
 
-	spin_lock_irqsave(&desc->lock, flags);
-	write_icu2(0, MBCUINTREG);
-	spin_unlock_irqrestore(&desc->lock, flags);
+	if (current_cpu_data.cputype == CPU_VR4122 ||
+	    current_cpu_data.cputype == CPU_VR4131 ||
+	    current_cpu_data.cputype == CPU_VR4133) {
+		spin_lock_irqsave(&desc->lock, flags);
+		write_icu2(0, MBCUINTREG);
+		spin_unlock_irqrestore(&desc->lock, flags);
+	}
 }
 
+EXPORT_SYMBOL(vr41xx_disable_bcuint);
+
 /*=======================================================================*/
 
 static unsigned int startup_sysint1_irq(unsigned int irq)
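The icu.c conversion repeats one idea many times: instead of the set_icu*/clear_icu* helpers, each enable/disable path now does a locked read-modify-write of the relevant mask register, and only on the CPU variants that actually have that unit. Reduced to a single hypothetical helper (the accessor and register are stand-ins, not the file's API):

    #include <linux/spinlock.h>
    #include <linux/types.h>

    /* Hypothetical helper showing the locked read-modify-write pattern. */
    static void mask_reg_update(spinlock_t *lock, volatile u16 *reg,
                                u16 bits, int enable)
    {
        unsigned long flags;
        u16 val;

        spin_lock_irqsave(lock, flags);
        val = *reg;                     /* read the current interrupt mask */
        if (enable)
            val |= bits;                /* vr41xx_enable_*: set the bits   */
        else
            val &= ~bits;               /* vr41xx_disable_*: clear them    */
        *reg = val;                     /* write back under the same lock  */
        spin_unlock_irqrestore(lock, flags);
    }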
arch/mips/vr41xx/common/vrc4173.c

@@ -316,6 +316,96 @@ static inline void vrc4173_giu_init(void)
 	spin_lock_init(&vrc4173_giu_lock);
 }
 
+void vrc4173_enable_piuint(uint16_t mask)
+{
+	irq_desc_t *desc = irq_desc + VRC4173_PIU_IRQ;
+	unsigned long flags;
+	uint16_t val;
+
+	spin_lock_irqsave(&desc->lock, flags);
+	val = vrc4173_inw(VRC4173_MPIUINTREG);
+	val |= mask;
+	vrc4173_outw(val, VRC4173_MPIUINTREG);
+	spin_unlock_irqrestore(&desc->lock, flags);
+}
+
+EXPORT_SYMBOL(vrc4173_eanble_piuint);
+
+void vrc4173_disable_piuint(uint16_t mask)
+{
+	irq_desc_t *desc = irq_desc + VRC4173_PIU_IRQ;
+	unsigned long flags;
+	uint16_t val;
+
+	spin_lock_irqsave(&desc->lock, flags);
+	val = vrc4173_inw(VRC4173_MPIUINTREG);
+	val &= ~mask;
+	vrc4173_outw(val, VRC4173_MPIUINTREG);
+	spin_unlock_irqrestore(&desc->lock, flags);
+}
+
+EXPORT_SYMBOL(vrc4173_disable_piuint);
+
+void vrc4173_enable_aiuint(uint16_t mask)
+{
+	irq_desc_t *desc = irq_desc + VRC4173_AIU_IRQ;
+	unsigned long flags;
+	uint16_t val;
+
+	spin_lock_irqsave(&desc->lock, flags);
+	val = vrc4173_inw(VRC4173_MAIUINTREG);
+	val |= mask;
+	vrc4173_outw(val, VRC4173_MAIUINTREG);
+	spin_unlock_irqrestore(&desc->lock, flags);
+}
+
+EXPORT_SYMBOL(vrc4173_enable_aiuint);
+
+void vrc4173_disable_aiuint(uint16_t mask)
+{
+	irq_desc_t *desc = irq_desc + VRC4173_AIU_IRQ;
+	unsigned long flags;
+	uint16_t val;
+
+	spin_lock_irqsave(&desc->lock, flags);
+	val = vrc4173_inw(VRC4173_MAIUINTREG);
+	val &= ~mask;
+	vrc4173_outw(val, VRC4173_MAIUINTREG);
+	spin_unlock_irqrestore(&desc->lock, flags);
+}
+
+EXPORT_SYMBOL(vrc4173_disable_aiuint);
+
+void vrc4173_enable_kiuint(uint16_t mask)
+{
+	irq_desc_t *desc = irq_desc + VRC4173_KIU_IRQ;
+	unsigned long flags;
+	uint16_t val;
+
+	spin_lock_irqsave(&desc->lock, flags);
+	val = vrc4173_inw(VRC4173_MKIUINTREG);
+	val |= mask;
+	vrc4173_outw(val, VRC4173_MKIUINTREG);
+	spin_unlock_irqrestore(&desc->lock, flags);
+}
+
+EXPORT_SYMBOL(vrc4173_enable_kiuint);
+
+void vrc4173_disable_kiuint(uint16_t mask)
+{
+	irq_desc_t *desc = irq_desc + VRC4173_KIU_IRQ;
+	unsigned long flags;
+	uint16_t val;
+
+	spin_lock_irqsave(&desc->lock, flags);
+	val = vrc4173_inw(VRC4173_MKIUINTREG);
+	val &= ~mask;
+	vrc4173_outw(val, VRC4173_MKIUINTREG);
+	spin_unlock_irqrestore(&desc->lock, flags);
+}
+
+EXPORT_SYMBOL(vrc4173_disable_kiuint);
+
 static void enable_vrc4173_irq(unsigned int irq)
 {
 	uint16_t val;
arch/ppc64/kernel/process.c

@@ -410,7 +410,7 @@ void start_thread(struct pt_regs *regs, unsigned long fdptr, unsigned long sp)
 		unsigned long childregs = (unsigned long)current->thread_info +
 						THREAD_SIZE;
 		childregs -= sizeof(struct pt_regs);
-		current->thread.regs = childregs;
+		current->thread.regs = (struct pt_regs *)childregs;
 	}
 
 	regs->nip = entry;
arch/ppc64/kernel/sys_ppc32.c

@@ -642,7 +642,7 @@ void start_thread32(struct pt_regs* regs, unsigned long nip, unsigned long sp)
 		unsigned long childregs = (unsigned long)current->thread_info +
 						THREAD_SIZE;
 		childregs -= sizeof(struct pt_regs);
-		current->thread.regs = childregs;
+		current->thread.regs = (struct pt_regs *)childregs;
 	}
 
 /*
arch/sparc64/kernel/kprobes.c

@@ -179,26 +179,26 @@ int kprobe_exceptions_notify(struct notifier_block *self, unsigned long val,
 	switch (val) {
 	case DIE_DEBUG:
 		if (kprobe_handler(args->regs))
-			return NOTIFY_OK;
+			return NOTIFY_STOP;
 		break;
 	case DIE_DEBUG_2:
 		if (post_kprobe_handler(args->regs))
-			return NOTIFY_OK;
+			return NOTIFY_STOP;
 		break;
 	case DIE_GPF:
 		if (kprobe_running() &&
 		    kprobe_fault_handler(args->regs, args->trapnr))
-			return NOTIFY_OK;
+			return NOTIFY_STOP;
 		break;
 	case DIE_PAGE_FAULT:
 		if (kprobe_running() &&
 		    kprobe_fault_handler(args->regs, args->trapnr))
-			return NOTIFY_OK;
+			return NOTIFY_STOP;
 		break;
 	default:
 		break;
 	}
-	return NOTIFY_BAD;
+	return NOTIFY_DONE;
 }
 
 asmlinkage void kprobe_trap(unsigned long trap_level, struct pt_regs *regs)

@@ -216,7 +216,7 @@ asmlinkage void kprobe_trap(unsigned long trap_level, struct pt_regs *regs)
 	 */
 	if (notify_die((trap_level == 0x170) ? DIE_DEBUG : DIE_DEBUG_2,
 		       (trap_level == 0x170) ? "debug" : "debug_2",
-		       regs, 0, trap_level, SIGTRAP) != NOTIFY_OK)
+		       regs, 0, trap_level, SIGTRAP) != NOTIFY_STOP)
 		bad_trap(regs, trap_level);
 }
arch/sparc64/kernel/traps.c

@@ -96,7 +96,7 @@ void bad_trap(struct pt_regs *regs, long lvl)
 	siginfo_t info;
 
 	if (notify_die(DIE_TRAP, "bad trap", regs,
-		       0, lvl, SIGTRAP) == NOTIFY_OK)
+		       0, lvl, SIGTRAP) == NOTIFY_STOP)
 		return;
 
 	if (lvl < 0x100) {

@@ -126,7 +126,7 @@ void bad_trap_tl1(struct pt_regs *regs, long lvl)
 	char buffer[32];
 
 	if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
-		       0, lvl, SIGTRAP) == NOTIFY_OK)
+		       0, lvl, SIGTRAP) == NOTIFY_STOP)
 		return;
 
 	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));

@@ -149,7 +149,7 @@ void instruction_access_exception(struct pt_regs *regs,
 	siginfo_t info;
 
 	if (notify_die(DIE_TRAP, "instruction access exception", regs,
-		       0, 0x8, SIGTRAP) == NOTIFY_OK)
+		       0, 0x8, SIGTRAP) == NOTIFY_STOP)
 		return;
 
 	if (regs->tstate & TSTATE_PRIV) {

@@ -173,7 +173,7 @@ void instruction_access_exception_tl1(struct pt_regs *regs,
 				      unsigned long sfsr, unsigned long sfar)
 {
 	if (notify_die(DIE_TRAP_TL1, "instruction access exception tl1", regs,
-		       0, 0x8, SIGTRAP) == NOTIFY_OK)
+		       0, 0x8, SIGTRAP) == NOTIFY_STOP)
 		return;
 
 	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));

@@ -186,7 +186,7 @@ void data_access_exception(struct pt_regs *regs,
 	siginfo_t info;
 
 	if (notify_die(DIE_TRAP, "data access exception", regs,
-		       0, 0x30, SIGTRAP) == NOTIFY_OK)
+		       0, 0x30, SIGTRAP) == NOTIFY_STOP)
 		return;
 
 	if (regs->tstate & TSTATE_PRIV) {

@@ -260,7 +260,7 @@ void do_iae(struct pt_regs *regs)
 	spitfire_clean_and_reenable_l1_caches();
 
 	if (notify_die(DIE_TRAP, "instruction access exception", regs,
-		       0, 0x8, SIGTRAP) == NOTIFY_OK)
+		       0, 0x8, SIGTRAP) == NOTIFY_STOP)
 		return;
 
 	info.si_signo = SIGBUS;

@@ -292,7 +292,7 @@ void do_dae(struct pt_regs *regs)
 	spitfire_clean_and_reenable_l1_caches();
 
 	if (notify_die(DIE_TRAP, "data access exception", regs,
-		       0, 0x30, SIGTRAP) == NOTIFY_OK)
+		       0, 0x30, SIGTRAP) == NOTIFY_STOP)
 		return;
 
 	info.si_signo = SIGBUS;

@@ -1695,7 +1695,7 @@ void do_fpe_common(struct pt_regs *regs)
 void do_fpieee(struct pt_regs *regs)
 {
 	if (notify_die(DIE_TRAP, "fpu exception ieee", regs,
-		       0, 0x24, SIGFPE) == NOTIFY_OK)
+		       0, 0x24, SIGFPE) == NOTIFY_STOP)
 		return;
 
 	do_fpe_common(regs);

@@ -1709,7 +1709,7 @@ void do_fpother(struct pt_regs *regs)
 	int ret = 0;
 
 	if (notify_die(DIE_TRAP, "fpu exception other", regs,
-		       0, 0x25, SIGFPE) == NOTIFY_OK)
+		       0, 0x25, SIGFPE) == NOTIFY_STOP)
 		return;
 
 	switch ((current_thread_info()->xfsr[0] & 0x1c000)) {

@@ -1728,7 +1728,7 @@ void do_tof(struct pt_regs *regs)
 	siginfo_t info;
 
 	if (notify_die(DIE_TRAP, "tagged arithmetic overflow", regs,
-		       0, 0x26, SIGEMT) == NOTIFY_OK)
+		       0, 0x26, SIGEMT) == NOTIFY_STOP)
 		return;
 
 	if (regs->tstate & TSTATE_PRIV)

@@ -1750,7 +1750,7 @@ void do_div0(struct pt_regs *regs)
 	siginfo_t info;
 
 	if (notify_die(DIE_TRAP, "integer division by zero", regs,
-		       0, 0x28, SIGFPE) == NOTIFY_OK)
+		       0, 0x28, SIGFPE) == NOTIFY_STOP)
 		return;
 
 	if (regs->tstate & TSTATE_PRIV)

@@ -1936,7 +1936,7 @@ void do_illegal_instruction(struct pt_regs *regs)
 	siginfo_t info;
 
 	if (notify_die(DIE_TRAP, "illegal instruction", regs,
-		       0, 0x10, SIGILL) == NOTIFY_OK)
+		       0, 0x10, SIGILL) == NOTIFY_STOP)
 		return;
 
 	if (tstate & TSTATE_PRIV)

@@ -1965,7 +1965,7 @@ void mem_address_unaligned(struct pt_regs *regs, unsigned long sfar, unsigned lo
 	siginfo_t info;
 
 	if (notify_die(DIE_TRAP, "memory address unaligned", regs,
-		       0, 0x34, SIGSEGV) == NOTIFY_OK)
+		       0, 0x34, SIGSEGV) == NOTIFY_STOP)
 		return;
 
 	if (regs->tstate & TSTATE_PRIV) {

@@ -1991,7 +1991,7 @@ void do_privop(struct pt_regs *regs)
 	siginfo_t info;
 
 	if (notify_die(DIE_TRAP, "privileged operation", regs,
-		       0, 0x11, SIGILL) == NOTIFY_OK)
+		       0, 0x11, SIGILL) == NOTIFY_STOP)
 		return;
 
 	if (test_thread_flag(TIF_32BIT)) {
arch/sparc64/mm/fault.c

@@ -149,7 +149,7 @@ static void unhandled_fault(unsigned long address, struct task_struct *tsk,
 	       (tsk->mm ? (unsigned long) tsk->mm->pgd :
 		          (unsigned long) tsk->active_mm->pgd));
 	if (notify_die(DIE_GPF, "general protection fault", regs,
-		       0, 0, SIGSEGV) == NOTIFY_OK)
+		       0, 0, SIGSEGV) == NOTIFY_STOP)
 		return;
 	die_if_kernel("Oops", regs);
 }

@@ -325,7 +325,7 @@ asmlinkage void do_sparc64_fault(struct pt_regs *regs)
 	fault_code = get_thread_fault_code();
 
 	if (notify_die(DIE_PAGE_FAULT, "page_fault", regs,
-		       fault_code, 0, SIGSEGV) == NOTIFY_OK)
+		       fault_code, 0, SIGSEGV) == NOTIFY_STOP)
 		return;
 
 	si_code = SEGV_MAPERR;
arch/x86_64/kernel/nmi.c

@@ -390,7 +390,8 @@ void nmi_watchdog_tick (struct pt_regs * regs, unsigned reason)
 		 */
 		alert_counter[cpu]++;
 		if (alert_counter[cpu] == 5*nmi_hz) {
-			if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT) == NOTIFY_BAD) {
+			if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT)
+							== NOTIFY_STOP) {
 				alert_counter[cpu] = 0;
 				return;
 			}
arch/x86_64/kernel/traps.c

@@ -437,7 +437,8 @@ static void do_trap(int trapnr, int signr, char *str,
 #define DO_ERROR(trapnr, signr, str, name) \
 asmlinkage void do_##name(struct pt_regs * regs, long error_code) \
 { \
-	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) == NOTIFY_BAD) \
+	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
+							== NOTIFY_STOP) \
 		return; \
 	do_trap(trapnr, signr, str, regs, error_code, NULL); \
 }

@@ -450,7 +451,8 @@ asmlinkage void do_##name(struct pt_regs * regs, long error_code) \
 	info.si_errno = 0; \
 	info.si_code = sicode; \
 	info.si_addr = (void __user *)siaddr; \
-	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) == NOTIFY_BAD) \
+	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
+							== NOTIFY_STOP) \
 		return; \
 	do_trap(trapnr, signr, str, regs, error_code, &info); \
 }

@@ -471,7 +473,8 @@ DO_ERROR(18, SIGSEGV, "reserved", reserved)
 asmlinkage void *do_##name(struct pt_regs * regs, long error_code) \
 { \
 	struct pt_regs *pr = ((struct pt_regs *)(current->thread.rsp0))-1; \
-	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) == NOTIFY_BAD) \
+	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
+							== NOTIFY_STOP) \
 		return regs; \
 	if (regs->cs & 3) { \
 		memcpy(pr, regs, sizeof(struct pt_regs)); \

@@ -565,7 +568,8 @@ asmlinkage void default_do_nmi(struct pt_regs * regs)
 	unsigned char reason = inb(0x61);
 
 	if (!(reason & 0xc0)) {
-		if (notify_die(DIE_NMI_IPI, "nmi_ipi", regs, reason, 0, SIGINT) == NOTIFY_BAD)
+		if (notify_die(DIE_NMI_IPI, "nmi_ipi", regs, reason, 0, SIGINT)
+								== NOTIFY_STOP)
 			return;
 #ifdef CONFIG_X86_LOCAL_APIC
 		/*

@@ -580,7 +584,7 @@ asmlinkage void default_do_nmi(struct pt_regs * regs)
 		unknown_nmi_error(reason, regs);
 		return;
 	}
-	if (notify_die(DIE_NMI, "nmi", regs, reason, 0, SIGINT) == NOTIFY_BAD)
+	if (notify_die(DIE_NMI, "nmi", regs, reason, 0, SIGINT) == NOTIFY_STOP)
 		return;
 	if (reason & 0x80)
 		mem_parity_error(reason, regs);

@@ -676,7 +680,7 @@ asmlinkage void *do_debug(struct pt_regs * regs, unsigned long error_code)
 clear_TF:
 	/* RED-PEN could cause spurious errors */
 	if (notify_die(DIE_DEBUG, "debug2", regs, condition, 1, SIGTRAP)
-								!= NOTIFY_BAD)
+								!= NOTIFY_STOP)
 		regs->eflags &= ~TF_MASK;
 	return regs;
 }
drivers/block/ioctl.c

@@ -194,7 +194,8 @@ int blkdev_ioctl(struct inode *inode, struct file *file, unsigned cmd,
 			return -EACCES;
 		if (disk->fops->ioctl) {
 			ret = disk->fops->ioctl(inode, file, cmd, arg);
-			if (ret != -EINVAL)
+			/* -EINVAL to handle old uncorrected drivers */
+			if (ret != -EINVAL && ret != -ENOTTY)
 				return ret;
 		}
 		fsync_bdev(bdev);
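The one-line blkdev_ioctl() change pairs with the scsi_mid_low_api.txt hunk earlier: drivers may now legitimately return -ENOTTY for unknown commands, so both -EINVAL (old drivers) and -ENOTTY fall through to the generic block-layer handling. A standalone toy model of that dispatch, with entirely made-up command numbers:

    #include <errno.h>
    #include <stdio.h>

    /* Stand-in for disk->fops->ioctl(): knows exactly one command. */
    static int driver_ioctl(unsigned int cmd)
    {
        return (cmd == 0x1234) ? 0 : -ENOTTY;
    }

    static int dispatch(unsigned int cmd)
    {
        int ret = driver_ioctl(cmd);

        /* -EINVAL covers old uncorrected drivers, -ENOTTY the standard value */
        if (ret != -EINVAL && ret != -ENOTTY)
            return ret;
        return 0;                       /* pretend generic handling took over */
    }

    int main(void)
    {
        printf("%d %d\n", dispatch(0x1234), dispatch(0x9999));
        return 0;
    }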
drivers/char/random.c

@@ -807,10 +807,11 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
 	long delta, delta2, delta3;
 	int entropy = 0;
 
+	preempt_disable();
 	/* if over the trickle threshold, use only 1 in 4096 samples */
 	if (random_state->entropy_count > trickle_thresh &&
 	    (__get_cpu_var(trickle_count)++ & 0xfff))
-		return;
+		goto out;
 
 	/*
 	 * Use get_cycles() if implemented, otherwise fall back to

@@ -861,6 +862,8 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
 		entropy = int_ln_12bits(delta);
 	}
 	batch_entropy_store(num, time, entropy);
+out:
+	preempt_enable();
 }
 
 void add_keyboard_randomness(unsigned char scancode)
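The random.c hunks exist because __get_cpu_var() is only safe with preemption off; the function now brackets its body with preempt_disable()/preempt_enable() and turns the early return into a goto so the enable is never skipped. A minimal kernel-style sketch of that shape (the per-CPU variable and the condition are invented for illustration):

    #include <linux/percpu.h>
    #include <linux/preempt.h>

    static DEFINE_PER_CPU(unsigned int, demo_count);

    static void demo_sample(int over_threshold)
    {
        preempt_disable();
        /* Per-CPU access below must not race with migration to another CPU. */
        if (over_threshold && (__get_cpu_var(demo_count)++ & 0xfff))
            goto out;

        /* ... the expensive sampling work would go here ... */
    out:
        preempt_enable();
    }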
drivers/cpufreq/cpufreq_ondemand.c
...
@@ -59,7 +59,7 @@ static unsigned int def_sampling_rate;
 #define DEF_SAMPLING_RATE_LATENCY_MULTIPLIER	(1000)
 #define DEF_SAMPLING_DOWN_FACTOR		(10)
 #define TRANSITION_LATENCY_LIMIT		(10 * 1000)
-#define sampling_rate_in_HZ(x)	((x * HZ) / (1000 * 1000))
+#define sampling_rate_in_HZ(x)	(((x * HZ) < (1000 * 1000)) ? 1 : ((x * HZ) / (1000 * 1000)))

 static void do_dbs_timer(void *data);
...
@@ -221,6 +221,7 @@ static struct attribute_group dbs_attr_group = {
 static void dbs_check_cpu(int cpu)
 {
 	unsigned int idle_ticks, up_idle_ticks, down_idle_ticks;
+	unsigned int total_idle_ticks;
 	unsigned int freq_down_step;
 	unsigned int freq_down_sampling_rate;
 	static int down_skip[NR_CPUS];
...
@@ -244,19 +245,23 @@ static void dbs_check_cpu(int cpu)
 	 * 5% of max_frequency
 	 */
 	/* Check for frequency increase */
-	idle_ticks = kstat_cpu(cpu).cpustat.idle -
+	total_idle_ticks = kstat_cpu(cpu).cpustat.idle +
+		kstat_cpu(cpu).cpustat.iowait;
+	idle_ticks = total_idle_ticks -
 		this_dbs_info->prev_cpu_idle_up;
-	this_dbs_info->prev_cpu_idle_up = kstat_cpu(cpu).cpustat.idle;
+	this_dbs_info->prev_cpu_idle_up = total_idle_ticks;
+	/* Scale idle ticks by 100 and compare with up and down ticks */
+	idle_ticks *= 100;
 	up_idle_ticks = (100 - dbs_tuners_ins.up_threshold) *
-		sampling_rate_in_HZ(dbs_tuners_ins.sampling_rate) / 100;
+		sampling_rate_in_HZ(dbs_tuners_ins.sampling_rate);
 	if (idle_ticks < up_idle_ticks) {
 		__cpufreq_driver_target(this_dbs_info->cur_policy,
 			this_dbs_info->cur_policy->max,
 			CPUFREQ_RELATION_H);
 		down_skip[cpu] = 0;
-		this_dbs_info->prev_cpu_idle_down = kstat_cpu(cpu).cpustat.idle;
+		this_dbs_info->prev_cpu_idle_down = total_idle_ticks;
 		return;
 	}
...
@@ -265,18 +270,25 @@ static void dbs_check_cpu(int cpu)
 	if (down_skip[cpu] < dbs_tuners_ins.sampling_down_factor)
 		return;
-	idle_ticks = kstat_cpu(cpu).cpustat.idle -
+	idle_ticks = total_idle_ticks -
 		this_dbs_info->prev_cpu_idle_down;
+	/* Scale idle ticks by 100 and compare with up and down ticks */
+	idle_ticks *= 100;
 	down_skip[cpu] = 0;
-	this_dbs_info->prev_cpu_idle_down = kstat_cpu(cpu).cpustat.idle;
+	this_dbs_info->prev_cpu_idle_down = total_idle_ticks;
 	freq_down_sampling_rate = dbs_tuners_ins.sampling_rate *
 		dbs_tuners_ins.sampling_down_factor;
 	down_idle_ticks = (100 - dbs_tuners_ins.down_threshold) *
-		sampling_rate_in_HZ(freq_down_sampling_rate) / 100;
+		sampling_rate_in_HZ(freq_down_sampling_rate);
 	if (idle_ticks > down_idle_ticks) {
 		freq_down_step = (5 * this_dbs_info->cur_policy->max) / 100;
+		/* max freq cannot be less than 100. But who knows.... */
+		if (unlikely(freq_down_step == 0))
+			freq_down_step = 5;
 		__cpufreq_driver_target(this_dbs_info->cur_policy,
 			this_dbs_info->cur_policy->cur - freq_down_step,
 			CPUFREQ_RELATION_H);
...
@@ -333,9 +345,11 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 		this_dbs_info->cur_policy = policy;
-		this_dbs_info->prev_cpu_idle_up = kstat_cpu(cpu).cpustat.idle;
-		this_dbs_info->prev_cpu_idle_down = kstat_cpu(cpu).cpustat.idle;
+		this_dbs_info->prev_cpu_idle_up =
+			kstat_cpu(cpu).cpustat.idle + kstat_cpu(cpu).cpustat.iowait;
+		this_dbs_info->prev_cpu_idle_down =
+			kstat_cpu(cpu).cpustat.idle + kstat_cpu(cpu).cpustat.iowait;
 		this_dbs_info->enable = 1;
 		sysfs_create_group(&policy->kobj, &dbs_attr_group);
 		dbs_enable++;
...
@@ -344,8 +358,14 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 		 * is used for first time
 		 */
 		if (dbs_enable == 1) {
+			unsigned int latency;
+
 			/* policy latency is in nS. Convert it to uS first */
-			def_sampling_rate = (policy->cpuinfo.transition_latency / 1000) *
+			latency = policy->cpuinfo.transition_latency;
+			if (latency < 1000)
+				latency = 1000;
+
+			def_sampling_rate = (latency / 1000) *
 				DEF_SAMPLING_RATE_LATENCY_MULTIPLIER;
 			dbs_tuners_ins.sampling_rate = def_sampling_rate;
...
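The reworked sampling_rate_in_HZ() macro and the 1000 ns floor on transition_latency both guard against a zero result when the requested sampling period is shorter than one tick. A small stand-alone demonstration of the arithmetic (HZ = 1000 is an assumption for the example, not a value taken from the driver):

#include <stdio.h>

#define HZ 1000 /* assumed tick rate, for illustration only */

/* old form: truncates to 0 for periods shorter than one tick */
static unsigned int us_to_ticks_old(unsigned int us)
{
        return (us * HZ) / (1000 * 1000);
}

/* new form: clamped to at least one tick, mirroring the macro above */
static unsigned int us_to_ticks_new(unsigned int us)
{
        return ((us * HZ) < (1000 * 1000)) ? 1 : (us * HZ) / (1000 * 1000);
}

int main(void)
{
        unsigned int us = 500; /* half a millisecond */

        printf("%u us -> old: %u ticks, new: %u ticks\n",
               us, us_to_ticks_old(us), us_to_ticks_new(us));
        return 0;
}

With HZ = 1000, 500 us maps to 0 ticks in the old form and 1 tick in the clamped form; a zero-length sampling interval is presumably what the clamp is meant to rule out.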
drivers/firmware/Kconfig
...
@@ -14,8 +14,9 @@ config EDD
 	  Services real mode BIOS calls to determine which disk
 	  BIOS tries boot from.  This information is then exported via sysfs.
-	  This option is experimental, but believed to be safe,
-	  and most disk controller BIOS vendors do not yet implement this feature.
+	  This option is experimental and is known to fail to boot on some
+	  obscure configurations. Most disk controller BIOS vendors do
+	  not yet implement this feature.

 config EFI_VARS
 	tristate "EFI Variable Support via sysfs"
...
drivers/macintosh/therm_adt746x.c
...
@@ -28,6 +28,7 @@
 #include <asm/system.h>
 #include <asm/sections.h>
 #include <asm/of_device.h>
+#include <linux/kthread.h>

 #undef DEBUG
...
@@ -70,9 +71,7 @@ static enum {ADT7460, ADT7467} therm_type;
 static int therm_bus, therm_address;
 static struct of_device * of_dev;
 static struct thermostat* thermostat;
-static pid_t monitor_thread_id;
-static int monitor_running;
-static struct completion monitor_task_compl;
+static struct task_struct *thread_therm = NULL;

 static int attach_one_thermostat(struct i2c_adapter *adapter, int addr, int busno);
 static void write_both_fan_speed(struct thermostat *th, int speed);
...
@@ -136,9 +135,8 @@ detach_thermostat(struct i2c_adapter *adapter)
 	th = thermostat;

-	if (monitor_running) {
-		monitor_running = 0;
-		wait_for_completion(&monitor_task_compl);
-	}
+	if (thread_therm != NULL) {
+		kthread_stop(thread_therm);
+	}

 	printk(KERN_INFO "adt746x: Putting max temperatures back from %d, %d, %d,"
...
@@ -237,16 +235,9 @@ static int monitor_task(void *arg)
 #ifdef DEBUG
 	int mfan_speed;
 #endif
-	lock_kernel();
-	daemonize("kfand");
-	unlock_kernel();
-	strcpy(current->comm, "thermostat");
-	monitor_running = 1;
-
-	while(monitor_running)
+	while(!kthread_should_stop())
 	{
-		msleep(2000);
+		msleep_interruptible(2000);

 		/* Check status */
 		/* local : chip */
...
@@ -321,7 +312,6 @@ static int monitor_task(void *arg)
 #endif
 	}

-	complete_and_exit(&monitor_task_compl, 0);
 	return 0;
 }
...
@@ -387,7 +377,7 @@ attach_one_thermostat(struct i2c_adapter *adapter, int addr, int busno)
 	thermostat = th;

 	if (i2c_attach_client(&th->clt)) {
-		printk("adt746x: Thermostat failed to attach client !\n");
+		printk(KERN_INFO "adt746x: Thermostat failed to attach client !\n");
 		thermostat = NULL;
 		kfree(th);
 		return -ENODEV;
...
@@ -403,10 +393,13 @@ attach_one_thermostat(struct i2c_adapter *adapter, int addr, int busno)
 		write_both_fan_speed(th, -1);
 	}

-	init_completion(&monitor_task_compl);
-
-	monitor_thread_id = kernel_thread(monitor_task, th,
-		SIGCHLD | CLONE_KERNEL);
+	thread_therm = kthread_run(monitor_task, th, "kfand");
+
+	if (thread_therm == ERR_PTR(-ENOMEM)) {
+		printk(KERN_INFO "adt746x: Kthread creation failed\n");
+		thread_therm = NULL;
+		return -ENOMEM;
+	}

 	return 0;
 }
...
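The therm_adt746x changes replace the old kernel_thread()/daemonize()/completion plumbing with the kthread API. A minimal sketch of that pattern, independent of this driver (my_poll_thread, my_task, my_start and my_stop are illustrative names; the hunk above checks specifically for ERR_PTR(-ENOMEM), while IS_ERR() is the more general test used here only for the sketch):

#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/errno.h>

static struct task_struct *my_task;

static int my_poll_thread(void *data)
{
        while (!kthread_should_stop()) {
                /* interruptible sleep so kthread_stop() can wake us promptly */
                msleep_interruptible(2000);
                /* ... poll the hardware here ... */
        }
        return 0;
}

static int my_start(void *data)
{
        my_task = kthread_run(my_poll_thread, data, "mypoll");
        if (IS_ERR(my_task)) {
                my_task = NULL;
                return -ENOMEM;
        }
        return 0;
}

static void my_stop(void)
{
        if (my_task)
                kthread_stop(my_task); /* blocks until my_poll_thread() returns */
}

kthread_stop() replaces both the monitor_running flag and the completion: it asks the thread to exit and waits for it, which is why the loop condition becomes kthread_should_stop() and the sleep becomes interruptible.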
drivers/mtd/maps/ixp4xx.c
 /*
- * $Id: ixp4xx.c,v 1.4 2004/08/31 22:55:51 dsaxena Exp $
+ * $Id: ixp4xx.c,v 1.6 2004/09/17 00:25:06 gleixner Exp $
  *
  * drivers/mtd/maps/ixp4xx.c
  *
...
@@ -69,6 +69,19 @@ static void ixp4xx_copy_from(struct map_info *map, void *to,
 		dest[len - 1] = BYTE0(src[i]);
 }

+/*
+ * Unaligned writes are ignored, causing the 8-bit
+ * probe to fail and proceed to the 16-bit probe (which succeeds).
+ */
+static void ixp4xx_probe_write16(struct map_info *map, map_word d, unsigned long adr)
+{
+	if (!(adr & 1))
+		*(__u16 *) (map->map_priv_1 + adr) = d.x[0];
+}
+
+/*
+ * Fast write16 function without the probing check above
+ */
 static void ixp4xx_write16(struct map_info *map, map_word d, unsigned long adr)
 {
 	*(__u16 *) (map->map_priv_1 + adr) = d.x[0];
...
@@ -171,7 +184,7 @@ static int ixp4xx_flash_probe(struct device *_dev)
 	info->map.bankwidth = 2;
 	info->map.name = dev->dev.bus_id;
 	info->map.read = ixp4xx_read16,
-	info->map.write = ixp4xx_write16,
+	info->map.write = ixp4xx_probe_write16,
 	info->map.copy_from = ixp4xx_copy_from,

 	info->res = request_mem_region(dev->resource->start,
...
@@ -184,7 +197,7 @@ static int ixp4xx_flash_probe(struct device *_dev)
 	}

 	info->map.map_priv_1 =
-		(unsigned long) ioremap(dev->resource->start,
+		(void __iomem *) ioremap(dev->resource->start,
 			dev->resource->end - dev->resource->start + 1);
 	if (!info->map.map_priv_1) {
 		printk(KERN_ERR "IXP4XXFlash: Failed to ioremap region\n");
...
@@ -200,6 +213,9 @@ static int ixp4xx_flash_probe(struct device *_dev)
 	}
 	info->mtd->owner = THIS_MODULE;

+	/* Use the fast version */
+	info->map.write = ixp4xx_write16,
+
 	err = parse_mtd_partitions(info->mtd, probes, &info->partitions, 0);
 	if (err > 0) {
 		err = add_mtd_partitions(info->mtd, info->partitions, err);
...
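The trick in this file is two-phase: install a deliberately restrictive write routine while the CFI probe runs (unaligned writes are dropped, so the 8-bit probe fails and the 16-bit probe wins), then swap in the unconditional fast write once probing is done. A sketch of that sequencing under the usual MTD map-driver entry points; do_map_probe() is the real MTD helper, the two write functions are the ones added above, and the error value is illustrative:

	/* phase 1: probe with the guarded writer */
	info->map.write = ixp4xx_probe_write16;
	info->mtd = do_map_probe("cfi_probe", &info->map);
	if (!info->mtd)
		return -ENXIO;	/* illustrative error handling */

	/* phase 2: probing succeeded, switch to the fast writer */
	info->map.write = ixp4xx_write16;

Because the probe-time writer only has to be correct, not fast, the steady-state write path keeps its single unconditional store.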
drivers/net/wan/pc300_tty.c
...
@@ -192,13 +192,14 @@ static void cpc_tty_signal_on(pc300dev_t *pc300dev, unsigned char signal)
 */
 void cpc_tty_init(pc300dev_t *pc300dev)
 {
-	int port, aux;
+	unsigned long port;
+	int aux;
 	st_cpc_tty_area *cpc_tty;

 	/* hdlcX - X=interface number */
 	port = pc300dev->dev->name[4] - '0';
 	if (port >= CPC_TTY_NPORTS) {
-		printk("%s-tty: invalid interface selected (0-%i): %i",
+		printk("%s-tty: invalid interface selected (0-%i): %li",
 			pc300dev->dev->name,
 			CPC_TTY_NPORTS - 1, port);
 		return;
...
@@ -682,7 +683,8 @@ static void cpc_tty_hangup(struct tty_struct *tty)
 */
 static void cpc_tty_rx_work(void *data)
 {
-	int port, i, j;
+	unsigned long port;
+	int i, j;
 	st_cpc_tty_area *cpc_tty;
 	volatile st_cpc_rx_buf *buf;
 	char flags = 0, flg_rx = 1;
...
@@ -693,17 +695,14 @@ static void cpc_tty_rx_work(void * data)
 	for (i = 0; (i < 4) && flg_rx; i++) {
 		flg_rx = 0;
-		port = (int) data;
+		port = (unsigned long)data;
 		for (j = 0; j < CPC_TTY_NPORTS; j++) {
 			cpc_tty = &cpc_tty_area[port];
 			if ((buf = cpc_tty->buf_rx.first) != 0) {
-				if (cpc_tty->tty) {
-					ld = tty_ldisc_ref(cpc_tty->tty);
-					if (ld) {
+				if (cpc_tty->tty)
+				{
+					ld = tty_ldisc_ref(cpc_tty);
+					if (ld) {
 						if (ld->receive_buf) {
 							CPC_TTY_DBG("%s: call line disc. receive_buf\n", cpc_tty->name);
 							ld->receive_buf(cpc_tty->tty, (char *)(buf->data), &flags, buf->size);
...
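The int-to-unsigned-long changes here are a 64-bit cleanliness fix: the port number travels through a void * work argument, and int cannot round-trip a pointer on 64-bit targets, while unsigned long is pointer-sized on the kernel's supported ABIs. A stand-alone illustration of the round trip:

#include <stdio.h>

/* a callback that receives a small integer smuggled through a void * */
static void rx_work(void *data)
{
        unsigned long port = (unsigned long)data; /* same width as a pointer */

        printf("servicing port %lu\n", port);
}

int main(void)
{
        unsigned long port = 3;

        /* the scheduling side packs the integer the same way */
        rx_work((void *)port);
        return 0;
}

The matching printk format change (%i to %li) follows from the widened type.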
fs/jffs2/super.c
...
@@ -7,7 +7,7 @@
  *
  * For licensing information, see the file 'LICENCE' in this directory.
  *
- * $Id: super.c,v 1.97 2004/07/16 15:17:57 dwmw2 Exp $
+ * $Id: super.c,v 1.99 2004/08/24 07:59:57 dwmw2 Exp $
  *
  */
...
@@ -130,7 +130,7 @@ static struct super_block *jffs2_get_sb_mtd(struct file_system_type *fs_type,
 		   mtd->index, mtd->name));
 	sb->s_op = &jffs2_super_operations;
-	sb->s_flags |= MS_NOATIME;
+	sb->s_flags = flags | MS_NOATIME;

 	ret = jffs2_do_fill_super(sb, data, (flags & MS_VERBOSE) ? 1 : 0);
...
@@ -330,6 +330,7 @@ static int __init init_jffs2_fs(void)
 out_compressors:
 	jffs2_compressors_exit();
 out:
+	kmem_cache_destroy(jffs2_inode_cachep);
 	return ret;
 }
...
include/asm-m32r/bitops.h
 #ifndef _ASM_M32R_BITOPS_H
 #define _ASM_M32R_BITOPS_H

-/* $Id$ */
-
 /*
  * linux/include/asm-m32r/bitops.h
- *	orig : i386 2.4.10
  *
  * Copyright 1992, Linus Torvalds.
  *
  * M32R version:
  *   Copyright (C) 2001, 2002  Hitoshi Yamamoto
- *   Copyright (C) 2004  Hirokazu Takata
+ *   Copyright (C) 2004  Hirokazu Takata <takata at linux-m32r.org>
  */

 #include <linux/config.h>
...
@@ -50,24 +47,25 @@
  * Note that @nr may be almost arbitrarily large; this function is not
  * restricted to acting on a single-word quantity.
  */
-static __inline__ void set_bit(int nr, volatile void * addr)
+static inline void set_bit(int nr, volatile void * addr)
 {
 	__u32 mask;
 	volatile __u32 *a = addr;
 	unsigned long flags;
+	unsigned long tmp;

 	a += (nr >> 5);
 	mask = (1 << (nr & 0x1F));

 	local_irq_save(flags);
 	__asm__ __volatile__ (
-		DCACHE_CLEAR("r4", "r6", "%0")
-		LOAD"	r4, @%0;	\n\t"
-		"or	r4, %1;		\n\t"
-		STORE"	r4, @%0;	\n\t"
-		: /* no outputs */
+		DCACHE_CLEAR("%0", "r6", "%1")
+		LOAD"	%0, @%1;	\n\t"
+		"or	%0, %2;		\n\t"
+		STORE"	%0, @%1;	\n\t"
+		: "=&r" (tmp)
 		: "r" (a), "r" (mask)
-		: "memory", "r4"
+		: "memory"
 #ifdef CONFIG_CHIP_M32700_TS1
 		, "r6"
 #endif	/* CONFIG_CHIP_M32700_TS1 */
...
@@ -84,7 +82,7 @@ static __inline__ void set_bit(int nr, volatile void * addr)
  * If it's called on the same region of memory simultaneously, the effect
  * may be that only one operation succeeds.
  */
-static __inline__ void __set_bit(int nr, volatile void * addr)
+static inline void __set_bit(int nr, volatile void * addr)
 {
 	__u32 mask;
 	volatile __u32 *a = addr;
...
@@ -104,11 +102,12 @@ static __inline__ void __set_bit(int nr, volatile void * addr)
  * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
  * in order to ensure changes are visible on other processors.
  */
-static __inline__ void clear_bit(int nr, volatile void * addr)
+static inline void clear_bit(int nr, volatile void * addr)
 {
 	__u32 mask;
 	volatile __u32 *a = addr;
 	unsigned long flags;
+	unsigned long tmp;

 	a += (nr >> 5);
 	mask = (1 << (nr & 0x1F));
...
@@ -116,13 +115,13 @@ static __inline__ void clear_bit(int nr, volatile void * addr)
 	local_irq_save(flags);
 	__asm__ __volatile__ (
-		DCACHE_CLEAR("r4", "r6", "%0")
-		LOAD"	r4, @%0;	\n\t"
-		"and	r4, %1;		\n\t"
-		STORE"	r4, @%0;	\n\t"
-		: /* no outputs */
+		DCACHE_CLEAR("%0", "r6", "%1")
+		LOAD"	%0, @%1;	\n\t"
+		"and	%0, %2;		\n\t"
+		STORE"	%0, @%1;	\n\t"
+		: "=&r" (tmp)
 		: "r" (a), "r" (~mask)
-		: "memory", "r4"
+		: "memory"
 #ifdef CONFIG_CHIP_M32700_TS1
 		, "r6"
 #endif	/* CONFIG_CHIP_M32700_TS1 */
...
@@ -130,7 +129,7 @@ static __inline__ void clear_bit(int nr, volatile void * addr)
 	local_irq_restore(flags);
 }

-static __inline__ void __clear_bit(int nr, volatile unsigned long * addr)
+static inline void __clear_bit(int nr, volatile unsigned long * addr)
 {
 	unsigned long mask;
 	volatile unsigned long *a = addr;
...
@@ -152,7 +151,7 @@ static __inline__ void __clear_bit(int nr, volatile unsigned long * addr)
  * If it's called on the same region of memory simultaneously, the effect
  * may be that only one operation succeeds.
  */
-static __inline__ void __change_bit(int nr, volatile void * addr)
+static inline void __change_bit(int nr, volatile void * addr)
 {
 	__u32 mask;
 	volatile __u32 *a = addr;
...
@@ -171,24 +170,25 @@ static __inline__ void __change_bit(int nr, volatile void * addr)
  * Note that @nr may be almost arbitrarily large; this function is not
  * restricted to acting on a single-word quantity.
  */
-static __inline__ void change_bit(int nr, volatile void * addr)
+static inline void change_bit(int nr, volatile void * addr)
 {
 	__u32 mask;
 	volatile __u32 *a = addr;
 	unsigned long flags;
+	unsigned long tmp;

 	a += (nr >> 5);
 	mask = (1 << (nr & 0x1F));

 	local_irq_save(flags);
 	__asm__ __volatile__ (
-		DCACHE_CLEAR("r4", "r6", "%0")
-		LOAD"	r4, @%0;	\n\t"
-		"xor	r4, %1;		\n\t"
-		STORE"	r4, @%0;	\n\t"
-		: /* no outputs */
+		DCACHE_CLEAR("%0", "r6", "%1")
+		LOAD"	%0, @%1;	\n\t"
+		"xor	%0, %2;		\n\t"
+		STORE"	%0, @%1;	\n\t"
+		: "=&r" (tmp)
 		: "r" (a), "r" (mask)
-		: "memory", "r4"
+		: "memory"
 #ifdef CONFIG_CHIP_M32700_TS1
 		, "r6"
 #endif	/* CONFIG_CHIP_M32700_TS1 */
...
@@ -204,28 +204,30 @@ static __inline__ void change_bit(int nr, volatile void * addr)
  * This operation is atomic and cannot be reordered.
  * It also implies a memory barrier.
  */
-static __inline__ int test_and_set_bit(int nr, volatile void * addr)
+static inline int test_and_set_bit(int nr, volatile void * addr)
 {
 	__u32 mask, oldbit;
 	volatile __u32 *a = addr;
 	unsigned long flags;
+	unsigned long tmp;

 	a += (nr >> 5);
 	mask = (1 << (nr & 0x1F));

 	local_irq_save(flags);
 	__asm__ __volatile__ (
-		DCACHE_CLEAR("%0", "r4", "%1")
-		LOAD"	%0, @%1;	\n\t"
-		"mv	r4, %0;		\n\t"
-		"and	%0, %2;		\n\t"
-		"or	r4, %2;		\n\t"
-		STORE"	r4, @%1;	\n\t"
-		: "=&r" (oldbit)
+		DCACHE_CLEAR("%0", "%1", "%2")
+		LOAD"	%0, @%2;	\n\t"
+		"mv	%1, %0;		\n\t"
+		"and	%0, %3;		\n\t"
+		"or	%1, %3;		\n\t"
+		STORE"	%1, @%2;	\n\t"
+		: "=&r" (oldbit), "=&r" (tmp)
 		: "r" (a), "r" (mask)
-		: "memory", "r4"
+		: "memory"
 	);
 	local_irq_restore(flags);

 	return (oldbit != 0);
 }
...
@@ -238,7 +240,7 @@ static __inline__ int test_and_set_bit(int nr, volatile void * addr)
  * If two examples of this operation race, one can appear to succeed
  * but actually fail.  You must protect multiple accesses with a lock.
  */
-static __inline__ int __test_and_set_bit(int nr, volatile void * addr)
+static inline int __test_and_set_bit(int nr, volatile void * addr)
 {
 	__u32 mask, oldbit;
 	volatile __u32 *a = addr;
...
@@ -259,11 +261,12 @@ static __inline__ int __test_and_set_bit(int nr, volatile void * addr)
  * This operation is atomic and cannot be reordered.
  * It also implies a memory barrier.
  */
-static __inline__ int test_and_clear_bit(int nr, volatile void * addr)
+static inline int test_and_clear_bit(int nr, volatile void * addr)
 {
 	__u32 mask, oldbit;
 	volatile __u32 *a = addr;
 	unsigned long flags;
+	unsigned long tmp;

 	a += (nr >> 5);
 	mask = (1 << (nr & 0x1F));
...
@@ -271,16 +274,16 @@ static __inline__ int test_and_clear_bit(int nr, volatile void * addr)
 	local_irq_save(flags);
 	__asm__ __volatile__ (
-		DCACHE_CLEAR("%0", "r4", "%2")
-		LOAD"	%0, @%2;	\n\t"
-		"mv	r4, %0;		\n\t"
-		"and	%0, %1;		\n\t"
-		"not	%1, %1;		\n\t"
-		"and	r4, %1;		\n\t"
-		STORE"	r4, @%2;	\n\t"
-		: "=&r" (oldbit), "+r" (mask)
+		DCACHE_CLEAR("%0", "%1", "%3")
+		LOAD"	%0, @%3;	\n\t"
+		"mv	%1, %0;		\n\t"
+		"and	%0, %2;		\n\t"
+		"not	%2, %2;		\n\t"
+		"and	%1, %2;		\n\t"
+		STORE"	%1, @%3;	\n\t"
+		: "=&r" (oldbit), "=&r" (tmp), "+r" (mask)
 		: "r" (a)
-		: "memory", "r4"
+		: "memory"
 	);
 	local_irq_restore(flags);
...
@@ -296,7 +299,7 @@ static __inline__ int test_and_clear_bit(int nr, volatile void * addr)
  * If two examples of this operation race, one can appear to succeed
  * but actually fail.  You must protect multiple accesses with a lock.
  */
-static __inline__ int __test_and_clear_bit(int nr, volatile void * addr)
+static inline int __test_and_clear_bit(int nr, volatile void * addr)
 {
 	__u32 mask, oldbit;
 	volatile __u32 *a = addr;
...
@@ -310,7 +313,7 @@ static __inline__ int __test_and_clear_bit(int nr, volatile void * addr)
 }

 /* WARNING: non atomic and it can be reordered! */
-static __inline__ int __test_and_change_bit(int nr, volatile void * addr)
+static inline int __test_and_change_bit(int nr, volatile void * addr)
 {
 	__u32 mask, oldbit;
 	volatile __u32 *a = addr;
...
@@ -331,28 +334,30 @@ static __inline__ int __test_and_change_bit(int nr, volatile void * addr)
  * This operation is atomic and cannot be reordered.
  * It also implies a memory barrier.
  */
-static __inline__ int test_and_change_bit(int nr, volatile void * addr)
+static inline int test_and_change_bit(int nr, volatile void * addr)
 {
 	__u32 mask, oldbit;
 	volatile __u32 *a = addr;
 	unsigned long flags;
+	unsigned long tmp;

 	a += (nr >> 5);
 	mask = (1 << (nr & 0x1F));

 	local_irq_save(flags);
 	__asm__ __volatile__ (
-		DCACHE_CLEAR("%0", "r4", "%1")
-		LOAD"	%0, @%1;	\n\t"
-		"mv	r4, %0;		\n\t"
-		"and	%0, %2;		\n\t"
-		"xor	r4, %2;		\n\t"
-		STORE"	r4, @%1;	\n\t"
-		: "=&r" (oldbit)
+		DCACHE_CLEAR("%0", "%1", "%2")
+		LOAD"	%0, @%2;	\n\t"
+		"mv	%1, %0;		\n\t"
+		"and	%0, %3;		\n\t"
+		"xor	%1, %3;		\n\t"
+		STORE"	%1, @%2;	\n\t"
+		: "=&r" (oldbit), "=&r" (tmp)
 		: "r" (a), "r" (mask)
-		: "memory", "r4"
+		: "memory"
 	);
 	local_irq_restore(flags);

 	return (oldbit != 0);
 }
...
@@ -365,7 +370,7 @@ static __inline__ int test_and_change_bit(int nr, volatile void * addr)
 static int test_bit(int nr, const volatile void * addr);
 #endif

-static __inline__ int test_bit(int nr, const volatile void * addr)
+static inline int test_bit(int nr, const volatile void * addr)
 {
 	__u32 mask;
 	const volatile __u32 *a = addr;
...
@@ -382,7 +387,7 @@ static __inline__ int test_bit(int nr, const volatile void * addr)
  *
  * Undefined if no zero exists, so code should check against ~0UL first.
  */
-static __inline__ unsigned long ffz(unsigned long word)
+static inline unsigned long ffz(unsigned long word)
 {
 	int k;
...
@@ -415,7 +420,7 @@ static __inline__ unsigned long ffz(unsigned long word)
  * @offset: The bitnumber to start searching at
  * @size: The maximum size to search
  */
-static __inline__ int find_next_zero_bit(void *addr, int size, int offset)
+static inline int find_next_zero_bit(void *addr, int size, int offset)
 {
 	unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
 	unsigned long result = offset & ~31UL;
...
@@ -457,7 +462,7 @@ static __inline__ int find_next_zero_bit(void *addr, int size, int offset)
  *
  * Undefined if no bit exists, so code should check against 0 first.
  */
-static __inline__ unsigned long __ffs(unsigned long word)
+static inline unsigned long __ffs(unsigned long word)
 {
 	int k = 0;
...
@@ -483,7 +488,7 @@ static __inline__ unsigned long __ffs(unsigned long word)
  * unlikely to be set. It's guaranteed that at least one of the 140
  * bits is cleared.
  */
-static __inline__ int sched_find_first_bit(unsigned long *b)
+static inline int sched_find_first_bit(unsigned long *b)
 {
 	if (unlikely(b[0]))
 		return __ffs(b[0]);
...
@@ -502,7 +507,7 @@ static __inline__ int sched_find_first_bit(unsigned long *b)
  * @offset: The bitnumber to start searching at
  * @size: The maximum size to search
  */
-static __inline__ unsigned long find_next_bit(const unsigned long *addr,
+static inline unsigned long find_next_bit(const unsigned long *addr,
 	unsigned long size, unsigned long offset)
 {
 	unsigned int *p = ((unsigned int *) addr) + (offset >> 5);
...
@@ -589,7 +594,7 @@ static __inline__ unsigned long find_next_bit(const unsigned long *addr,
 #define ext2_find_first_zero_bit find_first_zero_bit
 #define ext2_find_next_zero_bit find_next_zero_bit
 #else
-static __inline__ int ext2_set_bit(int nr, volatile void * addr)
+static inline int ext2_set_bit(int nr, volatile void * addr)
 {
 	__u8 mask, oldbit;
 	volatile __u8 *a = addr;
...
@@ -602,7 +607,7 @@ static __inline__ int ext2_set_bit(int nr, volatile void * addr)
 	return (oldbit != 0);
 }

-static __inline__ int ext2_clear_bit(int nr, volatile void * addr)
+static inline int ext2_clear_bit(int nr, volatile void * addr)
 {
 	__u8 mask, oldbit;
 	volatile __u8 *a = addr;
...
@@ -615,7 +620,7 @@ static __inline__ int ext2_clear_bit(int nr, volatile void * addr)
 	return (oldbit != 0);
 }

-static __inline__ int ext2_test_bit(int nr, const volatile void * addr)
+static inline int ext2_test_bit(int nr, const volatile void * addr)
 {
 	__u32 mask;
 	const volatile __u8 *a = addr;
...
@@ -629,7 +634,7 @@ static __inline__ int ext2_test_bit(int nr, const volatile void * addr)
 #define ext2_find_first_zero_bit(addr, size) \
 	ext2_find_next_zero_bit((addr), (size), 0)

-static __inline__ unsigned long ext2_find_next_zero_bit(void *addr,
+static inline unsigned long ext2_find_next_zero_bit(void *addr,
 	unsigned long size, unsigned long offset)
 {
 	unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
...
@@ -709,4 +714,3 @@ static __inline__ unsigned long ext2_find_next_zero_bit(void *addr,
 #endif /* __KERNEL__ */

 #endif /* _ASM_M32R_BITOPS_H */
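The recurring change in bitops.h is to stop hard-coding r4/r5 as scratch registers inside the asm bodies (and then listing them as clobbers) and instead declare extra early-clobber outputs such as "=&r" (tmp), letting GCC allocate the scratch registers. The payoff is visible in the shorter clobber lists above: "memory", "r4" becomes just "memory". A tiny illustration of the constraint syntax, written for x86-64 rather than m32r purely so it can be compiled anywhere; the instruction itself is trivial and only for demonstration:

static inline unsigned long add_one(unsigned long x)
{
        unsigned long tmp;

        /* "=&r" marks tmp as an early-clobber output, so the compiler
         * chooses a scratch register instead of the code naming one
         * and clobbering it by hand. */
        __asm__ (
                "lea    1(%[in]), %[tmp]\n\t"
                : [tmp] "=&r" (tmp)
                : [in] "r" (x));
        return tmp;
}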
include/asm-m32r/hardirq.h
...
@@ -30,7 +30,12 @@ typedef struct {
 #define PREEMPT_BITS	8
 #define SOFTIRQ_BITS	8
+
+#if NR_IRQS > 256
+#define HARDIRQ_BITS	9
+#else
 #define HARDIRQ_BITS	8
+#endif

 #define PREEMPT_SHIFT	0
 #define SOFTIRQ_SHIFT	(PREEMPT_SHIFT + PREEMPT_BITS)
...
@@ -45,29 +50,10 @@ typedef struct {
 # error HARDIRQ_BITS is too low!
 #endif

-/*
- * Are we doing bottom half or hardware interrupt processing?
- * Are we in a softirq context? Interrupt context?
- */
-#define in_irq()		(hardirq_count())
-#define in_softirq()		(softirq_count())
-#define in_interrupt()		(irq_count())
-
-#define hardirq_trylock()	(!in_interrupt())
-#define hardirq_endlock()	do { } while (0)
-
 #define irq_enter()		(preempt_count() += HARDIRQ_OFFSET)
 #define nmi_enter()		(irq_enter())
 #define nmi_exit()		(preempt_count() -= HARDIRQ_OFFSET)

-#ifdef CONFIG_PREEMPT
-# define in_atomic()	((preempt_count() & ~PREEMPT_ACTIVE) != kernel_locked())
-# define IRQ_EXIT_OFFSET (HARDIRQ_OFFSET-1)
-#else
-# define in_atomic()	(preempt_count() != 0)
-# define IRQ_EXIT_OFFSET HARDIRQ_OFFSET
-#endif
-
 #define irq_exit()						\
 do {								\
 		preempt_count() -= IRQ_EXIT_OFFSET;		\
...
@@ -76,10 +62,4 @@ do { \
 		preempt_enable_no_resched();			\
 } while (0)

-#ifndef CONFIG_SMP
-# define synchronize_irq(irq)	barrier()
-#else
-  extern void synchronize_irq(unsigned int irq);
-#endif /* CONFIG_SMP */
-
 #endif /* __ASM_HARDIRQ_H */
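Widening HARDIRQ_BITS to 9 when NR_IRQS > 256 matters because the hardirq nesting count lives in a bit-field inside preempt_count(), and each field must be wide enough for its maximum nesting depth. A small stand-alone check of the layout; only PREEMPT_SHIFT and SOFTIRQ_SHIFT are visible in the hunk above, so the HARDIRQ_SHIFT rule below is an assumption modelled on the other fields:

#include <stdio.h>

#define PREEMPT_BITS    8
#define SOFTIRQ_BITS    8
#define HARDIRQ_BITS    9   /* the NR_IRQS > 256 case from the hunk */

#define PREEMPT_SHIFT   0
#define SOFTIRQ_SHIFT   (PREEMPT_SHIFT + PREEMPT_BITS)
#define HARDIRQ_SHIFT   (SOFTIRQ_SHIFT + SOFTIRQ_BITS)  /* assumed layout */

#define HARDIRQ_OFFSET  (1UL << HARDIRQ_SHIFT)

int main(void)
{
        printf("hardirq field: shift %d, width %d, offset 0x%lx, max nesting %lu\n",
               HARDIRQ_SHIFT, HARDIRQ_BITS, HARDIRQ_OFFSET,
               (1UL << HARDIRQ_BITS) - 1);
        return 0;
}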
include/asm-m32r/m32102.h
...
@@ -2,10 +2,11 @@
 #define _M32102_H_

 /*
- * Mitsubishi M32R 32102 group
- * Copyright (c) 2001 [Hitoshi Yamamoto] All rights reserved.
+ * Renesas M32R 32102 group
+ *
+ * Copyright (c) 2001  Hitoshi Yamamoto
+ * Copyright (c) 2003, 2004  Renesas Technology Corp.
  */

-/* $Id$ */
-
 /*======================================================================*
  * Special Function Register
...
include/asm-m32r/m32r.h
...
@@ -2,12 +2,11 @@
 #define _ASM_M32R_M32R_H_

 /*
- * Mitsubishi M32R processor
- * Copyright (C) 1997-2002, Mitsubishi Electric Corporation
+ * Renesas M32R processor
+ *
+ * Copyright (C) 2003, 2004  Renesas Technology Corp.
  */

-/* $Id$ */
-
 #include <linux/config.h>

 /* Chip type */
...
include/asm-m32r/m32r_mp_fpga.h
...
@@ -2,12 +2,12 @@
 #define _ASM_M32R_M32R_MP_FPGA_

 /*
- * Mitsubishi M32R-MP-FPGA
- * Copyright (c) 2002 [Hitoshi Yamamoto] All rights reserved.
+ * Renesas M32R-MP-FPGA
+ *
+ * Copyright (c) 2002  Hitoshi Yamamoto
+ * Copyright (c) 2003, 2004  Renesas Technology Corp.
  */

-/* $Id$ */
-
 /*
  * ========================================================
  * M32R-MP-FPGA Memory Map
...
include/asm-m32r/semaphore.h
 #ifndef _ASM_M32R_SEMAPHORE_H
 #define _ASM_M32R_SEMAPHORE_H

-/* $Id$ */
-
 #include <linux/linkage.h>

 #ifdef __KERNEL__
...
@@ -10,39 +8,15 @@
 /*
  * SMP- and interrupt-safe semaphores..
  *
- * (C) Copyright 1996 Linus Torvalds
- *
- * Modified 1996-12-23 by Dave Grothe <dave@gcom.com> to fix bugs in
- *                     the original code and to make semaphore waits
- *                     interruptible so that processes waiting on
- *                     semaphores can be killed.
- * Modified 1999-02-14 by Andrea Arcangeli, split the sched.c helper
- *                     functions in asm/sempahore-helper.h while fixing a
- *                     potential and subtle race discovered by Ulrich Schmid
- *                     in down_interruptible(). Since I started to play here I
- *                     also implemented the `trylock' semaphore operation.
- *          1999-07-02 Artur Skawina <skawina@geocities.com>
- *                     Optimized "0(ecx)" -> "(ecx)" (the assembler does not
- *                     do this). Changed calling sequences from push/jmp to
- *                     traditional call/ret.
- * Modified 2001-01-01 Andreas Franck <afranck@gmx.de>
- *                     Some hacks to ensure compatibility with recent
- *                     GCC snapshots, to avoid stack corruption when compiling
- *                     with -fomit-frame-pointer. It's not sure if this will
- *                     be fixed in GCC, as our previous implementation was a
- *                     bit dubious.
- *
- * If you would like to see an analysis of this implementation, please
- * ftp to gcom.com and download the file
- * /pub/linux/src/semaphore/semaphore-2.0.24.tar.gz.
- *
+ * Copyright (C) 1996  Linus Torvalds
+ * Copyright (C) 2004  Hirokazu Takata <takata at linux-m32r.org>
  */

 #include <linux/config.h>
-#include <asm/system.h>
-#include <asm/atomic.h>
 #include <linux/wait.h>
 #include <linux/rwsem.h>
+#include <asm/system.h>
+#include <asm/atomic.h>

 #undef LOAD
 #undef STORE
...
@@ -58,21 +32,14 @@ struct semaphore {
 	atomic_t count;
 	int sleepers;
 	wait_queue_head_t wait;
-#ifdef WAITQUEUE_DEBUG
-	long __magic;
-#endif
 };

-#ifdef WAITQUEUE_DEBUG
-# define __SEM_DEBUG_INIT(name) \
-		, (int)&(name).__magic
-#else
-# define __SEM_DEBUG_INIT(name)
-#endif
-
-#define __SEMAPHORE_INITIALIZER(name,count) \
-{ ATOMIC_INIT(count), 0, __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
-	__SEM_DEBUG_INIT(name) }
+#define __SEMAPHORE_INITIALIZER(name, n)				\
+{									\
+	.count		= ATOMIC_INIT(n),				\
+	.sleepers	= 0,						\
+	.wait		= __WAIT_QUEUE_HEAD_INITIALIZER((name).wait)	\
+}

 #define __MUTEX_INITIALIZER(name) \
 	__SEMAPHORE_INITIALIZER(name,1)
...
@@ -83,7 +50,7 @@ struct semaphore {
 #define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
 #define DECLARE_MUTEX_LOCKED(name) __DECLARE_SEMAPHORE_GENERIC(name,0)

-static __inline__ void sema_init (struct semaphore *sem, int val)
+static inline void sema_init (struct semaphore *sem, int val)
 {
 /*
  *	*sem = (struct semaphore)__SEMAPHORE_INITIALIZER((*sem),val);
...
@@ -94,17 +61,14 @@ static __inline__ void sema_init (struct semaphore *sem, int val)
 	atomic_set(&sem->count, val);
 	sem->sleepers = 0;
 	init_waitqueue_head(&sem->wait);
-#ifdef WAITQUEUE_DEBUG
-	sem->__magic = (int)&sem->__magic;
-#endif
 }

-static __inline__ void init_MUTEX (struct semaphore *sem)
+static inline void init_MUTEX (struct semaphore *sem)
 {
 	sema_init(sem, 1);
 }

-static __inline__ void init_MUTEX_LOCKED (struct semaphore *sem)
+static inline void init_MUTEX_LOCKED (struct semaphore *sem)
 {
 	sema_init(sem, 0);
 }
...
@@ -120,19 +84,15 @@ asmlinkage int __down_trylock(struct semaphore * sem);
 asmlinkage void __up(struct semaphore * sem);

 /*
- * This is ugly, but we want the default case to fall through.
- * "__down_failed" is a special asm handler that calls the C
- * routine that actually waits. See arch/i386/kernel/semaphore.c
+ * Atomically decrement the semaphore's count.  If it goes negative,
+ * block the calling thread in the TASK_UNINTERRUPTIBLE state.
  */
-static __inline__ void down(struct semaphore * sem)
+static inline void down(struct semaphore * sem)
 {
 	unsigned long flags;
-	int temp;
+	long count;

-#ifdef WAITQUEUE_DEBUG
-	CHECK_MAGIC(sem->__magic);
-#endif
+	might_sleep();
 	local_irq_save(flags);
 	__asm__ __volatile__ (
 		"# down			\n\t"
...
@@ -140,7 +100,7 @@ static __inline__ void down(struct semaphore * sem)
 		LOAD"	%0, @%1;	\n\t"
 		"addi	%0, #-1;	\n\t"
 		STORE"	%0, @%1;	\n\t"
-		: "=&r" (temp)
+		: "=&r" (count)
 		: "r" (&sem->count)
 		: "memory"
 #ifdef CONFIG_CHIP_M32700_TS1
...
@@ -149,7 +109,7 @@ static __inline__ void down(struct semaphore * sem)
 	);
 	local_irq_restore(flags);

-	if (temp < 0)
+	if (unlikely(count < 0))
 		__down(sem);
 }
...
@@ -157,16 +117,13 @@ static __inline__ void down(struct semaphore * sem)
  * Interruptible try to acquire a semaphore.  If we obtained
  * it, return zero.  If we were interrupted, returns -EINTR
  */
-static __inline__ int down_interruptible(struct semaphore * sem)
+static inline int down_interruptible(struct semaphore * sem)
 {
 	unsigned long flags;
-	int temp;
+	long count;
 	int result = 0;

-#ifdef WAITQUEUE_DEBUG
-	CHECK_MAGIC(sem->__magic);
-#endif
+	might_sleep();
 	local_irq_save(flags);
 	__asm__ __volatile__ (
 		"# down_interruptible	\n\t"
...
@@ -174,7 +131,7 @@ static __inline__ int down_interruptible(struct semaphore * sem)
 		LOAD"	%0, @%1;	\n\t"
 		"addi	%0, #-1;	\n\t"
 		STORE"	%0, @%1;	\n\t"
-		: "=&r" (temp)
+		: "=&r" (count)
 		: "r" (&sem->count)
 		: "memory"
 #ifdef CONFIG_CHIP_M32700_TS1
...
@@ -183,7 +140,7 @@ static __inline__ int down_interruptible(struct semaphore * sem)
 	);
 	local_irq_restore(flags);

-	if (temp < 0)
+	if (unlikely(count < 0))
 		result = __down_interruptible(sem);

 	return result;
...
@@ -193,16 +150,12 @@ static __inline__ int down_interruptible(struct semaphore * sem)
  * Non-blockingly attempt to down() a semaphore.
  * Returns zero if we acquired it
  */
-static __inline__ int down_trylock(struct semaphore * sem)
+static inline int down_trylock(struct semaphore * sem)
 {
 	unsigned long flags;
-	int temp;
+	long count;
 	int result = 0;

-#ifdef WAITQUEUE_DEBUG
-	CHECK_MAGIC(sem->__magic);
-#endif
-
 	local_irq_save(flags);
 	__asm__ __volatile__ (
 		"# down_trylock		\n\t"
...
@@ -210,7 +163,7 @@ static __inline__ int down_trylock(struct semaphore * sem)
 		LOAD"	%0, @%1;	\n\t"
 		"addi	%0, #-1;	\n\t"
 		STORE"	%0, @%1;	\n\t"
-		: "=&r" (temp)
+		: "=&r" (count)
 		: "r" (&sem->count)
 		: "memory"
 #ifdef CONFIG_CHIP_M32700_TS1
...
@@ -219,7 +172,7 @@ static __inline__ int down_trylock(struct semaphore * sem)
 	);
 	local_irq_restore(flags);

-	if (temp < 0)
+	if (unlikely(count < 0))
 		result = __down_trylock(sem);

 	return result;
...
@@ -231,14 +184,10 @@ static __inline__ int down_trylock(struct semaphore * sem)
  * The default case (no contention) will result in NO
  * jumps for both down() and up().
  */
-static __inline__ void up(struct semaphore * sem)
+static inline void up(struct semaphore * sem)
 {
 	unsigned long flags;
-	int temp;
+	long count;

-#ifdef WAITQUEUE_DEBUG
-	CHECK_MAGIC(sem->__magic);
-#endif
-
 	local_irq_save(flags);
 	__asm__ __volatile__ (
...
@@ -247,7 +196,7 @@ static __inline__ void up(struct semaphore * sem)
 		LOAD"	%0, @%1;	\n\t"
 		"addi	%0, #1;		\n\t"
 		STORE"	%0, @%1;	\n\t"
-		: "=&r" (temp)
+		: "=&r" (count)
 		: "r" (&sem->count)
 		: "memory"
 #ifdef CONFIG_CHIP_M32700_TS1
...
@@ -256,11 +205,10 @@ static __inline__ void up(struct semaphore * sem)
 	);
 	local_irq_restore(flags);

-	if (temp <= 0)
+	if (unlikely(count <= 0))
 		__up(sem);
 }

 #endif  /* __KERNEL__ */

 #endif  /* _ASM_M32R_SEMAPHORE_H */
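All of the down()/up() bodies above share one shape: adjust the count with interrupts off, then call the out-of-line __down()/__up() slow path only when the count shows contention. The same fast-path/slow-path split, expressed with C11 atomics instead of the m32r IRQ-off load/store sequence; slow_down() and wake_waiter() are placeholders for the kernel's sleep and wake-up paths, and sem_down()/sem_up() are illustrative names:

#include <stdatomic.h>
#include <stdio.h>

struct sem {
        atomic_long count;
};

static void slow_down(struct sem *s)    /* placeholder: would block the caller */
{
        printf("contended: would sleep\n");
}

static void wake_waiter(struct sem *s)  /* placeholder: would wake a sleeper */
{
        printf("would wake a waiter\n");
}

static void sem_down(struct sem *s)
{
        long count = atomic_fetch_sub(&s->count, 1) - 1;

        if (count < 0)          /* same test as "if (unlikely(count < 0))" above */
                slow_down(s);
}

static void sem_up(struct sem *s)
{
        long count = atomic_fetch_add(&s->count, 1) + 1;

        if (count <= 0)         /* a waiter was queued while the count was held */
                wake_waiter(s);
}

int main(void)
{
        struct sem s;

        atomic_init(&s.count, 1);
        sem_down(&s);           /* uncontended: no slow path taken */
        sem_up(&s);
        return 0;
}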
include/asm-m32r/spinlock.h
View file @
72747577
#ifndef _ASM_M32R_SPINLOCK_H
#ifndef _ASM_M32R_SPINLOCK_H
#define _ASM_M32R_SPINLOCK_H
#define _ASM_M32R_SPINLOCK_H
/* $Id$ */
/*
/*
* linux/include/asm-m32r/spinlock.h
* linux/include/asm-m32r/spinlock.h
* orig : i386 2.4.10
*
*
* M32R version:
* M32R version:
* Copyright (C) 2001, 2002 Hitoshi Yamamoto
* Copyright (C) 2001, 2002 Hitoshi Yamamoto
* Copyright (C) 2004 Hirokazu Takata <takata at linux-m32r.org>
*/
*/
#include <linux/config.h>
/* CONFIG_DEBUG_SPINLOCK, CONFIG_SMP */
#include <linux/config.h>
/* CONFIG_DEBUG_SPINLOCK, CONFIG_SMP */
...
@@ -41,6 +39,9 @@ typedef struct {
...
@@ -41,6 +39,9 @@ typedef struct {
#if SPINLOCK_DEBUG
#if SPINLOCK_DEBUG
unsigned
magic
;
unsigned
magic
;
#endif
#endif
#ifdef CONFIG_PREEMPT
unsigned
int
break_lock
;
#endif
}
spinlock_t
;
}
spinlock_t
;
#define SPINLOCK_MAGIC 0xdead4ead
#define SPINLOCK_MAGIC 0xdead4ead
...
@@ -66,22 +67,17 @@ typedef struct {
...
@@ -66,22 +67,17 @@ typedef struct {
#define spin_unlock_wait(x) do { barrier(); } while(spin_is_locked(x))
#define spin_unlock_wait(x) do { barrier(); } while(spin_is_locked(x))
#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)
#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)
/*
/**
* This works. Despite all the confusion.
* _raw_spin_trylock - Try spin lock and return a result
* @lock: Pointer to the lock variable
*
* _raw_spin_trylock() tries to get the lock and returns a result.
* On the m32r, the result value is 1 (= Success) or 0 (= Failure).
*/
*/
static
inline
int
_raw_spin_trylock
(
spinlock_t
*
lock
)
/*======================================================================*
* Try spin lock
*======================================================================*
* Argument:
* arg0: lock
* Return value:
* =1: Success
* =0: Failure
*======================================================================*/
static
__inline__
int
_raw_spin_trylock
(
spinlock_t
*
lock
)
{
{
int
oldval
;
int
oldval
;
unsigned
long
tmp1
,
tmp2
;
/*
/*
* lock->lock : =1 : unlock
* lock->lock : =1 : unlock
...
@@ -93,16 +89,16 @@ static __inline__ int _raw_spin_trylock(spinlock_t *lock)
...
@@ -93,16 +89,16 @@ static __inline__ int _raw_spin_trylock(spinlock_t *lock)
*/
*/
__asm__
__volatile__
(
__asm__
__volatile__
(
"# spin_trylock
\n\t
"
"# spin_trylock
\n\t
"
"ldi
r4
, #0;
\n\t
"
"ldi
%1
, #0;
\n\t
"
"mvfc
r5
, psw;
\n\t
"
"mvfc
%2
, psw;
\n\t
"
"clrpsw #0x40 -> nop;
\n\t
"
"clrpsw #0x40 -> nop;
\n\t
"
DCACHE_CLEAR
(
"%0"
,
"r6"
,
"%
1
"
)
DCACHE_CLEAR
(
"%0"
,
"r6"
,
"%
3
"
)
"lock %0, @%
1
;
\n\t
"
"lock %0, @%
3
;
\n\t
"
"unlock
r4, @%1
;
\n\t
"
"unlock
%1, @%3
;
\n\t
"
"mvtc
r5
, psw;
\n\t
"
"mvtc
%2
, psw;
\n\t
"
:
"=&r"
(
oldval
)
:
"=&r"
(
oldval
)
,
"=&r"
(
tmp1
),
"=&r"
(
tmp2
)
:
"r"
(
&
lock
->
lock
)
:
"r"
(
&
lock
->
lock
)
:
"memory"
,
"r4"
,
"r5"
:
"memory"
#ifdef CONFIG_CHIP_M32700_TS1
#ifdef CONFIG_CHIP_M32700_TS1
,
"r6"
,
"r6"
#endif
/* CONFIG_CHIP_M32700_TS1 */
#endif
/* CONFIG_CHIP_M32700_TS1 */
...
@@ -111,8 +107,10 @@ static __inline__ int _raw_spin_trylock(spinlock_t *lock)
...
@@ -111,8 +107,10 @@ static __inline__ int _raw_spin_trylock(spinlock_t *lock)
return
(
oldval
>
0
);
return
(
oldval
>
0
);
}
}
static
__inline__
void
_raw_spin_lock
(
spinlock_t
*
lock
)
static
inline
void
_raw_spin_lock
(
spinlock_t
*
lock
)
{
{
unsigned
long
tmp0
,
tmp1
;
#if SPINLOCK_DEBUG
#if SPINLOCK_DEBUG
__label__
here
;
__label__
here
;
here:
here:
...
@@ -135,31 +133,31 @@ static __inline__ void _raw_spin_lock(spinlock_t *lock)
...
@@ -135,31 +133,31 @@ static __inline__ void _raw_spin_lock(spinlock_t *lock)
"# spin_lock
\n\t
"
"# spin_lock
\n\t
"
".fillinsn
\n
"
".fillinsn
\n
"
"1:
\n\t
"
"1:
\n\t
"
"mvfc
r5
, psw;
\n\t
"
"mvfc
%1
, psw;
\n\t
"
"clrpsw #0x40 -> nop;
\n\t
"
"clrpsw #0x40 -> nop;
\n\t
"
DCACHE_CLEAR
(
"
r4"
,
"r6"
,
"%0
"
)
DCACHE_CLEAR
(
"
%0"
,
"r6"
,
"%2
"
)
"lock
r4, @%0
;
\n\t
"
"lock
%0, @%2
;
\n\t
"
"addi
r4
, #-1;
\n\t
"
"addi
%0
, #-1;
\n\t
"
"unlock
r4, @%0
;
\n\t
"
"unlock
%0, @%2
;
\n\t
"
"mvtc
r5
, psw;
\n\t
"
"mvtc
%1
, psw;
\n\t
"
"bltz
r4
, 2f;
\n\t
"
"bltz
%0
, 2f;
\n\t
"
LOCK_SECTION_START
(
".balign 4
\n\t
"
)
LOCK_SECTION_START
(
".balign 4
\n\t
"
)
".fillinsn
\n
"
".fillinsn
\n
"
"2:
\n\t
"
"2:
\n\t
"
"ld
r4, @%0
;
\n\t
"
"ld
%0, @%2
;
\n\t
"
"bgtz
r4
, 1b;
\n\t
"
"bgtz
%0
, 1b;
\n\t
"
"bra 2b;
\n\t
"
"bra 2b;
\n\t
"
LOCK_SECTION_END
LOCK_SECTION_END
:
/* no outputs */
:
"=&r"
(
tmp0
),
"=&r"
(
tmp1
)
:
"r"
(
&
lock
->
lock
)
:
"r"
(
&
lock
->
lock
)
:
"memory"
,
"r4"
,
"r5"
:
"memory"
#ifdef CONFIG_CHIP_M32700_TS1
#ifdef CONFIG_CHIP_M32700_TS1
,
"r6"
,
"r6"
#endif
/* CONFIG_CHIP_M32700_TS1 */
#endif
/* CONFIG_CHIP_M32700_TS1 */
);
);
}
}
static
__inline__
void
_raw_spin_unlock
(
spinlock_t
*
lock
)
static
inline
void
_raw_spin_unlock
(
spinlock_t
*
lock
)
{
{
#if SPINLOCK_DEBUG
#if SPINLOCK_DEBUG
BUG_ON
(
lock
->
magic
!=
SPINLOCK_MAGIC
);
BUG_ON
(
lock
->
magic
!=
SPINLOCK_MAGIC
);
...
@@ -184,6 +182,9 @@ typedef struct {
...
@@ -184,6 +182,9 @@ typedef struct {
#if SPINLOCK_DEBUG
#if SPINLOCK_DEBUG
unsigned
magic
;
unsigned
magic
;
#endif
#endif
#ifdef CONFIG_PREEMPT
unsigned
int
break_lock
;
#endif
}
rwlock_t
;
}
rwlock_t
;
#define RWLOCK_MAGIC 0xdeaf1eed
#define RWLOCK_MAGIC 0xdeaf1eed
...
@@ -211,8 +212,10 @@ typedef struct {
...
@@ -211,8 +212,10 @@ typedef struct {
  */
 /* the spinlock helpers are in arch/i386/kernel/semaphore.c */

-static __inline__ void _raw_read_lock(rwlock_t *rw)
+static inline void _raw_read_lock(rwlock_t *rw)
 {
+	unsigned long tmp0, tmp1;
+
 #if SPINLOCK_DEBUG
 	BUG_ON(rw->magic != RWLOCK_MAGIC);
 #endif
...
@@ -231,40 +234,42 @@ static __inline__ void _raw_read_lock(rwlock_t *rw)
 		"# read_lock			\n\t"
 		".fillinsn			\n"
 		"1:				\n\t"
-		"mvfc	r5, psw;		\n\t"
+		"mvfc	%1, psw;		\n\t"
 		"clrpsw	#0x40 -> nop;		\n\t"
-		DCACHE_CLEAR("r4", "r6", "%0")
-		"lock	r4, @%0;		\n\t"
-		"addi	r4, #-1;		\n\t"
-		"unlock	r4, @%0;		\n\t"
-		"mvtc	r5, psw;		\n\t"
-		"bltz	r4, 2f;			\n\t"
+		DCACHE_CLEAR("%0", "r6", "%2")
+		"lock	%0, @%2;		\n\t"
+		"addi	%0, #-1;		\n\t"
+		"unlock	%0, @%2;		\n\t"
+		"mvtc	%1, psw;		\n\t"
+		"bltz	%0, 2f;			\n\t"
 		LOCK_SECTION_START(".balign 4 \n\t")
 		".fillinsn			\n"
 		"2:				\n\t"
 		"clrpsw	#0x40 -> nop;		\n\t"
-		DCACHE_CLEAR("r4", "r6", "%0")
-		"lock	r4, @%0;		\n\t"
-		"addi	r4, #1;			\n\t"
-		"unlock	r4, @%0;		\n\t"
-		"mvtc	r5, psw;		\n\t"
+		DCACHE_CLEAR("%0", "r6", "%2")
+		"lock	%0, @%2;		\n\t"
+		"addi	%0, #1;			\n\t"
+		"unlock	%0, @%2;		\n\t"
+		"mvtc	%1, psw;		\n\t"
 		".fillinsn			\n"
 		"3:				\n\t"
-		"ld	r4, @%0;		\n\t"
-		"bgtz	r4, 1b;			\n\t"
+		"ld	%0, @%2;		\n\t"
+		"bgtz	%0, 1b;			\n\t"
 		"bra	3b;			\n\t"
 		LOCK_SECTION_END
-		: /* no outputs */
+		: "=&r" (tmp0), "=&r" (tmp1)
 		: "r" (&rw->lock)
-		: "memory", "r4", "r5"
+		: "memory"
 #ifdef CONFIG_CHIP_M32700_TS1
 		, "r6"
 #endif	/* CONFIG_CHIP_M32700_TS1 */
 	);
 }

-static __inline__ void _raw_write_lock(rwlock_t *rw)
+static inline void _raw_write_lock(rwlock_t *rw)
 {
+	unsigned long tmp0, tmp1, tmp2;
+
 #if SPINLOCK_DEBUG
 	BUG_ON(rw->magic != RWLOCK_MAGIC);
 #endif
...
@@ -281,85 +286,91 @@ static __inline__ void _raw_write_lock(rwlock_t *rw)
 	 */
 	__asm__ __volatile__ (
 		"# write_lock					\n\t"
-		"seth	r5, #high(" RW_LOCK_BIAS_STR ");	\n\t"
-		"or3	r5, r5, #low(" RW_LOCK_BIAS_STR ");	\n\t"
+		"seth	%1, #high(" RW_LOCK_BIAS_STR ");	\n\t"
+		"or3	%1, %1, #low(" RW_LOCK_BIAS_STR ");	\n\t"
 		".fillinsn					\n"
 		"1:						\n\t"
-		"mvfc	r6, psw;				\n\t"
+		"mvfc	%2, psw;				\n\t"
 		"clrpsw	#0x40 -> nop;				\n\t"
-		DCACHE_CLEAR("r4", "r7", "%0")
-		"lock	r4, @%0;				\n\t"
-		"sub	r4, r5;					\n\t"
-		"unlock	r4, @%0;				\n\t"
-		"mvtc	r6, psw;				\n\t"
-		"bnez	r4, 2f;					\n\t"
+		DCACHE_CLEAR("%0", "r7", "%3")
+		"lock	%0, @%3;				\n\t"
+		"sub	%0, %1;					\n\t"
+		"unlock	%0, @%3;				\n\t"
+		"mvtc	%2, psw;				\n\t"
+		"bnez	%0, 2f;					\n\t"
 		LOCK_SECTION_START(".balign 4 \n\t")
 		".fillinsn					\n"
 		"2:						\n\t"
 		"clrpsw	#0x40 -> nop;				\n\t"
-		DCACHE_CLEAR("r4", "r7", "%0")
-		"lock	r4, @%0;				\n\t"
-		"add	r4, r5;					\n\t"
-		"unlock	r4, @%0;				\n\t"
-		"mvtc	r6, psw;				\n\t"
+		DCACHE_CLEAR("%0", "r7", "%3")
+		"lock	%0, @%3;				\n\t"
+		"add	%0, %1;					\n\t"
+		"unlock	%0, @%3;				\n\t"
+		"mvtc	%2, psw;				\n\t"
 		".fillinsn					\n"
 		"3:						\n\t"
-		"ld	r4, @%0;				\n\t"
-		"beq	r4, r5, 1b;				\n\t"
+		"ld	%0, @%3;				\n\t"
+		"beq	%0, %1, 1b;				\n\t"
 		"bra	3b;					\n\t"
 		LOCK_SECTION_END
-		: /* no outputs */
+		: "=&r" (tmp0), "=&r" (tmp1), "=&r" (tmp2)
 		: "r" (&rw->lock)
-		: "memory", "r4", "r5", "r6"
+		: "memory"
 #ifdef CONFIG_CHIP_M32700_TS1
 		, "r7"
 #endif	/* CONFIG_CHIP_M32700_TS1 */
 	);
 }

-static __inline__ void _raw_read_unlock(rwlock_t *rw)
+static inline void _raw_read_unlock(rwlock_t *rw)
 {
+	unsigned long tmp0, tmp1;
+
 	__asm__ __volatile__ (
 		"# read_unlock			\n\t"
-		"mvfc	r5, psw;		\n\t"
+		"mvfc	%1, psw;		\n\t"
 		"clrpsw	#0x40 -> nop;		\n\t"
-		DCACHE_CLEAR("r4", "r6", "%0")
-		"lock	r4, @%0;		\n\t"
-		"addi	r4, #1;			\n\t"
-		"unlock	r4, @%0;		\n\t"
-		"mvtc	r5, psw;		\n\t"
-		: /* no outputs */
+		DCACHE_CLEAR("%0", "r6", "%2")
+		"lock	%0, @%2;		\n\t"
+		"addi	%0, #1;			\n\t"
+		"unlock	%0, @%2;		\n\t"
+		"mvtc	%1, psw;		\n\t"
+		: "=&r" (tmp0), "=&r" (tmp1)
 		: "r" (&rw->lock)
-		: "memory", "r4", "r5"
+		: "memory"
 #ifdef CONFIG_CHIP_M32700_TS1
 		, "r6"
 #endif	/* CONFIG_CHIP_M32700_TS1 */
 	);
 }

-static __inline__ void _raw_write_unlock(rwlock_t *rw)
+static inline void _raw_write_unlock(rwlock_t *rw)
 {
+	unsigned long tmp0, tmp1, tmp2;
+
 	__asm__ __volatile__ (
 		"# write_unlock					\n\t"
-		"seth	r5, #high(" RW_LOCK_BIAS_STR ");	\n\t"
-		"or3	r5, r5, #low(" RW_LOCK_BIAS_STR ");	\n\t"
-		"mvfc	r6, psw;				\n\t"
+		"seth	%1, #high(" RW_LOCK_BIAS_STR ");	\n\t"
+		"or3	%1, %1, #low(" RW_LOCK_BIAS_STR ");	\n\t"
+		"mvfc	%2, psw;				\n\t"
 		"clrpsw	#0x40 -> nop;				\n\t"
-		DCACHE_CLEAR("r4", "r7", "%0")
-		"lock	r4, @%0;				\n\t"
-		"add	r4, r5;					\n\t"
-		"unlock	r4, @%0;				\n\t"
-		"mvtc	r6, psw;				\n\t"
-		: /* no outputs */
+		DCACHE_CLEAR("%0", "r7", "%3")
+		"lock	%0, @%3;				\n\t"
+		"add	%0, %1;					\n\t"
+		"unlock	%0, @%3;				\n\t"
+		"mvtc	%2, psw;				\n\t"
+		: "=&r" (tmp0), "=&r" (tmp1), "=&r" (tmp2)
 		: "r" (&rw->lock)
-		: "memory", "r4", "r5", "r6"
+		: "memory"
 #ifdef CONFIG_CHIP_M32700_TS1
 		, "r7"
 #endif	/* CONFIG_CHIP_M32700_TS1 */
 	);
 }

-static __inline__ int _raw_write_trylock(rwlock_t *lock)
+#define _raw_read_trylock(lock) generic_raw_read_trylock(lock)
+
+static inline int _raw_write_trylock(rwlock_t *lock)
 {
 	atomic_t *count = (atomic_t *)lock;
 	if (atomic_sub_and_test(RW_LOCK_BIAS, count))
...
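The substance of these hunks is easy to lose in the renaming: the scratch values the old asm kept in hard-coded r4/r5 (plus r6 on the write paths) become compiler-allocated temporaries passed in through early-clobber output operands, so the clobber list shrinks to just "memory". Below is a minimal, architecture-neutral sketch of that operand style only; the asm body is intentionally left empty (on m32r it would hold the lock/addi/unlock sequence shown above) and the function name is invented, so this is not the kernel's code.

	/* Sketch: "=&r" early-clobber temporaries instead of hard-coded,
	 * clobbered registers. */
	static inline void sketch_read_lock(volatile int *lockp)
	{
		unsigned long tmp0, tmp1;

		__asm__ __volatile__(
			""	/* m32r would place the lock/addi/unlock loop
				 * here, writing %0 and %1 rather than r4/r5 */
			: "=&r" (tmp0), "=&r" (tmp1)	/* compiler picks regs */
			: "r" (lockp)			/* %2: counter address */
			: "memory");
		(void) tmp0;
		(void) tmp1;
	}

Because tmp0/tmp1 are ordinary outputs, the compiler is free to schedule them into whatever registers are convenient at each call site, which is exactly what listing r4/r5 as clobbers used to prevent.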
include/asm-mips/vr41xx/vrc4173.h
@@ -48,6 +48,8 @@
 /*
  * PCI I/O accesses
  */
+#ifdef CONFIG_VRC4173
+
 extern unsigned long vrc4173_io_offset;

 #define set_vrc4173_io_offset(offset)	do { vrc4173_io_offset = (offset); } while (0)
...
@@ -74,6 +76,34 @@ extern unsigned long vrc4173_io_offset;
 #define vrc4173_insw(port,addr,count)	insw(vrc4173_io_offset+(port),(addr),(count))
 #define vrc4173_insl(port,addr,count)	insl(vrc4173_io_offset+(port),(addr),(count))

+#else
+
+#define set_vrc4173_io_offset(offset)	do {} while (0)
+
+#define vrc4173_outb(val,port)		do {} while (0)
+#define vrc4173_outw(val,port)		do {} while (0)
+#define vrc4173_outl(val,port)		do {} while (0)
+#define vrc4173_outb_p(val,port)	do {} while (0)
+#define vrc4173_outw_p(val,port)	do {} while (0)
+#define vrc4173_outl_p(val,port)	do {} while (0)
+
+#define vrc4173_inb(port)		0
+#define vrc4173_inw(port)		0
+#define vrc4173_inl(port)		0
+#define vrc4173_inb_p(port)		0
+#define vrc4173_inw_p(port)		0
+#define vrc4173_inl_p(port)		0
+
+#define vrc4173_outsb(port,addr,count)	do {} while (0)
+#define vrc4173_outsw(port,addr,count)	do {} while (0)
+#define vrc4173_outsl(port,addr,count)	do {} while (0)
+#define vrc4173_insb(port,addr,count)	do {} while (0)
+#define vrc4173_insw(port,addr,count)	do {} while (0)
+#define vrc4173_insl(port,addr,count)	do {} while (0)
+
+#endif
+
 /*
  * Clock Mask Unit
  */
...
@@ -92,9 +122,77 @@ typedef enum vrc4173_clock {
 	VRC4173_48MHz_CLOCK,
 } vrc4173_clock_t;

+#ifdef CONFIG_VRC4173
 extern void vrc4173_supply_clock(vrc4173_clock_t clock);
 extern void vrc4173_mask_clock(vrc4173_clock_t clock);
+#else
+static inline void vrc4173_supply_clock(vrc4173_clock_t clock) {}
+static inline void vrc4173_mask_clock(vrc4173_clock_t clock) {}
+#endif
+
+/*
+ * Interupt Control Unit
+ */
+
+#define VRC4173_PIUINT_COMMAND		0x0040
+#define VRC4173_PIUINT_DATA		0x0020
+#define VRC4173_PIUINT_PAGE1		0x0010
+#define VRC4173_PIUINT_PAGE0		0x0008
+#define VRC4173_PIUINT_DATALOST		0x0004
+#define VRC4173_PIUINT_STATUSCHANGE	0x0001
+
+#ifdef CONFIG_VRC4173
+extern void vrc4173_enable_piuint(uint16_t mask);
+extern void vrc4173_disable_piuint(uint16_t mask);
+#else
+static inline void vrc4173_enable_piuint(uint16_t mask) {}
+static inline void vrc4173_disable_piuint(uint16_t mask) {}
+#endif
+
+#define VRC4173_AIUINT_INPUT_DMAEND	0x0800
+#define VRC4173_AIUINT_INPUT_DMAHALT	0x0400
+#define VRC4173_AIUINT_INPUT_DATALOST	0x0200
+#define VRC4173_AIUINT_INPUT_DATA	0x0100
+#define VRC4173_AIUINT_OUTPUT_DMAEND	0x0008
+#define VRC4173_AIUINT_OUTPUT_DMAHALT	0x0004
+#define VRC4173_AIUINT_OUTPUT_NODATA	0x0002
+
+#ifdef CONFIG_VRC4173
+extern void vrc4173_enable_aiuint(uint16_t mask);
+extern void vrc4173_disable_aiuint(uint16_t mask);
+#else
+static inline void vrc4173_enable_aiuint(uint16_t mask) {}
+static inline void vrc4173_disable_aiuint(uint16_t mask) {}
+#endif
+
+#define VRC4173_KIUINT_DATALOST		0x0004
+#define VRC4173_KIUINT_DATAREADY	0x0002
+#define VRC4173_KIUINT_SCAN		0x0001
+
+#ifdef CONFIG_VRC4173
+extern void vrc4173_enable_kiuint(uint16_t mask);
+extern void vrc4173_disable_kiuint(uint16_t mask);
+#else
+static inline void vrc4173_enable_kiuint(uint16_t mask) {}
+static inline void vrc4173_disable_kiuint(uint16_t mask) {}
+#endif
+
 /*
  * General-Purpose I/O Unit
  */
...
@@ -109,6 +207,14 @@ typedef enum vrc4173_function {
 	GPIO_16_20PINS,
 } vrc4173_function_t;

+#ifdef CONFIG_VRC4173
 extern void vrc4173_select_function(vrc4173_function_t function);
+#else
+static inline void vrc4173_select_function(vrc4173_function_t function) {}
+#endif

 #endif /* __NEC_VRC4173_H */
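The point of the new #else branches is that platform and driver code can call the VRC4173 helpers unconditionally; when CONFIG_VRC4173 is not set they collapse to no-ops (or to 0 for the reads) at compile time. A hedged sketch of such a caller follows, using only names from the header above plus an invented function name, port and value:

	static void example_vrc4173_board_init(void)
	{
		/* all of these compile away when CONFIG_VRC4173 is not set */
		vrc4173_select_function(GPIO_16_20PINS);
		vrc4173_supply_clock(VRC4173_48MHz_CLOCK);
		vrc4173_enable_piuint(VRC4173_PIUINT_COMMAND | VRC4173_PIUINT_DATA);
		vrc4173_outw(0x0001, 0x0100);	/* value and port are illustrative */
	}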
include/asm-ppc64/eeh.h
@@ -71,16 +71,10 @@ int eeh_set_option(struct pci_dev *dev, int options);
 /*
  * EEH_POSSIBLE_ERROR() -- test for possible MMIO failure.
  *
- * Order this macro for performance.
- * If EEH is off for a device and it is a memory BAR, ioremap will
- * map it to the IOREGION.  In this case addr == vaddr and since these
- * should be in registers we compare them first.  Next we check for
- * ff's which indicates a (very) possible failure.
- *
  * If this macro yields TRUE, the caller relays to eeh_check_failure()
  * which does further tests out of line.
  */
-#define EEH_POSSIBLE_IO_ERROR(val, type)	((val) == (type)~0)
+#define EEH_POSSIBLE_ERROR(val, type)	((val) == (type)~0)

 /*
  * Reads from a device which has been isolated by EEH will return
...
@@ -89,21 +83,13 @@ int eeh_set_option(struct pci_dev *dev, int options);
  */
 #define EEH_IO_ERROR_VALUE(size)	(~0U >> ((4 - (size)) * 8))

-/*
- * The vaddr will equal the addr if EEH checking is disabled for
- * this device.  This is because eeh_ioremap() will not have
- * remapped to 0xA0, and thus both vaddr and addr will be 0xE0...
- */
-#define EEH_POSSIBLE_ERROR(addr, vaddr, val, type) \
-		((vaddr) != (addr) && EEH_POSSIBLE_IO_ERROR(val, type))
-
 /*
  * MMIO read/write operations with EEH support.
  */
 static inline u8 eeh_readb(const volatile void __iomem *addr) {
 	volatile u8 *vaddr = (volatile u8 __force *) addr;
 	u8 val = in_8(vaddr);
-	if (EEH_POSSIBLE_ERROR(addr, vaddr, val, u8))
+	if (EEH_POSSIBLE_ERROR(val, u8))
 		return eeh_check_failure(addr, val);
 	return val;
 }
...
@@ -115,7 +101,7 @@ static inline void eeh_writeb(u8 val, volatile void __iomem *addr) {
 static inline u16 eeh_readw(const volatile void __iomem *addr) {
 	volatile u16 *vaddr = (volatile u16 __force *) addr;
 	u16 val = in_le16(vaddr);
-	if (EEH_POSSIBLE_ERROR(addr, vaddr, val, u16))
+	if (EEH_POSSIBLE_ERROR(val, u16))
 		return eeh_check_failure(addr, val);
 	return val;
 }
...
@@ -126,7 +112,7 @@ static inline void eeh_writew(u16 val, volatile void __iomem *addr) {
 static inline u16 eeh_raw_readw(const volatile void __iomem *addr) {
 	volatile u16 *vaddr = (volatile u16 __force *) addr;
 	u16 val = in_be16(vaddr);
-	if (EEH_POSSIBLE_ERROR(addr, vaddr, val, u16))
+	if (EEH_POSSIBLE_ERROR(val, u16))
 		return eeh_check_failure(addr, val);
 	return val;
 }
...
@@ -138,7 +124,7 @@ static inline void eeh_raw_writew(u16 val, volatile void __iomem *addr) {
 static inline u32 eeh_readl(const volatile void __iomem *addr) {
 	volatile u32 *vaddr = (volatile u32 __force *) addr;
 	u32 val = in_le32(vaddr);
-	if (EEH_POSSIBLE_ERROR(addr, vaddr, val, u32))
+	if (EEH_POSSIBLE_ERROR(val, u32))
 		return eeh_check_failure(addr, val);
 	return val;
 }
...
@@ -149,7 +135,7 @@ static inline void eeh_writel(u32 val, volatile void __iomem *addr) {
 static inline u32 eeh_raw_readl(const volatile void __iomem *addr) {
 	volatile u32 *vaddr = (volatile u32 __force *) addr;
 	u32 val = in_be32(vaddr);
-	if (EEH_POSSIBLE_ERROR(addr, vaddr, val, u32))
+	if (EEH_POSSIBLE_ERROR(val, u32))
 		return eeh_check_failure(addr, val);
 	return val;
 }
...
@@ -161,7 +147,7 @@ static inline void eeh_raw_writel(u32 val, volatile void __iomem *addr) {
 static inline u64 eeh_readq(const volatile void __iomem *addr) {
 	volatile u64 *vaddr = (volatile u64 __force *) addr;
 	u64 val = in_le64(vaddr);
-	if (EEH_POSSIBLE_ERROR(addr, vaddr, val, u64))
+	if (EEH_POSSIBLE_ERROR(val, u64))
 		return eeh_check_failure(addr, val);
 	return val;
 }
...
@@ -172,7 +158,7 @@ static inline void eeh_writeq(u64 val, volatile void __iomem *addr) {
 static inline u64 eeh_raw_readq(const volatile void __iomem *addr) {
 	volatile u64 *vaddr = (volatile u64 __force *) addr;
 	u64 val = in_be64(vaddr);
-	if (EEH_POSSIBLE_ERROR(addr, vaddr, val, u64))
+	if (EEH_POSSIBLE_ERROR(val, u64))
 		return eeh_check_failure(addr, val);
 	return val;
 }
...
@@ -209,7 +195,7 @@ static inline void eeh_memset_io(volatile void __iomem *addr, int c, unsigned lo
 }
 static inline void eeh_memcpy_fromio(void *dest, const volatile void __iomem *src, unsigned long n) {
 	void *vsrc = (void __force *) src;
-	void *vsrcsave = vsrc, *destsave = dest;
+	void *destsave = dest;
 	const volatile void __iomem *srcsave = src;
 	unsigned long nsave = n;
...
@@ -240,8 +226,7 @@ static inline void eeh_memcpy_fromio(void *dest, const volatile void __iomem *sr
 	 * were copied. Check all four bytes.
 	 */
 	if ((nsave >= 4) &&
-	    (EEH_POSSIBLE_ERROR(srcsave, vsrcsave, (*((u32 *) destsave + nsave - 4)),
-				u32))) {
+	    (EEH_POSSIBLE_ERROR((*((u32 *) destsave + nsave - 4)), u32))) {
 		eeh_check_failure(srcsave, (*((u32 *) destsave + nsave - 4)));
 	}
 }
...
@@ -281,7 +266,7 @@ static inline u8 eeh_inb(unsigned long port) {
 	if (!_IO_IS_VALID(port))
 		return ~0;
 	val = in_8((u8 *)(port+pci_io_base));
-	if (EEH_POSSIBLE_IO_ERROR(val, u8))
+	if (EEH_POSSIBLE_ERROR(val, u8))
 		return eeh_check_failure((void __iomem *)(port), val);
 	return val;
 }
...
@@ -296,7 +281,7 @@ static inline u16 eeh_inw(unsigned long port) {
 	if (!_IO_IS_VALID(port))
 		return ~0;
 	val = in_le16((u16 *)(port+pci_io_base));
-	if (EEH_POSSIBLE_IO_ERROR(val, u16))
+	if (EEH_POSSIBLE_ERROR(val, u16))
 		return eeh_check_failure((void __iomem *)(port), val);
 	return val;
 }
...
@@ -311,7 +296,7 @@ static inline u32 eeh_inl(unsigned long port) {
 	if (!_IO_IS_VALID(port))
 		return ~0;
 	val = in_le32((u32 *)(port+pci_io_base));
-	if (EEH_POSSIBLE_IO_ERROR(val, u32))
+	if (EEH_POSSIBLE_ERROR(val, u32))
 		return eeh_check_failure((void __iomem *)(port), val);
 	return val;
 }
...
@@ -324,19 +309,19 @@ static inline void eeh_outl(u32 val, unsigned long port) {
 /* in-string eeh macros */
 static inline void eeh_insb(unsigned long port, void *buf, int ns) {
 	_insb((u8 *)(port+pci_io_base), buf, ns);
-	if (EEH_POSSIBLE_IO_ERROR((*(((u8*)buf)+ns-1)), u8))
+	if (EEH_POSSIBLE_ERROR((*(((u8*)buf)+ns-1)), u8))
 		eeh_check_failure((void __iomem *)(port), *(u8*)buf);
 }

 static inline void eeh_insw_ns(unsigned long port, void *buf, int ns) {
 	_insw_ns((u16 *)(port+pci_io_base), buf, ns);
-	if (EEH_POSSIBLE_IO_ERROR((*(((u16*)buf)+ns-1)), u16))
+	if (EEH_POSSIBLE_ERROR((*(((u16*)buf)+ns-1)), u16))
 		eeh_check_failure((void __iomem *)(port), *(u16*)buf);
 }

 static inline void eeh_insl_ns(unsigned long port, void *buf, int nl) {
 	_insl_ns((u32 *)(port+pci_io_base), buf, nl);
-	if (EEH_POSSIBLE_IO_ERROR((*(((u32*)buf)+nl-1)), u32))
+	if (EEH_POSSIBLE_ERROR((*(((u32*)buf)+nl-1)), u32))
 		eeh_check_failure((void __iomem *)(port), *(u32*)buf);
 }
...
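After this cleanup there is a single predicate: a value that reads back as all-ones for its width may mean the device has been isolated by EEH, and only then is the slow, out-of-line eeh_check_failure() consulted. A small illustrative helper (invented name; kernel integer types assumed to be in scope as in the header above) showing which values the simplified macro flags:

	/* illustrative only: (type)~0 is the all-ones value of that width */
	static inline int example_possible_eeh(u8 b, u16 w, u32 l)
	{
		return EEH_POSSIBLE_ERROR(b, u8)	/* true only for 0xff */
		    || EEH_POSSIBLE_ERROR(w, u16)	/* true only for 0xffff */
		    || EEH_POSSIBLE_ERROR(l, u32);	/* true only for 0xffffffff */
	}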
include/asm-x86_64/msi.h
@@ -7,10 +7,11 @@
 #define ASM_MSI_H

 #include <asm/desc.h>
+#include <asm/smp.h>

 #define LAST_DEVICE_VECTOR		232
 #define MSI_DEST_MODE			MSI_LOGICAL_MODE
 #define MSI_TARGET_CPU_SHIFT		12
-#define MSI_TARGET_CPU			TARGET_CPUS
+#define MSI_TARGET_CPU			logical_smp_processor_id()

 #endif /* ASM_MSI_H */
include/asm-x86_64/smp.h
@@ -133,5 +133,13 @@ static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
 })
 #endif

+#ifndef __ASSEMBLY__
+static __inline int logical_smp_processor_id(void)
+{
+	/* we don't want to mark this access volatile - bad code generation */
+	return GET_APIC_LOGICAL_ID(*(unsigned long *)(APIC_BASE+APIC_LDR));
+}
+#endif
+
 #endif
include/linux/notifier.h
@@ -29,6 +29,10 @@ extern int notifier_call_chain(struct notifier_block **n, unsigned long val, voi
 #define NOTIFY_OK		0x0001		/* Suits me */
 #define NOTIFY_STOP_MASK	0x8000		/* Don't call further */
 #define NOTIFY_BAD		(NOTIFY_STOP_MASK|0x0002)	/* Bad/Veto action */
+/*
+ * Clean way to return from the notifier and stop further calls.
+ */
+#define NOTIFY_STOP		(NOTIFY_OK|NOTIFY_STOP_MASK)

 /*
  * Declared notifiers so far. I can imagine quite a few more chains
...
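NOTIFY_STOP combines NOTIFY_OK with NOTIFY_STOP_MASK, so a callback can report success and still terminate the chain walk. A hedged sketch of a callback that uses it (the callback name and event code are made up; the prototype matches the notifier_call member of struct notifier_block):

	static int example_notify(struct notifier_block *nb, unsigned long event,
				  void *data)
	{
		if (event == 1)			/* hypothetical event of interest */
			return NOTIFY_STOP;	/* handled; skip later notifiers */
		return NOTIFY_OK;		/* fine with it; keep calling */
	}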
include/linux/timex.h
@@ -55,10 +55,8 @@
 #include <linux/config.h>
 #include <linux/compiler.h>
-#include <linux/jiffies.h>
 #include <asm/param.h>
-#include <asm/io.h>

 /*
  * The following defines establish the engineering parameters of the PLL
...
kernel/kprobes.c
@@ -25,6 +25,8 @@
  *		hlists and exceptions notifier as suggested by Andi Kleen.
  * 2004-July	Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
  *		interface to access function arguments.
+ * 2004-Sep	Prasanna S Panchamukhi <prasanna@in.ibm.com> Changed Kprobes
+ *		exceptions notifier to be first on the priority list.
  */
 #include <linux/kprobes.h>
 #include <linux/spinlock.h>
...
@@ -108,6 +110,7 @@ void unregister_kprobe(struct kprobe *p)

 static struct notifier_block kprobe_exceptions_nb = {
 	.notifier_call = kprobe_exceptions_notify,
+	.priority = 0x7fffffff /* we need to notified first */
 };

 int register_jprobe(struct jprobe *jp)
...
kernel/power/swsusp.c
@@ -856,7 +856,9 @@ int swsusp_suspend(void)
 	local_irq_disable();
 	save_processor_state();
 	error = swsusp_arch_suspend();
+	/* Restore control flow magically appears here */
 	restore_processor_state();
+	restore_highmem();
 	local_irq_enable();
 	return error;
 }
...
@@ -876,8 +878,13 @@ int swsusp_resume(void)
 {
 	int error;
 	local_irq_disable();
+	/* We'll ignore saved state, but this gets preempt count (etc) right */
 	save_processor_state();
 	error = swsusp_arch_resume();
+	/* Code below is only ever reached in case of failure. Otherwise
+	 * execution continues at place where swsusp_arch_suspend was called
+	 */
+	BUG_ON(!error);
 	restore_processor_state();
 	restore_highmem();
 	local_irq_enable();
...
kernel/timer.c
@@ -36,6 +36,7 @@
 #include <asm/unistd.h>
 #include <asm/div64.h>
 #include <asm/timex.h>
+#include <asm/io.h>

 #ifdef CONFIG_TIME_INTERPOLATION
 static void time_interpolator_update(long delta_nsec);
...
mm/vmscan.c
@@ -968,12 +968,16 @@ int try_to_free_pages(struct zone **zones,
 static int balance_pgdat(pg_data_t *pgdat, int nr_pages)
 {
 	int to_free = nr_pages;
+	int all_zones_ok;
 	int priority;
 	int i;
-	int total_scanned = 0, total_reclaimed = 0;
+	int total_scanned, total_reclaimed;
 	struct reclaim_state *reclaim_state = current->reclaim_state;
 	struct scan_control sc;

+loop_again:
+	total_scanned = 0;
+	total_reclaimed = 0;
 	sc.gfp_mask = GFP_KERNEL;
 	sc.may_writepage = 0;
 	sc.nr_mapped = read_page_state(nr_mapped);
...
@@ -987,10 +991,11 @@ static int balance_pgdat(pg_data_t *pgdat, int nr_pages)
 	}

 	for (priority = DEF_PRIORITY; priority >= 0; priority--) {
-		int all_zones_ok = 1;
 		int end_zone = 0;	/* Inclusive.  0 = ZONE_DMA */
 		unsigned long lru_pages = 0;

+		all_zones_ok = 1;
+
 		if (nr_pages == 0) {
 			/*
 			 * Scan in the highmem->dma direction for the highest
...
@@ -1072,6 +1077,15 @@ static int balance_pgdat(pg_data_t *pgdat, int nr_pages)
 		 */
 		if (total_scanned && priority < DEF_PRIORITY - 2)
 			blk_congestion_wait(WRITE, HZ/10);
+
+		/*
+		 * We do this so kswapd doesn't build up large priorities for
+		 * example when it is freeing in parallel with allocators. It
+		 * matches the direct reclaim path behaviour in terms of impact
+		 * on zone->*_priority.
+		 */
+		if (total_reclaimed >= SWAP_CLUSTER_MAX)
+			break;
 	}
 out:
 	for (i = 0; i < pgdat->nr_zones; i++) {
...
@@ -1079,6 +1093,9 @@ static int balance_pgdat(pg_data_t *pgdat, int nr_pages)
 		zone->prev_priority = zone->temp_priority;
 	}
+	if (!all_zones_ok)
+		goto loop_again;
+
 	return total_reclaimed;
 }
...
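The control-flow change above is easier to see stripped of the MM details: each pass through the priority loop now stops once roughly a cluster's worth of pages has been reclaimed, and the whole pass is restarted (loop_again) while any zone is still below its watermark. A self-contained sketch of that shape, with invented helper names standing in for the real scanning code and the real constants:

	#define EXAMPLE_DEF_PRIORITY	12	/* stands in for DEF_PRIORITY */
	#define EXAMPLE_CLUSTER_MAX	32	/* stands in for SWAP_CLUSTER_MAX */

	extern int example_shrink_step(int priority);	/* pages freed this step */
	extern int example_all_zones_ok(void);		/* all watermarks met? */

	int example_balance(void)
	{
		int total_reclaimed;
		int priority;

	loop_again:
		total_reclaimed = 0;
		for (priority = EXAMPLE_DEF_PRIORITY; priority >= 0; priority--) {
			total_reclaimed += example_shrink_step(priority);
			/* don't crank the priority all the way down in one pass */
			if (total_reclaimed >= EXAMPLE_CLUSTER_MAX)
				break;
		}
		if (!example_all_zones_ok())
			goto loop_again;
		return total_reclaimed;
	}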