Commit 234fcd14, authored Mar 08, 2008 by Ralf Baechle
[MIPS] Fix loads of section missmatches

Signed-off-by: Ralf Baechle <ralf@linux-mips.org>

parent 1af0eea2
Showing 30 changed files with 171 additions and 180 deletions (+171, -180)
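
Every hunk below applies the same fix: code and data that are also reached when a secondary CPU is brought online (or brought back online under CPU hotplug) must not live in the .init sections, which are discarded once booting finishes. The annotations therefore change from __init/__initdata to __cpuinit/__cpuinitdata (__INIT becomes __CPUINIT in assembler files), or are dropped from shared declarations entirely. The sketch below only illustrates the class of mismatch being fixed; it is not code from this commit, and the function and variable names are made up.

#include <linux/init.h>

/*
 * Hypothetical example: probe_l1() is also called when a secondary CPU
 * comes online, long after the .init sections have been freed.
 */
static int l1_ways __cpuinitdata = 4;   /* was __initdata: freed after boot */

static void __cpuinit probe_l1(void)    /* was __init: same problem */
{
        /*
         * With the old annotations modpost warns about a section mismatch,
         * and calling this after boot would reference memory that no longer
         * exists.  Keeping function and data in the .cpuinit sections, as
         * this commit does throughout, avoids both.
         */
        if (l1_ways == 4) {
                /* ... per-CPU cache setup would go here ... */
        }
}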

Files changed:

  arch/mips/kernel/cpu-bugs64.c         +2   -2
  arch/mips/kernel/cpu-probe.c          +5   -5
  arch/mips/kernel/head.S               +1   -1
  arch/mips/kernel/traps.c              +4   -3
  arch/mips/lib/uncached.c              +1   -1
  arch/mips/mips-boards/generic/time.c  +1   -1
  arch/mips/mipssim/sim_time.c          +1   -1
  arch/mips/mm/c-r3k.c                  +1   -1
  arch/mips/mm/c-r4k.c                  +15  -15
  arch/mips/mm/c-tx39.c                 +1   -1
  arch/mips/mm/cache.c                  +3   -2
  arch/mips/mm/cex-sb1.S                +2   -2
  arch/mips/mm/pg-r4k.c                 +11  -11
  arch/mips/mm/pg-sb1.c                 +2   -2
  arch/mips/mm/sc-ip22.c                +1   -1
  arch/mips/mm/sc-mips.c                +1   -2
  arch/mips/mm/sc-r5k.c                 +1   -1
  arch/mips/mm/sc-rm7k.c                +1   -1
  arch/mips/mm/tlb-r3k.c                +1   -1
  arch/mips/mm/tlb-r4k.c                +4   -4
  arch/mips/mm/tlb-r8k.c                +2   -2
  arch/mips/mm/tlbex.c                  +35  -35
  arch/mips/mm/uasm.c                   +34  -34
  arch/mips/mm/uasm.h                   +33  -43
  arch/mips/pci/pci-ip27.c              +1   -1
  arch/mips/pci/pci.c                   +1   -1
  arch/mips/sgi-ip27/ip27-init.c        +1   -1
  arch/mips/sgi-ip27/ip27-timer.c       +1   -1
  arch/mips/sgi-ip27/ip27-xtalk.c       +3   -3
  include/asm-mips/cacheflush.h         +1   -1

arch/mips/kernel/cpu-bugs64.c

@@ -167,7 +167,7 @@ static inline void check_mult_sh(void)
                panic(bug64hit, !R4000_WAR ? r4kwar : nowar);
 }
 
-static volatile int daddi_ov __initdata = 0;
+static volatile int daddi_ov __cpuinitdata = 0;
 
 asmlinkage void __init do_daddi_ov(struct pt_regs *regs)
 {
@@ -239,7 +239,7 @@ static inline void check_daddi(void)
                panic(bug64hit, !DADDI_WAR ? daddiwar : nowar);
 }
 
-int daddiu_bug __initdata = -1;
+int daddiu_bug __cpuinitdata = -1;
 
 static inline void check_daddiu(void)
 {

arch/mips/kernel/cpu-probe.c

@@ -550,7 +550,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c)
        }
 }
 
-static char unknown_isa[] __initdata = KERN_ERR \
+static char unknown_isa[] __cpuinitdata = KERN_ERR \
        "Unsupported ISA type, c0.config0: %d.";
 
 static inline unsigned int decode_config0(struct cpuinfo_mips *c)
@@ -656,7 +656,7 @@ static inline unsigned int decode_config3(struct cpuinfo_mips *c)
        return config3 & MIPS_CONF_M;
 }
 
-static void __init decode_configs(struct cpuinfo_mips *c)
+static void __cpuinit decode_configs(struct cpuinfo_mips *c)
 {
        /* MIPS32 or MIPS64 compliant CPU.  */
        c->options = MIPS_CPU_4KEX | MIPS_CPU_4K_CACHE | MIPS_CPU_COUNTER |
@@ -814,7 +814,7 @@ const char *__cpu_name[NR_CPUS];
 /*
  * Name a CPU
  */
-static __init const char *cpu_to_name(struct cpuinfo_mips *c)
+static __cpuinit const char *cpu_to_name(struct cpuinfo_mips *c)
 {
        const char *name = NULL;
@@ -896,7 +896,7 @@ static __init const char *cpu_to_name(struct cpuinfo_mips *c)
        return name;
 }
 
-__init void cpu_probe(void)
+__cpuinit void cpu_probe(void)
 {
        struct cpuinfo_mips *c = &current_cpu_data;
        unsigned int cpu = smp_processor_id();
@@ -959,7 +959,7 @@ __init void cpu_probe(void)
                c->srsets = 1;
 }
 
-__init void cpu_report(void)
+__cpuinit void cpu_report(void)
 {
        struct cpuinfo_mips *c = &current_cpu_data;

arch/mips/kernel/head.S

@@ -195,7 +195,7 @@ NESTED(kernel_entry, 16, sp)			# kernel entry point
        j       start_kernel
        END(kernel_entry)
 
-       __INIT
+       __CPUINIT
 
 #ifdef CONFIG_SMP
 /*

arch/mips/kernel/traps.c

@@ -1306,7 +1306,7 @@ int cp0_compare_irq;
 int cp0_perfcount_irq;
 EXPORT_SYMBOL_GPL(cp0_perfcount_irq);
 
-void __init per_cpu_trap_init(void)
+void __cpuinit per_cpu_trap_init(void)
 {
        unsigned int cpu = smp_processor_id();
        unsigned int status_set = ST0_CU0;
@@ -1423,11 +1423,12 @@ void __init set_handler(unsigned long offset, void *addr, unsigned long size)
        flush_icache_range(ebase + offset, ebase + offset + size);
 }
 
-static char panic_null_cerr[] __initdata =
+static char panic_null_cerr[] __cpuinitdata =
        "Trying to set NULL cache error exception handler";
 
 /* Install uncached CPU exception handler */
-void __init set_uncached_handler(unsigned long offset, void *addr, unsigned long size)
+void __cpuinit set_uncached_handler(unsigned long offset, void *addr,
+       unsigned long size)
 {
 #ifdef CONFIG_32BIT
        unsigned long uncached_ebase = KSEG1ADDR(ebase);

arch/mips/lib/uncached.c

@@ -36,7 +36,7 @@
  * values, so we can avoid sharing the same stack area between a cached
  * and the uncached mode.
  */
-unsigned long __init run_uncached(void *func)
+unsigned long __cpuinit run_uncached(void *func)
 {
        register long sp __asm__("$sp");
        register long ret __asm__("$2");

arch/mips/mips-boards/generic/time.c

@@ -146,7 +146,7 @@ void __init plat_perf_setup(void)
        }
 }
 
-unsigned int __init get_c0_compare_int(void)
+unsigned int __cpuinit get_c0_compare_int(void)
 {
 #ifdef MSC01E_INT_BASE
        if (cpu_has_veic) {

arch/mips/mipssim/sim_time.c

@@ -83,7 +83,7 @@ static void mips_timer_dispatch(void)
 }
 
-unsigned __init get_c0_compare_int(void)
+unsigned __cpuinit get_c0_compare_int(void)
 {
 #ifdef MSC01E_INT_BASE
        if (cpu_has_veic) {

arch/mips/mm/c-r3k.c

@@ -307,7 +307,7 @@ static void r3k_dma_cache_wback_inv(unsigned long start, unsigned long size)
        r3k_flush_dcache_range(start, start + size);
 }
 
-void __init r3k_cache_init(void)
+void __cpuinit r3k_cache_init(void)
 {
        extern void build_clear_page(void);
        extern void build_copy_page(void);

arch/mips/mm/c-r4k.c

@@ -93,7 +93,7 @@ static inline void r4k_blast_dcache_page_dc32(unsigned long addr)
        blast_dcache32_page(addr);
 }
 
-static void __init r4k_blast_dcache_page_setup(void)
+static void __cpuinit r4k_blast_dcache_page_setup(void)
 {
        unsigned long dc_lsize = cpu_dcache_line_size();
@@ -107,7 +107,7 @@ static void __init r4k_blast_dcache_page_setup(void)
 static void (*r4k_blast_dcache_page_indexed)(unsigned long addr);
 
-static void __init r4k_blast_dcache_page_indexed_setup(void)
+static void __cpuinit r4k_blast_dcache_page_indexed_setup(void)
 {
        unsigned long dc_lsize = cpu_dcache_line_size();
@@ -121,7 +121,7 @@ static void __init r4k_blast_dcache_page_indexed_setup(void)
 static void (*r4k_blast_dcache)(void);
 
-static void __init r4k_blast_dcache_setup(void)
+static void __cpuinit r4k_blast_dcache_setup(void)
 {
        unsigned long dc_lsize = cpu_dcache_line_size();
@@ -206,7 +206,7 @@ static inline void tx49_blast_icache32_page_indexed(unsigned long page)
 static void (*r4k_blast_icache_page)(unsigned long addr);
 
-static void __init r4k_blast_icache_page_setup(void)
+static void __cpuinit r4k_blast_icache_page_setup(void)
 {
        unsigned long ic_lsize = cpu_icache_line_size();
@@ -223,7 +223,7 @@ static void __init r4k_blast_icache_page_setup(void)
 static void (*r4k_blast_icache_page_indexed)(unsigned long addr);
 
-static void __init r4k_blast_icache_page_indexed_setup(void)
+static void __cpuinit r4k_blast_icache_page_indexed_setup(void)
 {
        unsigned long ic_lsize = cpu_icache_line_size();
@@ -247,7 +247,7 @@ static void __init r4k_blast_icache_page_indexed_setup(void)
 static void (*r4k_blast_icache)(void);
 
-static void __init r4k_blast_icache_setup(void)
+static void __cpuinit r4k_blast_icache_setup(void)
 {
        unsigned long ic_lsize = cpu_icache_line_size();
@@ -268,7 +268,7 @@ static void __init r4k_blast_icache_setup(void)
 static void (*r4k_blast_scache_page)(unsigned long addr);
 
-static void __init r4k_blast_scache_page_setup(void)
+static void __cpuinit r4k_blast_scache_page_setup(void)
 {
        unsigned long sc_lsize = cpu_scache_line_size();
@@ -286,7 +286,7 @@ static void __init r4k_blast_scache_page_setup(void)
 static void (*r4k_blast_scache_page_indexed)(unsigned long addr);
 
-static void __init r4k_blast_scache_page_indexed_setup(void)
+static void __cpuinit r4k_blast_scache_page_indexed_setup(void)
 {
        unsigned long sc_lsize = cpu_scache_line_size();
@@ -304,7 +304,7 @@ static void __init r4k_blast_scache_page_indexed_setup(void)
 static void (*r4k_blast_scache)(void);
 
-static void __init r4k_blast_scache_setup(void)
+static void __cpuinit r4k_blast_scache_setup(void)
 {
        unsigned long sc_lsize = cpu_scache_line_size();
@@ -691,11 +691,11 @@ static inline void rm7k_erratum31(void)
        }
 }
 
-static char *way_string[] __initdata = { NULL, "direct mapped", "2-way",
+static char *way_string[] __cpuinitdata = { NULL, "direct mapped", "2-way",
        "3-way", "4-way", "5-way", "6-way", "7-way", "8-way"
 };
 
-static void __init probe_pcache(void)
+static void __cpuinit probe_pcache(void)
 {
        struct cpuinfo_mips *c = &current_cpu_data;
        unsigned int config = read_c0_config();
@@ -1016,7 +1016,7 @@ static void __init probe_pcache(void)
  * executes in KSEG1 space or else you will crash and burn badly.  You have
  * been warned.
  */
-static int __init probe_scache(void)
+static int __cpuinit probe_scache(void)
 {
        unsigned long flags, addr, begin, end, pow2;
        unsigned int config = read_c0_config();
@@ -1095,7 +1095,7 @@ extern int r5k_sc_init(void);
 extern int rm7k_sc_init(void);
 extern int mips_sc_init(void);
 
-static void __init setup_scache(void)
+static void __cpuinit setup_scache(void)
 {
        struct cpuinfo_mips *c = &current_cpu_data;
        unsigned int config = read_c0_config();
@@ -1206,7 +1206,7 @@ void au1x00_fixup_config_od(void)
        }
 }
 
-static void __init coherency_setup(void)
+static void __cpuinit coherency_setup(void)
 {
        change_c0_config(CONF_CM_CMASK, CONF_CM_DEFAULT);
@@ -1238,7 +1238,7 @@ static void __init coherency_setup(void)
        }
 }
 
-void __init r4k_cache_init(void)
+void __cpuinit r4k_cache_init(void)
 {
        extern void build_clear_page(void);
        extern void build_copy_page(void);

arch/mips/mm/c-tx39.c

@@ -329,7 +329,7 @@ static __init void tx39_probe_cache(void)
        }
 }
 
-void __init tx39_cache_init(void)
+void __cpuinit tx39_cache_init(void)
 {
        extern void build_clear_page(void);
        extern void build_copy_page(void);

arch/mips/mm/cache.c

@@ -127,9 +127,10 @@ void __update_cache(struct vm_area_struct *vma, unsigned long address,
        }
 }
 
-static char cache_panic[] __initdata = "Yeee, unsupported cache architecture.";
+static char cache_panic[] __cpuinitdata =
+       "Yeee, unsupported cache architecture.";
 
-void __init cpu_cache_init(void)
+void __devinit cpu_cache_init(void)
 {
        if (cpu_has_3k_cache) {
                extern void __weak r3k_cache_init(void);

arch/mips/mm/cex-sb1.S

@@ -34,8 +34,6 @@
  * is changed.
  */
 
-       __INIT
-
        .set    mips64
        .set    noreorder
        .set    noat
@@ -51,6 +49,8 @@
  * (0x170-0x17f) are used to preserve k0, k1, and ra.
  */
 
+       __CPUINIT
+
 LEAF(except_vec2_sb1)
        /*
         * If this error is recoverable, we need to exit the handler

arch/mips/mm/pg-r4k.c

@@ -66,21 +66,21 @@ EXPORT_SYMBOL(copy_page);
  * with 64-bit kernels.  The prefetch offsets have been experimentally tuned
  * an Origin 200.
  */
-static int pref_offset_clear __initdata = 512;
-static int pref_offset_copy  __initdata = 256;
+static int pref_offset_clear __cpuinitdata = 512;
+static int pref_offset_copy  __cpuinitdata = 256;
 
-static unsigned int pref_src_mode __initdata;
-static unsigned int pref_dst_mode __initdata;
+static unsigned int pref_src_mode __cpuinitdata;
+static unsigned int pref_dst_mode __cpuinitdata;
 
-static int load_offset __initdata;
-static int store_offset __initdata;
+static int load_offset __cpuinitdata;
+static int store_offset __cpuinitdata;
 
-static unsigned int __initdata *dest, *epc;
+static unsigned int __cpuinitdata *dest, *epc;
 
 static unsigned int instruction_pending;
 static union mips_instruction delayed_mi;
 
-static void __init emit_instruction(union mips_instruction mi)
+static void __cpuinit emit_instruction(union mips_instruction mi)
 {
        if (instruction_pending)
                *epc++ = delayed_mi.word;
@@ -222,7 +222,7 @@ static inline void build_cdex_p(void)
        emit_instruction(mi);
 }
 
-static void __init __build_store_reg(int reg)
+static void __cpuinit __build_store_reg(int reg)
 {
        union mips_instruction mi;
        unsigned int width;
@@ -339,7 +339,7 @@ static inline void build_jr_ra(void)
        flush_delay_slot_or_nop();
 }
 
-void __init build_clear_page(void)
+void __cpuinit build_clear_page(void)
 {
        unsigned int loop_start;
        unsigned long off;
@@ -442,7 +442,7 @@ dest = label();
        pr_debug("\t.set pop\n");
 }
 
-void __init build_copy_page(void)
+void __cpuinit build_copy_page(void)
 {
        unsigned int loop_start;
        unsigned long off;

arch/mips/mm/pg-sb1.c

@@ -293,10 +293,10 @@ void copy_page(void *to, void *from)
 EXPORT_SYMBOL(clear_page);
 EXPORT_SYMBOL(copy_page);
 
-void __init build_clear_page(void)
+void __cpuinit build_clear_page(void)
 {
 }
 
-void __init build_copy_page(void)
+void __cpuinit build_copy_page(void)
 {
 }

arch/mips/mm/sc-ip22.c

@@ -168,7 +168,7 @@ struct bcache_ops indy_sc_ops = {
        .bc_inv = indy_sc_wback_invalidate
 };
 
-void __init indy_sc_init(void)
+void __cpuinit indy_sc_init(void)
 {
        if (indy_sc_probe()) {
                indy_sc_enable();

arch/mips/mm/sc-mips.c

@@ -100,7 +100,7 @@ static inline int __init mips_sc_probe(void)
        return 1;
 }
 
-int __init mips_sc_init(void)
+int __cpuinit mips_sc_init(void)
 {
        int found = mips_sc_probe();
 
        if (found) {
@@ -109,4 +109,3 @@ int __init mips_sc_init(void)
        }
        return found;
 }
-

arch/mips/mm/sc-r5k.c

@@ -99,7 +99,7 @@ static struct bcache_ops r5k_sc_ops = {
        .bc_inv = r5k_dma_cache_inv_sc
 };
 
-void __init r5k_sc_init(void)
+void __cpuinit r5k_sc_init(void)
 {
        if (r5k_sc_probe()) {
                r5k_sc_enable();

arch/mips/mm/sc-rm7k.c

@@ -128,7 +128,7 @@ struct bcache_ops rm7k_sc_ops = {
        .bc_inv = rm7k_sc_inv
 };
 
-void __init rm7k_sc_init(void)
+void __cpuinit rm7k_sc_init(void)
 {
        struct cpuinfo_mips *c = &current_cpu_data;
        unsigned int config = read_c0_config();

arch/mips/mm/tlb-r3k.c

@@ -281,7 +281,7 @@ void __init add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
        }
 }
 
-void __init tlb_init(void)
+void __cpuinit tlb_init(void)
 {
        local_flush_tlb_all();

arch/mips/mm/tlb-r4k.c

@@ -388,7 +388,7 @@ void __init add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
  * lifetime of the system
  */
 
-static int temp_tlb_entry __initdata;
+static int temp_tlb_entry __cpuinitdata;
 
 __init int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1,
        unsigned long entryhi, unsigned long pagemask)
@@ -427,7 +427,7 @@ __init int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1,
        return ret;
 }
 
-static void __init probe_tlb(unsigned long config)
+static void __cpuinit probe_tlb(unsigned long config)
 {
        struct cpuinfo_mips *c = &current_cpu_data;
        unsigned int reg;
@@ -455,7 +455,7 @@ static void __init probe_tlb(unsigned long config)
        c->tlbsize = ((reg >> 25) & 0x3f) + 1;
 }
 
-static int __initdata ntlb = 0;
+static int __cpuinitdata ntlb = 0;
 static int __init set_ntlb(char *str)
 {
        get_option(&str, &ntlb);
@@ -464,7 +464,7 @@ static int __init set_ntlb(char *str)
 __setup("ntlb=", set_ntlb);
 
-void __init tlb_init(void)
+void __cpuinit tlb_init(void)
 {
        unsigned int config = read_c0_config();

arch/mips/mm/tlb-r8k.c

@@ -214,14 +214,14 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
        local_irq_restore(flags);
 }
 
-static void __init probe_tlb(unsigned long config)
+static void __cpuinit probe_tlb(unsigned long config)
 {
        struct cpuinfo_mips *c = &current_cpu_data;
 
        c->tlbsize = 3 * 128;           /* 3 sets each 128 entries */
 }
 
-void __init tlb_init(void)
+void __cpuinit tlb_init(void)
 {
        unsigned int config = read_c0_config();
        unsigned long status;

arch/mips/mm/tlbex.c

@@ -60,7 +60,7 @@ static inline int __maybe_unused r10000_llsc_war(void)
  * why; it's not an issue caused by the core RTL.
  *
  */
-static int __init m4kc_tlbp_war(void)
+static int __cpuinit m4kc_tlbp_war(void)
 {
        return (current_cpu_data.processor_id & 0xffff00) ==
               (PRID_COMP_MIPS | PRID_IMP_4KC);
@@ -144,16 +144,16 @@ static inline void dump_handler(const u32 *handler, int count)
  * We deliberately chose a buffer size of 128, so we won't scribble
  * over anything important on overflow before we panic.
  */
-static u32 tlb_handler[128] __initdata;
+static u32 tlb_handler[128] __cpuinitdata;
 
 /* simply assume worst case size for labels and relocs */
-static struct uasm_label labels[128] __initdata;
-static struct uasm_reloc relocs[128] __initdata;
+static struct uasm_label labels[128] __cpuinitdata;
+static struct uasm_reloc relocs[128] __cpuinitdata;
 
 /*
  * The R3000 TLB handler is simple.
  */
-static void __init build_r3000_tlb_refill_handler(void)
+static void __cpuinit build_r3000_tlb_refill_handler(void)
 {
        long pgdc = (long)pgd_current;
        u32 *p;
@@ -197,7 +197,7 @@ static void __init build_r3000_tlb_refill_handler(void)
  * other one.To keep things simple, we first assume linear space,
  * then we relocate it to the final handler layout as needed.
  */
-static u32 final_handler[64] __initdata;
+static u32 final_handler[64] __cpuinitdata;
 
 /*
  * Hazards
@@ -221,7 +221,7 @@ static u32 final_handler[64] __initdata;
  *
  * As if we MIPS hackers wouldn't know how to nop pipelines happy ...
  */
-static void __init __maybe_unused build_tlb_probe_entry(u32 **p)
+static void __cpuinit __maybe_unused build_tlb_probe_entry(u32 **p)
 {
        switch (current_cpu_type()) {
        /* Found by experiment: R4600 v2.0 needs this, too.  */
@@ -245,7 +245,7 @@ static void __init __maybe_unused build_tlb_probe_entry(u32 **p)
  */
 enum tlb_write_entry { tlb_random, tlb_indexed };
 
-static void __init build_tlb_write_entry(u32 **p, struct uasm_label **l,
+static void __cpuinit build_tlb_write_entry(u32 **p, struct uasm_label **l,
                                         struct uasm_reloc **r,
                                         enum tlb_write_entry wmode)
 {
@@ -389,7 +389,7 @@ static void __init build_tlb_write_entry(u32 **p, struct uasm_label **l,
  * TMP and PTR are scratch.
  * TMP will be clobbered, PTR will hold the pmd entry.
  */
-static void __init
+static void __cpuinit
 build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
                 unsigned int tmp, unsigned int ptr)
 {
@@ -450,7 +450,7 @@ build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
  * BVADDR is the faulting address, PTR is scratch.
  * PTR will hold the pgd for vmalloc.
  */
-static void __init
+static void __cpuinit
 build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
                        unsigned int bvaddr, unsigned int ptr)
 {
@@ -522,7 +522,7 @@ build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
  * TMP and PTR are scratch.
  * TMP will be clobbered, PTR will hold the pgd entry.
  */
-static void __init __maybe_unused
+static void __cpuinit __maybe_unused
 build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr)
 {
        long pgdc = (long)pgd_current;
@@ -557,7 +557,7 @@ build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr)
 #endif /* !CONFIG_64BIT */
 
-static void __init build_adjust_context(u32 **p, unsigned int ctx)
+static void __cpuinit build_adjust_context(u32 **p, unsigned int ctx)
 {
        unsigned int shift = 4 - (PTE_T_LOG2 + 1) + PAGE_SHIFT - 12;
        unsigned int mask = (PTRS_PER_PTE / 2 - 1) << (PTE_T_LOG2 + 1);
@@ -583,7 +583,7 @@ static void __init build_adjust_context(u32 **p, unsigned int ctx)
        uasm_i_andi(p, ctx, ctx, mask);
 }
 
-static void __init build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr)
+static void __cpuinit build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr)
 {
        /*
         * Bug workaround for the Nevada. It seems as if under certain
@@ -608,7 +608,7 @@ static void __init build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr)
        UASM_i_ADDU(p, ptr, ptr, tmp); /* add in offset */
 }
 
-static void __init build_update_entries(u32 **p, unsigned int tmp,
+static void __cpuinit build_update_entries(u32 **p, unsigned int tmp,
                                        unsigned int ptep)
 {
        /*
@@ -651,7 +651,7 @@ static void __init build_update_entries(u32 **p, unsigned int tmp,
 #endif
 }
 
-static void __init build_r4000_tlb_refill_handler(void)
+static void __cpuinit build_r4000_tlb_refill_handler(void)
 {
        u32 *p = tlb_handler;
        struct uasm_label *l = labels;
@@ -783,7 +783,7 @@ u32 handle_tlbl[FASTPATH_SIZE] __cacheline_aligned;
 u32 handle_tlbs[FASTPATH_SIZE] __cacheline_aligned;
 u32 handle_tlbm[FASTPATH_SIZE] __cacheline_aligned;
 
-static void __init
+static void __cpuinit
 iPTE_LW(u32 **p, struct uasm_label **l, unsigned int pte, unsigned int ptr)
 {
 #ifdef CONFIG_SMP
@@ -803,7 +803,7 @@ iPTE_LW(u32 **p, struct uasm_label **l, unsigned int pte, unsigned int ptr)
 #endif
 }
 
-static void __init
+static void __cpuinit
 iPTE_SW(u32 **p, struct uasm_reloc **r, unsigned int pte, unsigned int ptr,
        unsigned int mode)
 {
@@ -863,7 +863,7 @@ iPTE_SW(u32 **p, struct uasm_reloc **r, unsigned int pte, unsigned int ptr,
  * the page table where this PTE is located, PTE will be re-loaded
  * with it's original value.
  */
-static void __init
+static void __cpuinit
 build_pte_present(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
                  unsigned int pte, unsigned int ptr, enum label_id lid)
 {
@@ -874,7 +874,7 @@ build_pte_present(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
 }
 
 /* Make PTE valid, store result in PTR. */
-static void __init
+static void __cpuinit
 build_make_valid(u32 **p, struct uasm_reloc **r, unsigned int pte,
                 unsigned int ptr)
 {
@@ -887,7 +887,7 @@ build_make_valid(u32 **p, struct uasm_reloc **r, unsigned int pte,
  * Check if PTE can be written to, if not branch to LABEL. Regardless
  * restore PTE with value from PTR when done.
  */
-static void __init
+static void __cpuinit
 build_pte_writable(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
                   unsigned int pte, unsigned int ptr, enum label_id lid)
 {
@@ -900,7 +900,7 @@ build_pte_writable(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
 /* Make PTE writable, update software status bits as well, then store
  * at PTR.
  */
-static void __init
+static void __cpuinit
 build_make_write(u32 **p, struct uasm_reloc **r, unsigned int pte,
                 unsigned int ptr)
 {
@@ -914,7 +914,7 @@ build_make_write(u32 **p, struct uasm_reloc **r, unsigned int pte,
  * Check if PTE can be modified, if not branch to LABEL. Regardless
  * restore PTE with value from PTR when done.
  */
-static void __init
+static void __cpuinit
 build_pte_modifiable(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
                     unsigned int pte, unsigned int ptr, enum label_id lid)
 {
@@ -931,7 +931,7 @@ build_pte_modifiable(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
  * This places the pte into ENTRYLO0 and writes it with tlbwi.
  * Then it returns.
  */
-static void __init
+static void __cpuinit
 build_r3000_pte_reload_tlbwi(u32 **p, unsigned int pte, unsigned int tmp)
 {
        uasm_i_mtc0(p, pte, C0_ENTRYLO0); /* cp0 delay */
@@ -947,7 +947,7 @@ build_r3000_pte_reload_tlbwi(u32 **p, unsigned int pte, unsigned int tmp)
  * may have the probe fail bit set as a result of a trap on a
  * kseg2 access, i.e. without refill.  Then it returns.
  */
-static void __init
+static void __cpuinit
 build_r3000_tlb_reload_write(u32 **p, struct uasm_label **l,
                             struct uasm_reloc **r, unsigned int pte,
                             unsigned int tmp)
@@ -965,7 +965,7 @@ build_r3000_tlb_reload_write(u32 **p, struct uasm_label **l,
        uasm_i_rfe(p); /* branch delay */
 }
 
-static void __init
+static void __cpuinit
 build_r3000_tlbchange_handler_head(u32 **p, unsigned int pte,
                                   unsigned int ptr)
 {
@@ -985,7 +985,7 @@ build_r3000_tlbchange_handler_head(u32 **p, unsigned int pte,
        uasm_i_tlbp(p); /* load delay */
 }
 
-static void __init build_r3000_tlb_load_handler(void)
+static void __cpuinit build_r3000_tlb_load_handler(void)
 {
        u32 *p = handle_tlbl;
        struct uasm_label *l = labels;
@@ -1015,7 +1015,7 @@ static void __init build_r3000_tlb_load_handler(void)
        dump_handler(handle_tlbl, ARRAY_SIZE(handle_tlbl));
 }
 
-static void __init build_r3000_tlb_store_handler(void)
+static void __cpuinit build_r3000_tlb_store_handler(void)
 {
        u32 *p = handle_tlbs;
        struct uasm_label *l = labels;
@@ -1045,7 +1045,7 @@ static void __init build_r3000_tlb_store_handler(void)
        dump_handler(handle_tlbs, ARRAY_SIZE(handle_tlbs));
 }
 
-static void __init build_r3000_tlb_modify_handler(void)
+static void __cpuinit build_r3000_tlb_modify_handler(void)
 {
        u32 *p = handle_tlbm;
        struct uasm_label *l = labels;
@@ -1078,7 +1078,7 @@ static void __init build_r3000_tlb_modify_handler(void)
 /*
  * R4000 style TLB load/store/modify handlers.
  */
-static void __init
+static void __cpuinit
 build_r4000_tlbchange_handler_head(u32 **p, struct uasm_label **l,
                                   struct uasm_reloc **r, unsigned int pte,
                                   unsigned int ptr)
@@ -1103,7 +1103,7 @@ build_r4000_tlbchange_handler_head(u32 **p, struct uasm_label **l,
        build_tlb_probe_entry(p);
 }
 
-static void __init
+static void __cpuinit
 build_r4000_tlbchange_handler_tail(u32 **p, struct uasm_label **l,
                                   struct uasm_reloc **r, unsigned int tmp,
                                   unsigned int ptr)
@@ -1120,7 +1120,7 @@ build_r4000_tlbchange_handler_tail(u32 **p, struct uasm_label **l,
 #endif
 }
 
-static void __init build_r4000_tlb_load_handler(void)
+static void __cpuinit build_r4000_tlb_load_handler(void)
 {
        u32 *p = handle_tlbl;
        struct uasm_label *l = labels;
@@ -1160,7 +1160,7 @@ static void __init build_r4000_tlb_load_handler(void)
        dump_handler(handle_tlbl, ARRAY_SIZE(handle_tlbl));
 }
 
-static void __init build_r4000_tlb_store_handler(void)
+static void __cpuinit build_r4000_tlb_store_handler(void)
 {
        u32 *p = handle_tlbs;
        struct uasm_label *l = labels;
@@ -1191,7 +1191,7 @@ static void __init build_r4000_tlb_store_handler(void)
        dump_handler(handle_tlbs, ARRAY_SIZE(handle_tlbs));
 }
 
-static void __init build_r4000_tlb_modify_handler(void)
+static void __cpuinit build_r4000_tlb_modify_handler(void)
 {
        u32 *p = handle_tlbm;
        struct uasm_label *l = labels;
@@ -1223,7 +1223,7 @@ static void __init build_r4000_tlb_modify_handler(void)
        dump_handler(handle_tlbm, ARRAY_SIZE(handle_tlbm));
 }
 
-void __init build_tlb_refill_handler(void)
+void __cpuinit build_tlb_refill_handler(void)
 {
        /*
         * The refill handler is generated per-CPU, multi-node systems
@@ -1269,7 +1269,7 @@ void __init build_tlb_refill_handler(void)
        }
 }
 
-void __init flush_tlb_handlers(void)
+void __cpuinit flush_tlb_handlers(void)
 {
        flush_icache_range((unsigned long)handle_tlbl,
                           (unsigned long)handle_tlbl + sizeof(handle_tlbl));

arch/mips/mm/uasm.c

@@ -82,7 +82,7 @@ struct insn {
        | (e) << RE_SH \
        | (f) << FUNC_SH)
 
-static struct insn insn_table[] __initdata = {
+static struct insn insn_table[] __cpuinitdata = {
        { insn_addiu, M(addiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
        { insn_addu, M(spec_op, 0, 0, 0, 0, addu_op), RS | RT | RD },
        { insn_and, M(spec_op, 0, 0, 0, 0, and_op), RS | RT | RD },
@@ -135,7 +135,7 @@ static struct insn insn_table[] __initdata = {
 #undef M
 
-static inline __init u32 build_rs(u32 arg)
+static inline __cpuinit u32 build_rs(u32 arg)
 {
        if (arg & ~RS_MASK)
                printk(KERN_WARNING "Micro-assembler field overflow\n");
@@ -143,7 +143,7 @@ static inline __init u32 build_rs(u32 arg)
        return (arg & RS_MASK) << RS_SH;
 }
 
-static inline __init u32 build_rt(u32 arg)
+static inline __cpuinit u32 build_rt(u32 arg)
 {
        if (arg & ~RT_MASK)
                printk(KERN_WARNING "Micro-assembler field overflow\n");
@@ -151,7 +151,7 @@ static inline __init u32 build_rt(u32 arg)
        return (arg & RT_MASK) << RT_SH;
 }
 
-static inline __init u32 build_rd(u32 arg)
+static inline __cpuinit u32 build_rd(u32 arg)
 {
        if (arg & ~RD_MASK)
                printk(KERN_WARNING "Micro-assembler field overflow\n");
@@ -159,7 +159,7 @@ static inline __init u32 build_rd(u32 arg)
        return (arg & RD_MASK) << RD_SH;
 }
 
-static inline __init u32 build_re(u32 arg)
+static inline __cpuinit u32 build_re(u32 arg)
 {
        if (arg & ~RE_MASK)
                printk(KERN_WARNING "Micro-assembler field overflow\n");
@@ -167,7 +167,7 @@ static inline __init u32 build_re(u32 arg)
        return (arg & RE_MASK) << RE_SH;
 }
 
-static inline __init u32 build_simm(s32 arg)
+static inline __cpuinit u32 build_simm(s32 arg)
 {
        if (arg > 0x7fff || arg < -0x8000)
                printk(KERN_WARNING "Micro-assembler field overflow\n");
@@ -175,7 +175,7 @@ static inline __init u32 build_simm(s32 arg)
        return arg & 0xffff;
 }
 
-static inline __init u32 build_uimm(u32 arg)
+static inline __cpuinit u32 build_uimm(u32 arg)
 {
        if (arg & ~IMM_MASK)
                printk(KERN_WARNING "Micro-assembler field overflow\n");
@@ -183,7 +183,7 @@ static inline __init u32 build_uimm(u32 arg)
        return arg & IMM_MASK;
 }
 
-static inline __init u32 build_bimm(s32 arg)
+static inline __cpuinit u32 build_bimm(s32 arg)
 {
        if (arg > 0x1ffff || arg < -0x20000)
                printk(KERN_WARNING "Micro-assembler field overflow\n");
@@ -194,7 +194,7 @@ static inline __init u32 build_bimm(s32 arg)
        return ((arg < 0) ? (1 << 15) : 0) | ((arg >> 2) & 0x7fff);
 }
 
-static inline __init u32 build_jimm(u32 arg)
+static inline __cpuinit u32 build_jimm(u32 arg)
 {
        if (arg & ~((JIMM_MASK) << 2))
                printk(KERN_WARNING "Micro-assembler field overflow\n");
@@ -202,7 +202,7 @@ static inline __init u32 build_jimm(u32 arg)
        return (arg >> 2) & JIMM_MASK;
 }
 
-static inline __init u32 build_func(u32 arg)
+static inline __cpuinit u32 build_func(u32 arg)
 {
        if (arg & ~FUNC_MASK)
                printk(KERN_WARNING "Micro-assembler field overflow\n");
@@ -210,7 +210,7 @@ static inline __init u32 build_func(u32 arg)
        return arg & FUNC_MASK;
 }
 
-static inline __init u32 build_set(u32 arg)
+static inline __cpuinit u32 build_set(u32 arg)
 {
        if (arg & ~SET_MASK)
                printk(KERN_WARNING "Micro-assembler field overflow\n");
@@ -222,7 +222,7 @@ static inline __init u32 build_set(u32 arg)
  * The order of opcode arguments is implicitly left to right,
  * starting with RS and ending with FUNC or IMM.
  */
-static void __init build_insn(u32 **buf, enum opcode opc, ...)
+static void __cpuinit build_insn(u32 **buf, enum opcode opc, ...)
 {
        struct insn *ip = NULL;
        unsigned int i;
@@ -375,14 +375,14 @@ I_u3u1u2(_xor)
 I_u2u1u3(_xori)
 
 /* Handle labels. */
-void __init uasm_build_label(struct uasm_label **lab, u32 *addr, int lid)
+void __cpuinit uasm_build_label(struct uasm_label **lab, u32 *addr, int lid)
 {
        (*lab)->addr = addr;
        (*lab)->lab = lid;
        (*lab)++;
 }
 
-int __init uasm_in_compat_space_p(long addr)
+int __cpuinit uasm_in_compat_space_p(long addr)
 {
        /* Is this address in 32bit compat space? */
 #ifdef CONFIG_64BIT
@@ -392,7 +392,7 @@ int __init uasm_in_compat_space_p(long addr)
 #endif
 }
 
-int __init uasm_rel_highest(long val)
+int __cpuinit uasm_rel_highest(long val)
 {
 #ifdef CONFIG_64BIT
        return ((((val + 0x800080008000L) >> 48) & 0xffff) ^ 0x8000) - 0x8000;
@@ -401,7 +401,7 @@ int __init uasm_rel_highest(long val)
 #endif
 }
 
-int __init uasm_rel_higher(long val)
+int __cpuinit uasm_rel_higher(long val)
 {
 #ifdef CONFIG_64BIT
        return ((((val + 0x80008000L) >> 32) & 0xffff) ^ 0x8000) - 0x8000;
@@ -410,17 +410,17 @@ int __init uasm_rel_higher(long val)
 #endif
 }
 
-int __init uasm_rel_hi(long val)
+int __cpuinit uasm_rel_hi(long val)
 {
        return ((((val + 0x8000L) >> 16) & 0xffff) ^ 0x8000) - 0x8000;
 }
 
-int __init uasm_rel_lo(long val)
+int __cpuinit uasm_rel_lo(long val)
 {
        return ((val & 0xffff) ^ 0x8000) - 0x8000;
 }
 
-void __init UASM_i_LA_mostly(u32 **buf, unsigned int rs, long addr)
+void __cpuinit UASM_i_LA_mostly(u32 **buf, unsigned int rs, long addr)
 {
        if (!uasm_in_compat_space_p(addr)) {
                uasm_i_lui(buf, rs, uasm_rel_highest(addr));
@@ -436,7 +436,7 @@ void __init UASM_i_LA_mostly(u32 **buf, unsigned int rs, long addr)
                uasm_i_lui(buf, rs, uasm_rel_hi(addr));
 }
 
-void __init UASM_i_LA(u32 **buf, unsigned int rs, long addr)
+void __cpuinit UASM_i_LA(u32 **buf, unsigned int rs, long addr)
 {
        UASM_i_LA_mostly(buf, rs, addr);
        if (uasm_rel_lo(addr)) {
@@ -448,7 +448,7 @@ void __init UASM_i_LA(u32 **buf, unsigned int rs, long addr)
 }
 
 /* Handle relocations. */
-void __init
+void __cpuinit
 uasm_r_mips_pc16(struct uasm_reloc **rel, u32 *addr, int lid)
 {
        (*rel)->addr = addr;
@@ -457,7 +457,7 @@ uasm_r_mips_pc16(struct uasm_reloc **rel, u32 *addr, int lid)
        (*rel)++;
 }
 
-static inline void __init
+static inline void __cpuinit
 __resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab)
 {
        long laddr = (long)lab->addr;
@@ -474,7 +474,7 @@ __resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab)
        }
 }
 
-void __init
+void __cpuinit
 uasm_resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab)
 {
        struct uasm_label *l;
@@ -485,7 +485,7 @@ uasm_resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab)
                        __resolve_relocs(rel, l);
 }
 
-void __init
+void __cpuinit
 uasm_move_relocs(struct uasm_reloc *rel, u32 *first, u32 *end, long off)
 {
        for (; rel->lab != UASM_LABEL_INVALID; rel++)
@@ -493,7 +493,7 @@ uasm_move_relocs(struct uasm_reloc *rel, u32 *first, u32 *end, long off)
                        rel->addr += off;
 }
 
-void __init
+void __cpuinit
 uasm_move_labels(struct uasm_label *lab, u32 *first, u32 *end, long off)
 {
        for (; lab->lab != UASM_LABEL_INVALID; lab++)
@@ -501,7 +501,7 @@ uasm_move_labels(struct uasm_label *lab, u32 *first, u32 *end, long off)
                        lab->addr += off;
 }
 
-void __init
+void __cpuinit
 uasm_copy_handler(struct uasm_reloc *rel, struct uasm_label *lab, u32 *first,
                  u32 *end, u32 *target)
 {
@@ -513,7 +513,7 @@ uasm_copy_handler(struct uasm_reloc *rel, struct uasm_label *lab, u32 *first,
        uasm_move_labels(lab, first, end, off);
 }
 
-int __init uasm_insn_has_bdelay(struct uasm_reloc *rel, u32 *addr)
+int __cpuinit uasm_insn_has_bdelay(struct uasm_reloc *rel, u32 *addr)
 {
        for (; rel->lab != UASM_LABEL_INVALID; rel++) {
                if (rel->addr == addr
@@ -526,49 +526,49 @@ int __init uasm_insn_has_bdelay(struct uasm_reloc *rel, u32 *addr)
 }
 
 /* Convenience functions for labeled branches. */
-void __init
+void __cpuinit
 uasm_il_bltz(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
 {
        uasm_r_mips_pc16(r, *p, lid);
        uasm_i_bltz(p, reg, 0);
 }
 
-void __init
+void __cpuinit
 uasm_il_b(u32 **p, struct uasm_reloc **r, int lid)
 {
        uasm_r_mips_pc16(r, *p, lid);
        uasm_i_b(p, 0);
 }
 
-void __init
+void __cpuinit
 uasm_il_beqz(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
 {
        uasm_r_mips_pc16(r, *p, lid);
        uasm_i_beqz(p, reg, 0);
 }
 
-void __init
+void __cpuinit
 uasm_il_beqzl(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
 {
        uasm_r_mips_pc16(r, *p, lid);
        uasm_i_beqzl(p, reg, 0);
 }
 
-void __init
+void __cpuinit
 uasm_il_bnez(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
 {
        uasm_r_mips_pc16(r, *p, lid);
        uasm_i_bnez(p, reg, 0);
 }
 
-void __init
+void __cpuinit
 uasm_il_bgezl(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
 {
        uasm_r_mips_pc16(r, *p, lid);
        uasm_i_bgezl(p, reg, 0);
 }
 
-void __init
+void __cpuinit
 uasm_il_bgez(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
 {
        uasm_r_mips_pc16(r, *p, lid);

arch/mips/mm/uasm.h

@@ -11,38 +11,38 @@
 #include <linux/types.h>
 
 #define Ip_u1u2u3(op) \
-void __init \
+void __cpuinit \
 uasm_i##op(u32 **buf, unsigned int a, unsigned int b, unsigned int c)
 
 #define Ip_u2u1u3(op) \
-void __init \
+void __cpuinit \
 uasm_i##op(u32 **buf, unsigned int a, unsigned int b, unsigned int c)
 
 #define Ip_u3u1u2(op) \
-void __init \
+void __cpuinit \
 uasm_i##op(u32 **buf, unsigned int a, unsigned int b, unsigned int c)
 
 #define Ip_u1u2s3(op) \
-void __init \
+void __cpuinit \
 uasm_i##op(u32 **buf, unsigned int a, unsigned int b, signed int c)
 
 #define Ip_u2s3u1(op) \
-void __init \
+void __cpuinit \
 uasm_i##op(u32 **buf, unsigned int a, signed int b, unsigned int c)
 
 #define Ip_u2u1s3(op) \
-void __init \
+void __cpuinit \
 uasm_i##op(u32 **buf, unsigned int a, unsigned int b, signed int c)
 
 #define Ip_u1u2(op) \
-void __init uasm_i##op(u32 **buf, unsigned int a, unsigned int b)
+void __cpuinit uasm_i##op(u32 **buf, unsigned int a, unsigned int b)
 
 #define Ip_u1s2(op) \
-void __init uasm_i##op(u32 **buf, unsigned int a, signed int b)
+void __cpuinit uasm_i##op(u32 **buf, unsigned int a, signed int b)
 
-#define Ip_u1(op) void __init uasm_i##op(u32 **buf, unsigned int a)
+#define Ip_u1(op) void __cpuinit uasm_i##op(u32 **buf, unsigned int a)
 
-#define Ip_0(op) void __init uasm_i##op(u32 **buf)
+#define Ip_0(op) void __cpuinit uasm_i##op(u32 **buf)
 
 Ip_u2u1s3(_addiu);
 Ip_u3u1u2(_addu);
@@ -98,19 +98,19 @@ struct uasm_label {
        int lab;
 };
 
-void __init uasm_build_label(struct uasm_label **lab, u32 *addr, int lid);
+void __cpuinit uasm_build_label(struct uasm_label **lab, u32 *addr, int lid);
 #ifdef CONFIG_64BIT
-int __init uasm_in_compat_space_p(long addr);
-int __init uasm_rel_highest(long val);
-int __init uasm_rel_higher(long val);
+int uasm_in_compat_space_p(long addr);
+int uasm_rel_highest(long val);
+int uasm_rel_higher(long val);
 #endif
-int __init uasm_rel_hi(long val);
-int __init uasm_rel_lo(long val);
-void __init UASM_i_LA_mostly(u32 **buf, unsigned int rs, long addr);
-void __init UASM_i_LA(u32 **buf, unsigned int rs, long addr);
+int uasm_rel_hi(long val);
+int uasm_rel_lo(long val);
+void UASM_i_LA_mostly(u32 **buf, unsigned int rs, long addr);
+void UASM_i_LA(u32 **buf, unsigned int rs, long addr);
 
 #define UASM_L_LA(lb) \
-static inline void __init uasm_l##lb(struct uasm_label **lab, u32 *addr) \
+static inline void __cpuinit uasm_l##lb(struct uasm_label **lab, u32 *addr) \
 { \
        uasm_build_label(lab, addr, label##lb); \
 }
@@ -164,29 +164,19 @@ struct uasm_reloc {
 /* This is zero so we can use zeroed label arrays. */
 #define UASM_LABEL_INVALID 0
 
-void __init uasm_r_mips_pc16(struct uasm_reloc **rel, u32 *addr, int lid);
-void __init uasm_resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab);
-void __init uasm_move_relocs(struct uasm_reloc *rel, u32 *first, u32 *end, long off);
-void __init uasm_move_labels(struct uasm_label *lab, u32 *first, u32 *end, long off);
-void __init uasm_copy_handler(struct uasm_reloc *rel, struct uasm_label *lab, u32 *first, u32 *end, u32 *target);
-int __init uasm_insn_has_bdelay(struct uasm_reloc *rel, u32 *addr);
+void uasm_r_mips_pc16(struct uasm_reloc **rel, u32 *addr, int lid);
+void uasm_resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab);
+void uasm_move_relocs(struct uasm_reloc *rel, u32 *first, u32 *end, long off);
+void uasm_move_labels(struct uasm_label *lab, u32 *first, u32 *end, long off);
+void uasm_copy_handler(struct uasm_reloc *rel, struct uasm_label *lab,
+       u32 *first, u32 *end, u32 *target);
+int uasm_insn_has_bdelay(struct uasm_reloc *rel, u32 *addr);
 
 /* Convenience functions for labeled branches. */
-void __init uasm_il_bltz(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid);
-void __init uasm_il_b(u32 **p, struct uasm_reloc **r, int lid);
-void __init uasm_il_beqz(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid);
-void __init uasm_il_beqzl(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid);
-void __init uasm_il_bnez(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid);
-void __init uasm_il_bgezl(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid);
-void __init uasm_il_bgez(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid);
+void uasm_il_bltz(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid);
+void uasm_il_b(u32 **p, struct uasm_reloc **r, int lid);
+void uasm_il_beqz(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid);
+void uasm_il_beqzl(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid);
+void uasm_il_bnez(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid);
+void uasm_il_bgezl(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid);
+void uasm_il_bgez(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid);

arch/mips/pci/pci-ip27.c

@@ -40,7 +40,7 @@ int irq_to_slot[MAX_PCI_BUSSES * MAX_DEVICES_PER_PCIBUS];
 extern struct pci_ops bridge_pci_ops;
 
-int __init bridge_probe(nasid_t nasid, int widget_id, int masterwid)
+int __cpuinit bridge_probe(nasid_t nasid, int widget_id, int masterwid)
 {
        unsigned long offset = NODE_OFFSET(nasid);
        struct bridge_controller *bc;

arch/mips/pci/pci.c

@@ -260,7 +260,7 @@ static void pcibios_fixup_device_resources(struct pci_dev *dev,
        }
 }
 
-void pcibios_fixup_bus(struct pci_bus *bus)
+void __devinit pcibios_fixup_bus(struct pci_bus *bus)
 {
        /* Propagate hose info into the subordinate devices.  */

arch/mips/sgi-ip27/ip27-init.c

@@ -53,7 +53,7 @@ extern void pcibr_setup(cnodeid_t);
 extern void xtalk_probe_node(cnodeid_t nid);
 
-static void __init per_hub_init(cnodeid_t cnode)
+static void __cpuinit per_hub_init(cnodeid_t cnode)
 {
        struct hub_data *hub = hub_data(cnode);
        nasid_t nasid = COMPACT_TO_NASID_NODEID(cnode);

arch/mips/sgi-ip27/ip27-timer.c

@@ -285,7 +285,7 @@ void __cpuinit cpu_time_init(void)
        set_c0_status(SRB_TIMOCLK);
 }
 
-void __init hub_rtc_init(cnodeid_t cnode)
+void __cpuinit hub_rtc_init(cnodeid_t cnode)
 {
        /*
         * We only need to initialize the current node.

arch/mips/sgi-ip27/ip27-xtalk.c

@@ -22,7 +22,7 @@
 extern int bridge_probe(nasid_t nasid, int widget, int masterwid);
 
-static int __init probe_one_port(nasid_t nasid, int widget, int masterwid)
+static int __cpuinit probe_one_port(nasid_t nasid, int widget, int masterwid)
 {
        widgetreg_t             widget_id;
        xwidget_part_num_t      partnum;
@@ -46,7 +46,7 @@ static int __init probe_one_port(nasid_t nasid, int widget, int masterwid)
        return 0;
 }
 
-static int __init xbow_probe(nasid_t nasid)
+static int __cpuinit xbow_probe(nasid_t nasid)
 {
        lboard_t *brd;
        klxbow_t *xbow_p;
@@ -99,7 +99,7 @@ static int __init xbow_probe(nasid_t nasid)
        return 0;
 }
 
-void __init xtalk_probe_node(cnodeid_t nid)
+void __cpuinit xtalk_probe_node(cnodeid_t nid)
 {
        volatile u64 hubreg;
        nasid_t nasid;

include/asm-mips/cacheflush.h

@@ -93,7 +93,7 @@ extern void (*flush_data_cache_page)(unsigned long addr);
        clear_bit(PG_dcache_dirty, &(page)->flags)
 
 /* Run kernel code uncached, useful for cache probing functions. */
-unsigned long __init run_uncached(void *func);
+unsigned long run_uncached(void *func);
 
 extern void *kmap_coherent(struct page *page, unsigned long addr);
 extern void kunmap_coherent(void);
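
A side note on the two headers touched above (arch/mips/mm/uasm.h and include/asm-mips/cacheflush.h): their prototypes simply lose the section annotation instead of gaining __cpuinit. The section placement is decided where the function is defined, and the kernel convention is to keep the annotation on the definition; dropping it from widely included headers avoids declarations that contradict the new __cpuinit definitions. A minimal sketch of the resulting split, with names that are purely illustrative and not from the tree:

/* example.h - the declaration stays plain, no placement attribute */
unsigned long probe_something(void *arg);

/* example.c - the definition carries the attribute and decides placement */
unsigned long __cpuinit probe_something(void *arg)
{
        return 0;       /* body elided; only the annotation split matters here */
}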