Commit b5eb5511
authored Oct 03, 2007 by Ralf Baechle
[MIPS] Kill num_online_cpus() loops.
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>

parent bd6aeeff
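The conversion pattern applied throughout this commit: loops of the form "for (i = 0; i < num_online_cpus(); i++)" (or counting down from num_online_cpus() - 1) assume that online CPUs are numbered contiguously starting at 0. That assumption can touch CPUs that are offline and miss online CPUs with higher numbers; for_each_online_cpu() instead walks the online cpumask directly. Below is a minimal before/after sketch of the idiom, not taken from this commit; my_per_cpu_state and reset_online_state are hypothetical names used only for illustration.

    #include <linux/cpumask.h>	/* num_online_cpus(), for_each_online_cpu() */

    static int my_per_cpu_state[NR_CPUS];	/* hypothetical per-CPU bookkeeping */

    static void reset_online_state(void)
    {
    	int cpu;

    	/*
    	 * Old idiom: assumes the online CPUs are exactly 0 .. num_online_cpus()-1,
    	 * which breaks with sparse numbering or offlined CPUs:
    	 *
    	 *	for (cpu = 0; cpu < num_online_cpus(); cpu++)
    	 *		my_per_cpu_state[cpu] = 0;
    	 */

    	/* New idiom: visit exactly the CPUs present in the online mask. */
    	for_each_online_cpu(cpu)
    		my_per_cpu_state[cpu] = 0;
    }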
Showing 4 changed files with 27 additions and 18 deletions (+27 -18)
arch/mips/kernel/gdb-stub.c       +2  -2
arch/mips/kernel/smp.c            +21 -12
arch/mips/kernel/smtc.c           +2  -2
include/asm-mips/mmu_context.h    +2  -2
arch/mips/kernel/gdb-stub.c

@@ -769,7 +769,7 @@ void handle_exception(struct gdb_regs *regs)
 	/*
 	 * acquire the CPU spinlocks
 	 */
-	for (i = num_online_cpus()-1; i >= 0; i--)
+	for_each_online_cpu(i)
 		if (__raw_spin_trylock(&kgdb_cpulock[i]) == 0)
 			panic("kgdb: couldn't get cpulock %d\n", i);

@@ -1044,7 +1044,7 @@ void handle_exception(struct gdb_regs *regs)
 exit_kgdb_exception:
 	/* release locks so other CPUs can go */
-	for (i = num_online_cpus()-1; i >= 0; i--)
+	for_each_online_cpu(i)
 		__raw_spin_unlock(&kgdb_cpulock[i]);
 	spin_unlock(&kgdb_lock);
arch/mips/kernel/smp.c

@@ -375,10 +375,13 @@ void flush_tlb_mm(struct mm_struct *mm)
 	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
 		smp_on_other_tlbs(flush_tlb_mm_ipi, (void *)mm);
 	} else {
-		int i;
-		for (i = 0; i < num_online_cpus(); i++)
-			if (smp_processor_id() != i)
-				cpu_context(i, mm) = 0;
+		cpumask_t mask = cpu_online_map;
+		unsigned int cpu;
+
+		cpu_clear(smp_processor_id(), mask);
+		for_each_online_cpu(cpu)
+			if (cpu_context(cpu, mm))
+				cpu_context(cpu, mm) = 0;
 	}
 	local_flush_tlb_mm(mm);

@@ -411,10 +414,13 @@ void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned l
 		fd.addr2 = end;
 		smp_on_other_tlbs(flush_tlb_range_ipi, (void *)&fd);
 	} else {
-		int i;
-		for (i = 0; i < num_online_cpus(); i++)
-			if (smp_processor_id() != i)
-				cpu_context(i, mm) = 0;
+		cpumask_t mask = cpu_online_map;
+		unsigned int cpu;
+
+		cpu_clear(smp_processor_id(), mask);
+		for_each_online_cpu(cpu)
+			if (cpu_context(cpu, mm))
+				cpu_context(cpu, mm) = 0;
 	}
 	local_flush_tlb_range(vma, start, end);
 	preempt_enable();

@@ -453,10 +459,13 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
 		fd.addr1 = page;
 		smp_on_other_tlbs(flush_tlb_page_ipi, (void *)&fd);
 	} else {
-		int i;
-		for (i = 0; i < num_online_cpus(); i++)
-			if (smp_processor_id() != i)
-				cpu_context(i, vma->vm_mm) = 0;
+		cpumask_t mask = cpu_online_map;
+		unsigned int cpu;
+
+		cpu_clear(smp_processor_id(), mask);
+		for_each_online_cpu(cpu)
+			if (cpu_context(cpu, vma->vm_mm))
+				cpu_context(cpu, vma->vm_mm) = 0;
 	}
 	local_flush_tlb_page(vma, page);
 	preempt_enable();
arch/mips/kernel/smtc.c

@@ -1264,7 +1264,7 @@ void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
 		if (cpu_has_vtag_icache)
 			flush_icache_all();
 		/* Traverse all online CPUs (hack requires contigous range) */
-		for (i = 0; i < num_online_cpus(); i++) {
+		for_each_online_cpu(i) {
 			/*
 			 * We don't need to worry about our own CPU, nor those of
 			 * CPUs who don't share our TLB.

@@ -1293,7 +1293,7 @@ void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
 	/*
 	 * SMTC shares the TLB within VPEs and possibly across all VPEs.
 	 */
-	for (i = 0; i < num_online_cpus(); i++) {
+	for_each_online_cpu(i) {
 		if ((smtc_status & SMTC_TLB_SHARED) ||
 		    (cpu_data[i].vpe_id == cpu_data[cpu].vpe_id))
 			cpu_context(i, mm) = asid_cache(i) = asid;
include/asm-mips/mmu_context.h

@@ -120,7 +120,7 @@ init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 {
 	int i;

-	for (i = 0; i < num_online_cpus(); i++)
+	for_each_online_cpu(i)
 		cpu_context(i, mm) = 0;

 	return 0;

@@ -284,7 +284,7 @@ drop_mmu_context(struct mm_struct *mm, unsigned cpu)
 		int i;

 		/* SMTC shares the TLB (and ASIDs) across VPEs */
-		for (i = 0; i < num_online_cpus(); i++) {
+		for_each_online_cpu(i) {
 			if ((smtc_status & SMTC_TLB_SHARED)
 			    || (cpu_data[i].vpe_id == cpu_data[cpu].vpe_id))
 				cpu_context(i, mm) = 0;