Commit cd5bc89d authored Aug 03, 2008 by David S. Miller
sparc64: Use cpumask_t pointers and for_each_cpu_mask_nr() in xcall_deliver.
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 622824db
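For readers unfamiliar with the pattern, the sketch below illustrates the calling-convention change this commit applies: the cross-call delivery functions previously took a cpumask_t by value (copying the whole mask at every call site) and now take a const cpumask_t * and walk it with for_each_cpu_mask_nr(). This is illustrative commentary only, not part of the commit; fake_cpumask_t, NR_FAKE_CPUS and the printf() calls are stand-ins for the kernel's cpumask_t, NR_CPUS and the real dispatch code.

/* Illustrative sketch only -- not part of the commit.  fake_cpumask_t and
 * NR_FAKE_CPUS are stand-ins for the kernel's cpumask_t and NR_CPUS, and the
 * printf() calls stand in for the real cross-call dispatch.
 */
#include <stdio.h>

#define NR_FAKE_CPUS 64

typedef struct { unsigned long long bits; } fake_cpumask_t;

/* Old convention: the whole mask is copied onto the stack at every call. */
static void deliver_by_value(fake_cpumask_t mask)
{
        int i;

        for (i = 0; i < NR_FAKE_CPUS; i++)
                if (mask.bits & (1ULL << i))
                        printf("deliver to cpu %d\n", i);
}

/* New convention (this commit): pass a const pointer, no copy; the loop
 * corresponds to for_each_cpu_mask_nr(i, *mask) in the kernel.
 */
static void deliver_by_pointer(const fake_cpumask_t *mask)
{
        int i;

        for (i = 0; i < NR_FAKE_CPUS; i++)
                if (mask->bits & (1ULL << i))
                        printf("deliver to cpu %d\n", i);
}

int main(void)
{
        fake_cpumask_t mask = { 0x5ULL };       /* cpus 0 and 2 */

        deliver_by_value(mask);                 /* old: copies the mask */
        deliver_by_pointer(&mask);              /* new: passes its address */
        return 0;
}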
Showing 1 changed file with 21 additions and 18 deletions (+21, -18)
arch/sparc64/kernel/smp.c (view file @ cd5bc89d)
@@ -459,13 +459,13 @@ static void spitfire_xcall_helper(u64 data0, u64 data1, u64 data2, u64 pstate, u
         }
 }
 
-static inline void spitfire_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask)
+static inline void spitfire_xcall_deliver(u64 data0, u64 data1, u64 data2, const cpumask_t *mask)
 {
         u64 pstate;
         int i;
 
         __asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
-        for_each_cpu_mask(i, mask)
+        for_each_cpu_mask_nr(i, *mask)
                 spitfire_xcall_helper(data0, data1, data2, pstate, i);
 }
@@ -473,14 +473,17 @@ static inline void spitfire_xcall_deliver(u64 data0, u64 data1, u64 data2, cpuma
  * packet, but we have no use for that. However we do take advantage of
  * the new pipelining feature (ie. dispatch to multiple cpus simultaneously).
  */
-static void cheetah_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask)
+static void cheetah_xcall_deliver(u64 data0, u64 data1, u64 data2, const cpumask_t *mask_p)
 {
         u64 pstate, ver, busy_mask;
         int nack_busy_id, is_jbus, need_more;
+        cpumask_t mask;
 
-        if (cpus_empty(mask))
+        if (cpus_empty(*mask_p))
                 return;
 
+        mask = *mask_p;
+
         /* Unfortunately, someone at Sun had the brilliant idea to make the
          * busy/nack fields hard-coded by ITID number for this Ultra-III
          * derivative processor.
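One detail worth noting about the hunk above (illustrative commentary, not part of the commit): cheetah_xcall_deliver() cannot work directly through the const pointer, because the later hunks clear bits out of the mask with cpu_clear(i, mask) as CPUs are dispatched or NACK the cross call, so the function first copies *mask_p into a local cpumask_t. A minimal user-space sketch of that copy-then-mutate pattern, with fake_cpumask_t again standing in for cpumask_t:

/* Illustrative sketch only -- not part of the commit.  fake_cpumask_t stands
 * in for cpumask_t; the bit-clearing mirrors cpu_clear(i, mask) in the later
 * hunks of cheetah_xcall_deliver().
 */
#include <stdio.h>

typedef struct { unsigned long long bits; } fake_cpumask_t;

static void deliver(const fake_cpumask_t *mask_p)
{
        fake_cpumask_t mask;
        int i;

        if (mask_p->bits == 0)          /* stands in for cpus_empty(*mask_p) */
                return;

        mask = *mask_p;                 /* private, mutable copy of the caller's mask */

        for (i = 0; i < 64; i++) {
                if (!(mask.bits & (1ULL << i)))
                        continue;
                printf("dispatch to cpu %d\n", i);
                mask.bits &= ~(1ULL << i);      /* like cpu_clear(i, mask): only the copy changes */
        }
}

int main(void)
{
        fake_cpumask_t online = { 0xaULL };     /* cpus 1 and 3 */

        deliver(&online);
        return 0;
}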
@@ -511,7 +514,7 @@ static void cheetah_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mas
         {
                 int i;
 
-                for_each_cpu_mask(i, mask) {
+                for_each_cpu_mask_nr(i, mask) {
                         u64 target = (i << 14) | 0x70;
 
                         if (is_jbus) {
@@ -550,7 +553,7 @@ static void cheetah_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mas
                              : : "r" (pstate));
                 if (unlikely(need_more)) {
                         int i, cnt = 0;
-                        for_each_cpu_mask(i, mask) {
+                        for_each_cpu_mask_nr(i, mask) {
                                 cpu_clear(i, mask);
                                 cnt++;
                                 if (cnt == 32)
@@ -584,7 +587,7 @@ static void cheetah_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mas
                         /* Clear out the mask bits for cpus which did not
                          * NACK us.
                          */
-                        for_each_cpu_mask(i, mask) {
+                        for_each_cpu_mask_nr(i, mask) {
                                 u64 check_mask;
 
                                 if (is_jbus)
@@ -605,16 +608,16 @@ static void cheetah_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mas
 }
 
 /* Multi-cpu list version. */
-static void hypervisor_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask)
+static void hypervisor_xcall_deliver(u64 data0, u64 data1, u64 data2, const cpumask_t *mask)
 {
-        int cnt, retries, this_cpu, prev_sent, i;
-        unsigned long flags, status;
-        cpumask_t error_mask;
         struct trap_per_cpu *tb;
         u16 *cpu_list;
         u64 *mondo;
+        cpumask_t error_mask;
+        unsigned long flags, status;
+        int cnt, retries, this_cpu, prev_sent, i;
 
-        if (cpus_empty(mask))
+        if (cpus_empty(*mask))
                 return;
 
         /* We have to do this whole thing with interrupts fully disabled.
@@ -642,7 +645,7 @@ static void hypervisor_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t
         /* Setup the initial cpu list. */
         cnt = 0;
-        for_each_cpu_mask(i, mask)
+        for_each_cpu_mask_nr(i, *mask)
                 cpu_list[cnt++] = i;
 
         cpus_clear(error_mask);
@@ -729,7 +732,7 @@ static void hypervisor_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t
                        "were in error state\n",
                        this_cpu);
                 printk(KERN_CRIT "CPU[%d]: Error mask [ ", this_cpu);
-                for_each_cpu_mask(i, error_mask)
+                for_each_cpu_mask_nr(i, error_mask)
                         printk("%d ", i);
                 printk("]\n");
                 return;
@@ -756,7 +759,7 @@ static void hypervisor_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t
         printk("]\n");
 }
 
-static void (*xcall_deliver)(u64, u64, u64, cpumask_t);
+static void (*xcall_deliver)(u64, u64, u64, const cpumask_t *);
 
 /* Send cross call to all processors mentioned in MASK
  * except self.
@@ -769,7 +772,7 @@ static void smp_cross_call_masked(unsigned long *func, u32 ctx, u64 data1, u64 d
         cpus_and(mask, mask, cpu_online_map);
         cpu_clear(this_cpu, mask);
 
-        xcall_deliver(data0, data1, data2, mask);
+        xcall_deliver(data0, data1, data2, &mask);
         /* NOTE: Caller runs local copy on master. */
 
         put_cpu();
@@ -903,7 +906,7 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu)
         }
         if (data0) {
                 xcall_deliver(data0, __pa(pg_addr),
-                              (u64) pg_addr, mask);
+                              (u64) pg_addr, &mask);
 #ifdef CONFIG_DEBUG_DCFLUSH
                 atomic_inc(&dcpage_flushes_xcall);
 #endif
@@ -945,7 +948,7 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
         }
         if (data0) {
                 xcall_deliver(data0, __pa(pg_addr),
-                              (u64) pg_addr, mask);
+                              (u64) pg_addr, &mask);
 #ifdef CONFIG_DEBUG_DCFLUSH
                 atomic_inc(&dcpage_flushes_xcall);
 #endif