Kirill Smelkov / linux / Commits / 35e181a9
Commit 35e181a9 authored Apr 17, 2002 by Ingo Molnar
Merge k:t into elte.hu:/home/mingo/BK/mine/linux-2.5
parents 91813920 e5eeec2e
Showing 1 changed file with 64 additions and 29 deletions.

kernel/sched.c (+64, -29)
@@ -1672,7 +1672,16 @@ void set_cpus_allowed(task_t *p, unsigned long new_mask)
 	preempt_enable();
 }
 
+/*
+ * Treat the bits of migration_mask as lock bits.
+ * If the bit corresponding to the cpu a migration_thread is
+ * running on is set, we have failed to claim our cpu and must
+ * yield in order to find another.
+ */
 static volatile unsigned long migration_mask;
+static atomic_t migration_threads_seeking_cpu;
+static struct completion migration_complete
+			= COMPLETION_INITIALIZER(migration_complete);
 
 static int migration_thread(void * unused)
 {
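Editorial note: for readers who have not met the kernel's atomic bitops, below is a minimal user-space sketch (not part of the commit) of the claim-by-bit idea the new comment describes, with a C11 atomic OR standing in for test_and_set_bit(). The names claim_mask and try_claim() are invented for illustration.

/*
 * Illustrative sketch, not from the commit: claim a slot by
 * atomically setting its bit; a failed claim means someone
 * else already owns that slot.
 */
#include <stdatomic.h>
#include <stdio.h>

static atomic_ulong claim_mask;		/* plays the role of migration_mask */

/* Returns 1 if we set bit n first, i.e. the claim succeeded. */
static int try_claim(unsigned n)
{
	unsigned long bit = 1UL << n;
	return !(atomic_fetch_or(&claim_mask, bit) & bit);
}

int main(void)
{
	printf("first claim of slot 2:  %d\n", try_claim(2));	/* prints 1 */
	printf("second claim of slot 2: %d\n", try_claim(2));	/* prints 0 */
	return 0;
}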
@@ -1696,26 +1705,54 @@ static int migration_thread(void * unused)
 	 * task binds itself to the current CPU.
 	 */
 
-	/* wait for all migration threads to start up. */
-	while (!migration_mask)
-		yield();
+	/*
+	 * Enter the loop with preemption disabled so that
+	 * smp_processor_id() remains valid through the check. The
+	 * interior of the wait loop re-enables preemption in an
+	 * attempt to get scheduled off the current cpu. When the
+	 * loop is exited the lock bit in migration_mask is acquired
+	 * and preemption is disabled on the way out. This way the
+	 * cpu acquired remains valid when ->cpus_allowed is set.
+	 */
+	preempt_disable();
 
-	for (;;) {
-		preempt_disable();
-		if (test_and_clear_bit(smp_processor_id(), &migration_mask))
-			current->cpus_allowed = 1 << smp_processor_id();
-		if (test_thread_flag(TIF_NEED_RESCHED))
-			schedule();
-		if (!migration_mask)
-			break;
+	while (test_and_set_bit(smp_processor_id(), &migration_mask)) {
+		preempt_enable();
+		yield();
+		preempt_disable();
 	}
 
+	current->cpus_allowed = 1 << smp_processor_id();
 	rq = this_rq();
 	rq->migration_thread = current;
 
+	/*
+	 * Now that we've bound ourselves to a cpu, post to
+	 * migration_threads_seeking_cpu and wait for everyone else.
+	 * Preemption should remain disabled and the cpu should remain
+	 * in busywait. Yielding the cpu would allow a livelock
+	 * where a timing pattern causes an idle task seeking a
+	 * migration_thread to always find the unbound migration_thread
+	 * running on the cpus it tries to steal tasks from.
+	 */
+	atomic_dec(&migration_threads_seeking_cpu);
+	while (atomic_read(&migration_threads_seeking_cpu))
+		cpu_relax();
+
 	preempt_enable();
 
 	sprintf(current->comm, "migration_CPU%d", smp_processor_id());
 
+	/*
+	 * Everyone's found their cpu, so now wake migration_init().
+	 * Multiple wakeups are harmless; removal from the waitqueue
+	 * has locking built-in, and waking an empty queue is valid.
+	 */
+	complete(&migration_complete);
+
+	/*
+	 * Initiate the event loop.
+	 */
 	for (;;) {
 		runqueue_t *rq_src, *rq_dest;
 		struct list_head *head;
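Editorial note: the new startup path combines two patterns: claim a free slot by atomically setting its bit, retrying with yield() on failure, then rendezvous on a countdown so nobody proceeds until every thread holds a slot. Below is a hedged user-space sketch of both (not from the commit): sched_yield() stands in for yield(), plain spinning for cpu_relax(), and preemption control is omitted since it has no user-space equivalent. claim_any_slot(), workers_seeking, and the worker count are invented for illustration.

/*
 * Illustrative sketch, not from the commit: claim loop plus
 * countdown rendezvous, transplanted to pthreads.
 */
#include <pthread.h>
#include <sched.h>
#include <stdatomic.h>
#include <stdio.h>

#define NWORKERS 4

static atomic_ulong claim_mask;			/* migration_mask */
static atomic_int workers_seeking = NWORKERS;	/* migration_threads_seeking_cpu */

/* Claim the first free slot, yielding and rescanning while all are taken. */
static int claim_any_slot(void)
{
	for (;;) {
		for (int i = 0; i < NWORKERS; i++) {
			unsigned long bit = 1UL << i;
			if (!(atomic_fetch_or(&claim_mask, bit) & bit))
				return i;	/* we set the bit: slot is ours */
		}
		sched_yield();
	}
}

static void *worker(void *arg)
{
	int slot = claim_any_slot();

	/* Post our arrival, then busy-wait until every worker holds a slot. */
	atomic_fetch_sub(&workers_seeking, 1);
	while (atomic_load(&workers_seeking))
		;	/* the kernel uses cpu_relax() here */

	printf("worker %ld bound to slot %d\n", (long)arg, slot);
	return NULL;
}

int main(void)
{
	pthread_t t[NWORKERS];

	for (long i = 0; i < NWORKERS; i++)
		pthread_create(&t[i], NULL, worker, (void *)i);
	for (int i = 0; i < NWORKERS; i++)
		pthread_join(t[i], NULL);
	return 0;
}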
@@ -1763,33 +1800,31 @@ static int migration_thread(void * unused)
 
 void __init migration_init(void)
 {
-	unsigned long tmp, orig_cache_decay_ticks;
+	unsigned long orig_cache_decay_ticks;
 	int cpu;
 
-	tmp = 0;
-	for (cpu = 0; cpu < smp_num_cpus; cpu++) {
-		if (kernel_thread(migration_thread, NULL,
-				CLONE_FS | CLONE_FILES | CLONE_SIGNAL) < 0)
-			BUG();
-		tmp |= (1UL << cpu_logical_map(cpu));
-	}
-	migration_mask = tmp;
+	atomic_set(&migration_threads_seeking_cpu, smp_num_cpus);
 
 	preempt_disable();
 
 	orig_cache_decay_ticks = cache_decay_ticks;
 	cache_decay_ticks = 0;
 
-	for (cpu = 0; cpu < smp_num_cpus; cpu++) {
-		int logical = cpu_logical_map(cpu);
-
-		while (!cpu_rq(logical)->migration_thread) {
-			set_current_state(TASK_INTERRUPTIBLE);
-			schedule_timeout(2);
-		}
-	}
-	if (migration_mask)
-		BUG();
+	for (cpu = 0; cpu < smp_num_cpus; cpu++)
+		if (kernel_thread(migration_thread, NULL,
+				CLONE_FS | CLONE_FILES | CLONE_SIGNAL) < 0)
+			BUG();
+
+	/*
+	 * We cannot have missed the wakeup, for the migration_thread
+	 * bound for the cpu migration_init() is running on cannot
+	 * acquire this cpu until migration_init() has yielded it by
+	 * means of wait_for_completion().
+	 */
+	wait_for_completion(&migration_complete);
 
 	cache_decay_ticks = orig_cache_decay_ticks;
 
 	preempt_enable();
 }
 
 #endif
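Editorial note: on the init side, the handshake reduces to arming the countdown, spawning the workers, and blocking once until some worker signals completion. Because the rendezvous above guarantees every worker is bound before any of them signals, waiting for a single signal suffices. A rough user-space analogue follows (not from the commit): a POSIX semaphore stands in for struct completion, every worker posts it, and the extra posts are as harmless as the "multiple wakeups" the commit's comment describes; all names are invented.

/*
 * Illustrative sketch, not from the commit: the init-side
 * handshake, with a semaphore playing migration_complete.
 */
#include <pthread.h>
#include <semaphore.h>
#include <stdio.h>

#define NWORKERS 4

static sem_t startup_complete;		/* plays the role of migration_complete */

static void *worker(void *arg)
{
	(void)arg;
	/* ... claim a slot and rendezvous here, as sketched above ... */
	sem_post(&startup_complete);	/* like complete() */
	return NULL;
}

int main(void)
{
	pthread_t t[NWORKERS];
	int i;

	sem_init(&startup_complete, 0, 0);
	for (i = 0; i < NWORKERS; i++)
		pthread_create(&t[i], NULL, worker, NULL);

	sem_wait(&startup_complete);	/* like wait_for_completion() */
	printf("startup handshake finished\n");

	for (i = 0; i < NWORKERS; i++)
		pthread_join(t[i], NULL);
	return 0;
}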