Commit 3b05e668 authored Jun 13, 2002 by Linus Torvalds
Merge master.kernel.org:/home/mingo/BK/linux-2.5-sched/
into home.transmeta.com:/home/torvalds/v2.5/linux
parents bff61867 d58a247c
Showing 3 changed files with 27 additions and 40 deletions
arch/i386/kernel/entry.S    +1  -0
include/asm-i386/system.h   +5  -2
kernel/sched.c              +21 -38
arch/i386/kernel/entry.S
@@ -193,6 +193,7 @@ ENTRY(lcall27)
 ENTRY(ret_from_fork)
 #if CONFIG_SMP || CONFIG_PREEMPT
+        # NOTE: this function takes a parameter but it's unused on x86.
         call schedule_tail
 #endif
         GET_THREAD_INFO(%ebx)
include/asm-i386/system.h
@@ -11,9 +11,12 @@
 struct task_struct;     /* one of the stranger aspects of C forward declarations.. */
 extern void FASTCALL(__switch_to(struct task_struct *prev, struct task_struct *next));
 
-#define prepare_to_switch()     do { } while(0)
+#define prepare_arch_schedule(prev)     do { } while(0)
+#define finish_arch_schedule(prev)      do { } while(0)
+#define prepare_arch_switch(rq)         do { } while(0)
+#define finish_arch_switch(rq)          spin_unlock_irq(&(rq)->lock)
 
-#define switch_to(prev,next) do {                                       \
+#define switch_to(prev,next,last) do {                                  \
         asm volatile("pushl %%esi\n\t"                                  \
                      "pushl %%edi\n\t"                                  \
                      "pushl %%ebp\n\t"                                  \
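The net effect of this hunk is that the i386 port now describes its switch-time locking through four generic hooks instead of open-coded unlocks: prepare_arch_schedule()/finish_arch_schedule() bracket one pass through schedule(), prepare_arch_switch()/finish_arch_switch() bracket the context switch itself, and on i386 only finish_arch_switch() does real work (it drops the runqueue lock taken earlier in schedule()). Below is a plain userspace sketch of that call ordering as the kernel/sched.c hunks further down use it; struct runqueue, mock_schedule() and the pthread mutex standing in for the runqueue spinlock are illustrative inventions, not kernel code.

#include <pthread.h>
#include <stdio.h>

struct runqueue { pthread_mutex_t lock; };

/* i386-style hook defaults, mirroring the defines above
 * (pthread_mutex_unlock stands in for spin_unlock_irq). */
#define prepare_arch_schedule(prev)    do { } while (0)
#define finish_arch_schedule(prev)     do { } while (0)
#define prepare_arch_switch(rq)        do { } while (0)
#define finish_arch_switch(rq)         pthread_mutex_unlock(&(rq)->lock)

/* Mock of the ordering schedule() follows after this merge. */
static void mock_schedule(struct runqueue *rq, int prev, int next)
{
    prepare_arch_schedule(prev);
    pthread_mutex_lock(&rq->lock);          /* spin_lock_irq(&rq->lock) */

    if (prev != next) {
        prepare_arch_switch(rq);
        printf("context switch %d -> %d\n", prev, next);
        finish_arch_switch(rq);             /* drops the runqueue lock on i386 */
    } else {
        pthread_mutex_unlock(&rq->lock);    /* spin_unlock_irq(&rq->lock) */
    }
    finish_arch_schedule(prev);
}

int main(void)
{
    struct runqueue rq = { PTHREAD_MUTEX_INITIALIZER };
    mock_schedule(&rq, 1, 2);   /* tasks differ: go through the switch hooks */
    mock_schedule(&rq, 2, 2);   /* same task: just drop the lock */
    return 0;
}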
kernel/sched.c
@@ -156,12 +156,6 @@ static struct runqueue runqueues[NR_CPUS] __cacheline_aligned;
  * task_rq_lock - lock the runqueue a given task resides on and disable
  * interrupts. Note the ordering: we can safely lookup the task_rq without
  * explicitly disabling preemption.
- *
- * WARNING: to squeeze out a few more cycles we do not disable preemption
- * explicitly (or implicitly), we just keep interrupts disabled. This means
- * that within task_rq_lock/unlock sections you must be careful
- * about locking/unlocking spinlocks, since they could cause an unexpected
- * preemption.
  */
 static inline runqueue_t *task_rq_lock(task_t *p, unsigned long *flags)
 {
@@ -170,9 +164,9 @@ static inline runqueue_t *task_rq_lock(task_t *p, unsigned long *flags)
 repeat_lock_task:
         local_irq_save(*flags);
         rq = task_rq(p);
-        _raw_spin_lock(&rq->lock);
+        spin_lock(&rq->lock);
         if (unlikely(rq != task_rq(p))) {
-                _raw_spin_unlock_irqrestore(&rq->lock, *flags);
+                spin_unlock_irqrestore(&rq->lock, *flags);
                 goto repeat_lock_task;
         }
         return rq;
@@ -180,8 +174,7 @@ static inline runqueue_t *task_rq_lock(task_t *p, unsigned long *flags)
 static inline void task_rq_unlock(runqueue_t *rq, unsigned long *flags)
 {
-        _raw_spin_unlock_irqrestore(&rq->lock, *flags);
-        preempt_check_resched();
+        spin_unlock_irqrestore(&rq->lock, *flags);
 }
 
 /*
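Two things change in the task_rq_lock()/task_rq_unlock() hunks above: the _raw_ spinlock variants give way to the ordinary, preemption-aware ones (which is presumably why the WARNING comment could be dropped), and the unlock path no longer needs an explicit preempt_check_resched(). What stays is the lock-and-recheck loop: a task can migrate to another runqueue between the moment its runqueue is looked up and the moment that runqueue's lock is acquired, so the lookup is repeated under the lock and retried on a mismatch. Here is a small userspace sketch of that idiom, with a pthread mutex in place of the runqueue spinlock and invented struct and function names:

#include <pthread.h>

struct queue { pthread_mutex_t lock; };
struct task  { struct queue *q; };      /* queue the task currently lives on */

static struct queue *task_queue(struct task *t) { return t->q; }

/* Same shape as task_rq_lock(): look up, lock, re-check, retry on migration. */
static struct queue *task_queue_lock(struct task *t)
{
    struct queue *q;

repeat:
    q = task_queue(t);
    pthread_mutex_lock(&q->lock);
    if (q != task_queue(t)) {           /* task moved while we took the lock */
        pthread_mutex_unlock(&q->lock);
        goto repeat;
    }
    return q;                           /* locked, and still the task's queue */
}

static void task_queue_unlock(struct queue *q)
{
    pthread_mutex_unlock(&q->lock);
}

int main(void)
{
    struct queue q = { PTHREAD_MUTEX_INITIALIZER };
    struct task  t = { &q };

    struct queue *locked = task_queue_lock(&t);
    task_queue_unlock(locked);
    return 0;
}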
@@ -289,15 +282,8 @@ static inline void resched_task(task_t *p)
         nrpolling |= test_tsk_thread_flag(p,TIF_POLLING_NRFLAG);
 
         if (!need_resched && !nrpolling && (p->thread_info->cpu != smp_processor_id()))
-                /*
-                 * NOTE: smp_send_reschedule() can be called from
-                 * spinlocked sections which do not have an elevated
-                 * preemption count. So the code either has to avoid
-                 * spinlocks, or has to put preempt_disable() and
-                 * preempt_enable_no_resched() around the code.
-                 */
                 smp_send_reschedule(p->thread_info->cpu);
-        preempt_enable_no_resched();
+        preempt_enable();
 #else
         set_tsk_need_resched(p);
 #endif
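With the runqueue lock now taken and released through the regular preemption-aware spinlock ops, resched_task() can finish with a plain preempt_enable() instead of preempt_enable_no_resched(), and the long NOTE about calling smp_send_reschedule() from spinlocked sections goes away. The difference between the two enable flavours, modelled as ordinary userspace C with invented counter, flag and helper names: preempt_enable() re-checks for a pending reschedule once the count drops back to zero, while preempt_enable_no_resched() only drops the count.

#include <stdio.h>

static int preempt_count;   /* > 0 means preemption is off */
static int need_resched;    /* models TIF_NEED_RESCHED */

static void mock_schedule(void)
{
    printf("rescheduling\n");
    need_resched = 0;
}

static void preempt_disable(void)           { preempt_count++; }
static void preempt_enable_no_resched(void) { preempt_count--; }

static void preempt_enable(void)
{
    /* same as above, but honour a pending reschedule request */
    if (--preempt_count == 0 && need_resched)
        mock_schedule();
}

int main(void)
{
    need_resched = 1;

    preempt_disable();
    preempt_enable_no_resched();    /* the request stays pending */

    preempt_disable();
    preempt_enable();               /* the request is acted on here */
    return 0;
}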
@@ -348,10 +334,8 @@ void wait_task_inactive(task_t * p)
  */
 void kick_if_running(task_t * p)
 {
-        if (p == task_rq(p)->curr) {
+        if (p == task_rq(p)->curr)
                 resched_task(p);
-                preempt_check_resched();
-        }
 }
 #endif
@@ -451,19 +435,18 @@ void sched_exit(task_t * p)
 }
 
 #if CONFIG_SMP || CONFIG_PREEMPT
-asmlinkage void schedule_tail(void)
+asmlinkage void schedule_tail(task_t *prev)
 {
-        spin_unlock_irq(&this_rq()->lock);
+        finish_arch_switch(this_rq());
+        finish_arch_schedule(prev);
 }
 #endif
 
-static inline void context_switch(task_t *prev, task_t *next)
+static inline task_t *context_switch(task_t *prev, task_t *next)
 {
         struct mm_struct *mm = next->mm;
         struct mm_struct *oldmm = prev->active_mm;
 
-        prepare_to_switch();
-
         if (unlikely(!mm)) {
                 next->active_mm = oldmm;
                 atomic_inc(&oldmm->mm_count);
@@ -477,7 +460,9 @@ static inline void context_switch(task_t *prev, task_t *next)
         }
 
         /* Here we just switch the register state and the stack. */
-        switch_to(prev, next);
+        switch_to(prev, next, prev);
+
+        return prev;
 }
 
 unsigned long nr_running(void)
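These two hunks carry the core interface change: context_switch() now hands a task pointer back to schedule(), and the i386 switch_to() grows a third "last" argument to deliver it. When a task finally gets the CPU back it resumes just after its own switch_to() call, but the task that ran immediately before it is in general not the "next" it switched to back then, so only the switch primitive itself can report who that was; schedule() then runs its finish hooks against that task, and a newly forked child gets the same information through schedule_tail(prev), which is why the entry.S comment notes the (x86-unused) parameter. Below is a userspace model of that handoff built on ucontext coroutines; struct task, the handoff variable and the macro body are inventions for illustration only, mimicking the shape of the three-argument switch_to().

#include <stdio.h>
#include <ucontext.h>

struct task {
    const char *name;
    ucontext_t ctx;
};

/* Carries "which task did we just switch away from" across the stack
 * switch, playing the role of the register the real switch_to() uses. */
static struct task *handoff;

#define switch_to(prev, next, last) do {            \
    handoff = (prev);                               \
    swapcontext(&(prev)->ctx, &(next)->ctx);        \
    (last) = handoff;                               \
} while (0)

static struct task main_task = { "main" };
static struct task child_task = { "child" };
static char child_stack[64 * 1024];

static void child_fn(void)
{
    /* A freshly created task has no saved frame to resume into, so it reads
     * the handoff directly -- the analogue of schedule_tail(prev). */
    printf("child starts, switched in from %s\n", handoff->name);

    struct task *last = NULL;
    switch_to(&child_task, &main_task, last);
    (void)last;     /* never reached: main exits before the child runs again */
}

int main(void)
{
    getcontext(&child_task.ctx);
    child_task.ctx.uc_stack.ss_sp = child_stack;
    child_task.ctx.uc_stack.ss_size = sizeof(child_stack);
    child_task.ctx.uc_link = &main_task.ctx;
    makecontext(&child_task.ctx, child_fn, 0);

    struct task *last = NULL;
    switch_to(&main_task, &child_task, last);
    printf("main resumes, switched in from %s\n", last->name);
    return 0;
}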
@@ -823,6 +808,7 @@ asmlinkage void schedule(void)
         rq = this_rq();
 
         release_kernel_lock(prev, smp_processor_id());
+        prepare_arch_schedule(prev);
         prev->sleep_timestamp = jiffies;
         spin_lock_irq(&rq->lock);
@@ -878,23 +864,20 @@ asmlinkage void schedule(void)
         if (likely(prev != next)) {
                 rq->nr_switches++;
                 rq->curr = next;
 
-                context_switch(prev, next);
-                /*
-                 * The runqueue pointer might be from another CPU
-                 * if the new task was last running on a different
-                 * CPU - thus re-load it.
-                 */
-                mb();
-                rq = this_rq();
-        }
-        spin_unlock_irq(&rq->lock);
+                prepare_arch_switch(rq);
+                prev = context_switch(prev, next);
+                barrier();
+                rq = this_rq();
+                finish_arch_switch(rq);
+        } else
+                spin_unlock_irq(&rq->lock);
+        finish_arch_schedule(prev);
 
         reacquire_kernel_lock(current);
         preempt_enable_no_resched();
         if (test_thread_flag(TIF_NEED_RESCHED))
                 goto need_resched;
-        return;
 }
 
 #ifdef CONFIG_PREEMPT