Commit 019afe86 authored Jun 11, 2002 by Ingo Molnar
- squeeze a few more cycles out of the wakeup hotpath.
parent b5734da1
Showing 2 changed files with 28 additions and 5 deletions
include/linux/spinlock.h    +8  -0
kernel/sched.c              +20 -5
include/linux/spinlock.h

@@ -26,6 +26,7 @@
 #define write_lock_bh(lock)			do { local_bh_disable(); write_lock(lock); } while (0)
 #define spin_unlock_irqrestore(lock, flags)	do { spin_unlock(lock); local_irq_restore(flags); } while (0)
+#define _raw_spin_unlock_irqrestore(lock, flags) do { _raw_spin_unlock(lock); local_irq_restore(flags); } while (0)
 #define spin_unlock_irq(lock)			do { spin_unlock(lock); local_irq_enable(); } while (0)
 #define spin_unlock_bh(lock)			do { spin_unlock(lock); local_bh_enable(); } while (0)

@@ -143,6 +144,12 @@ do { \
 	preempt_schedule(); \
 } while (0)
+#define preempt_check_resched() \
+do { \
+	if (unlikely(test_thread_flag(TIF_NEED_RESCHED))) \
+		preempt_schedule(); \
+} while (0)
 #define spin_lock(lock) \
 do { \
 	preempt_disable(); \

@@ -176,6 +183,7 @@ do { \
 #define preempt_disable()		do { } while (0)
 #define preempt_enable_no_resched()	do {} while(0)
 #define preempt_enable()		do { } while (0)
+#define preempt_check_resched()		do { } while (0)
 #define spin_lock(lock)			_raw_spin_lock(lock)
 #define spin_trylock(lock)		_raw_spin_trylock(lock)
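Taken together, the two spinlock.h additions split what the ordinary CONFIG_PREEMPT unlock path does in one step: _raw_spin_unlock_irqrestore() drops the lock and restores interrupts without touching the preempt count, and preempt_check_resched() performs only the TIF_NEED_RESCHED check. Below is a minimal userspace model of that split; every function is a stand-in written for this page, not kernel code, and it assumes (as the spin_lock() definition above suggests) that the plain unlock re-enables preemption and runs the reschedule check immediately.

#include <stdio.h>
#include <stdbool.h>

/* Stand-ins for per-thread kernel state. */
static int  preempt_count;          /* models the preempt count               */
static bool need_resched = true;    /* models TIF_NEED_RESCHED being set      */
static int  lock;                   /* trivial stand-in for a spinlock_t      */

static void preempt_schedule(void) { puts("reschedule would happen here"); }

/* Ordinary CONFIG_PREEMPT-style unlock: release the lock, drop the count,
 * and do the reschedule check right away (irq-flags handling omitted). */
static void spin_unlock_irqrestore_model(int *l)
{
	*l = 0;
	if (--preempt_count == 0 && need_resched)
		preempt_schedule();
}

/* The two pieces this commit exposes separately. */
static void raw_spin_unlock_irqrestore_model(int *l) { *l = 0; /* count untouched */ }
static void preempt_check_resched_model(void)
{
	if (need_resched)
		preempt_schedule();
}

int main(void)
{
	/* Combined form: the check runs inside the unlock. */
	preempt_count = 1;
	lock = 1;
	spin_unlock_irqrestore_model(&lock);

	/* Split form: cheap unlock now, explicit check later -- the pattern
	 * task_rq_unlock() and kick_if_running() use in the sched.c hunks below. */
	lock = 1;
	raw_spin_unlock_irqrestore_model(&lock);
	preempt_check_resched_model();
	return 0;
}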
kernel/sched.c

@@ -156,6 +156,12 @@ static struct runqueue runqueues[NR_CPUS] __cacheline_aligned;
  * task_rq_lock - lock the runqueue a given task resides on and disable
  * interrupts. Note the ordering: we can safely lookup the task_rq without
  * explicitly disabling preemption.
+ *
+ * WARNING: to squeeze out a few more cycles we do not disable preemption
+ * explicitly (or implicitly), we just keep interrupts disabled. This means
+ * that within task_rq_lock/unlock sections you must be careful
+ * about locking/unlocking spinlocks, since they could cause an unexpected
+ * preemption.
  */
 static inline runqueue_t *task_rq_lock(task_t *p, unsigned long *flags)
 {

@@ -164,9 +170,9 @@ static inline runqueue_t *task_rq_lock(task_t *p, unsigned long *flags)
 repeat_lock_task:
 	local_irq_save(*flags);
 	rq = task_rq(p);
-	spin_lock(&rq->lock);
+	_raw_spin_lock(&rq->lock);
 	if (unlikely(rq != task_rq(p))) {
-		spin_unlock_irqrestore(&rq->lock, *flags);
+		_raw_spin_unlock_irqrestore(&rq->lock, *flags);
 		goto repeat_lock_task;
 	}
 	return rq;

@@ -174,7 +180,7 @@ static inline runqueue_t *task_rq_lock(task_t *p, unsigned long *flags)
 static inline void task_rq_unlock(runqueue_t *rq, unsigned long *flags)
 {
-	spin_unlock_irqrestore(&rq->lock, *flags);
+	_raw_spin_unlock_irqrestore(&rq->lock, *flags);
 }
 /*

@@ -282,8 +288,15 @@ static inline void resched_task(task_t *p)
 	nrpolling |= test_tsk_thread_flag(p, TIF_POLLING_NRFLAG);
 	if (!need_resched && !nrpolling && (p->thread_info->cpu != smp_processor_id()))
+		/*
+		 * NOTE: smp_send_reschedule() can be called from
+		 * spinlocked sections which do not have an elevated
+		 * preemption count. So the code either has to avoid
+		 * spinlocks, or has to put preempt_disable() and
+		 * preempt_enable_no_resched() around the code.
+		 */
 		smp_send_reschedule(p->thread_info->cpu);
-	preempt_enable();
+	preempt_enable_no_resched();
 #else
 	set_tsk_need_resched(p);
 #endif

@@ -334,8 +347,10 @@ void wait_task_inactive(task_t * p)
  */
 void kick_if_running(task_t * p)
 {
-	if (p == task_rq(p)->curr)
+	if (p == task_rq(p)->curr) {
 		resched_task(p);
+		preempt_check_resched();
+	}
 }
 #endif
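The WARNING added to task_rq_lock() is the cost of the optimisation: inside a task_rq_lock()/task_rq_unlock() section only interrupts are disabled and the preempt count stays at zero, so an ordinary spin_lock()/spin_unlock() pair nested in that section would, on unlock, see the count hit zero and call preempt_schedule() while rq->lock is still held. That is also why resched_task() now ends with preempt_enable_no_resched() (per the NOTE it may be reached from exactly such a section) and why kick_if_running() adds an explicit preempt_check_resched() afterwards so the reschedule is not lost. A small userspace model of the hazard, with every name a stand-in rather than real kernel code:

#include <stdio.h>
#include <stdbool.h>

static int  preempt_count;          /* per-thread preempt count stand-in      */
static bool need_resched = true;    /* TIF_NEED_RESCHED stand-in              */
static bool rq_lock_held;           /* rq->lock stand-in                      */

static void preempt_schedule(void)
{
	printf("preemption fires while rq->lock held: %s\n",
	       rq_lock_held ? "yes -- the case the WARNING forbids" : "no");
}

/* CONFIG_PREEMPT-style wrappers: they bump and drop the preempt count, and
 * the unlock side runs the reschedule check when the count reaches zero. */
static void spin_lock_model(void)   { preempt_count++; }
static void spin_unlock_model(void)
{
	if (--preempt_count == 0 && need_resched)
		preempt_schedule();
}

int main(void)
{
	/* task_rq_lock(): interrupts off (not modelled), rq->lock taken with the
	 * _raw_ variant, preempt count left at zero. */
	rq_lock_held = true;

	/* The hazard: a nested ordinary spinlock pair inside the section. */
	spin_lock_model();
	spin_unlock_model();            /* reschedules with rq->lock still held */

	/* task_rq_unlock(): _raw_ unlock, interrupts restored. */
	rq_lock_held = false;

	/* The pattern kick_if_running() uses: run the deferred check only after
	 * the critical section is over. */
	if (need_resched)
		preempt_schedule();
	return 0;
}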