Skip to content
Projects
Groups
Snippets
Help
Loading...
Help
Support
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
L
linux
Project overview
Project overview
Details
Activity
Releases
Repository
Repository
Files
Commits
Branches
Tags
Contributors
Graph
Compare
Issues
0
Issues
0
List
Boards
Labels
Milestones
Merge Requests
0
Merge Requests
0
Analytics
Analytics
Repository
Value Stream
Wiki
Wiki
Snippets
Snippets
Members
Members
Collapse sidebar
Close sidebar
Activity
Graph
Create a new issue
Commits
Issue Boards
Open sidebar
Kirill Smelkov
linux
Commits
2b75b535
Commit
2b75b535
authored
Jun 11, 2002
by
Ingo Molnar
Browse files
Options
Browse Files
Download
Email Patches
Plain Diff
- put the sync wakeup feature back in, based on Mike Kravetz's patch.
parent
c700d531
Changes
4
Hide whitespace changes
Inline
Side-by-side
Showing
4 changed files
with
49 additions
and
9 deletions
+49
-9
fs/pipe.c
fs/pipe.c
+2
-2
include/linux/sched.h
include/linux/sched.h
+6
-0
kernel/ksyms.c
kernel/ksyms.c
+3
-0
kernel/sched.c
kernel/sched.c
+38
-7
No files found.
fs/pipe.c
View file @
2b75b535
...
...
@@ -119,7 +119,7 @@ pipe_read(struct file *filp, char *buf, size_t count, loff_t *ppos)
* writers synchronously that there is more
* room.
*/
wake_up_interruptible
(
PIPE_WAIT
(
*
inode
));
wake_up_interruptible
_sync
(
PIPE_WAIT
(
*
inode
));
kill_fasync
(
PIPE_FASYNC_WRITERS
(
*
inode
),
SIGIO
,
POLL_OUT
);
if
(
!
PIPE_EMPTY
(
*
inode
))
BUG
();
...
...
@@ -219,7 +219,7 @@ pipe_write(struct file *filp, const char *buf, size_t count, loff_t *ppos)
* is going to give up this CPU, so it doesn't have
* to do idle reschedules.
*/
wake_up_interruptible
(
PIPE_WAIT
(
*
inode
));
wake_up_interruptible
_sync
(
PIPE_WAIT
(
*
inode
));
kill_fasync
(
PIPE_FASYNC_READERS
(
*
inode
),
SIGIO
,
POLL_IN
);
PIPE_WAITING_WRITERS
(
*
inode
)
++
;
pipe_wait
(
inode
);
...
...
include/linux/sched.h
View file @
2b75b535
...
...
@@ -491,6 +491,7 @@ extern unsigned long prof_len;
extern unsigned long prof_shift;

/*
 * Wakeup primitives.  __wake_up_sync() is the "synchronous" variant:
 * the caller hints that it is about to give up the CPU, so the woken
 * task does not need to force an idle reschedule (SMP only).
 */
extern void FASTCALL(__wake_up(wait_queue_head_t *q, unsigned int mode, int nr));
extern void FASTCALL(__wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr));
extern void FASTCALL(sleep_on(wait_queue_head_t *q));
extern long FASTCALL(sleep_on_timeout(wait_queue_head_t *q, signed long timeout));
...
...
@@ -507,6 +508,11 @@ extern void FASTCALL(sched_exit(task_t * p));
#define wake_up_interruptible(x) __wake_up((x),TASK_INTERRUPTIBLE, 1)
#define wake_up_interruptible_nr(x, nr) __wake_up((x),TASK_INTERRUPTIBLE, nr)
#define wake_up_interruptible_all(x) __wake_up((x),TASK_INTERRUPTIBLE, 0)
/*
 * "Sync" wakeup: the waker promises to give up the CPU soon, so the
 * wakee need not trigger a reschedule (see the "If sync is set, a
 * resched_task() is a NOOP" path in try_to_wake_up()).  On UP there is
 * no cross-CPU reschedule to avoid, so it degrades to a plain wakeup.
 */
#ifdef CONFIG_SMP
#define wake_up_interruptible_sync(x) __wake_up_sync((x),TASK_INTERRUPTIBLE, 1)
#else
#define wake_up_interruptible_sync(x) __wake_up((x),TASK_INTERRUPTIBLE, 1)
#endif
asmlinkage
long
sys_wait4
(
pid_t
pid
,
unsigned
int
*
stat_addr
,
int
options
,
struct
rusage
*
ru
);
extern
int
in_group_p
(
gid_t
);
...
...
kernel/ksyms.c
View file @
2b75b535
...
...
@@ -457,6 +457,9 @@ EXPORT_SYMBOL(iomem_resource);
/* process management */
EXPORT_SYMBOL(complete_and_exit);
EXPORT_SYMBOL(__wake_up);
#if CONFIG_SMP
EXPORT_SYMBOL_GPL(__wake_up_sync);	/* internal use only */
#endif
EXPORT_SYMBOL(wake_up_process);
EXPORT_SYMBOL(sleep_on);
EXPORT_SYMBOL(sleep_on_timeout);
...
...
kernel/sched.c
View file @
2b75b535
...
...
@@ -321,31 +321,43 @@ void kick_if_running(task_t * p)
* "current->state = TASK_RUNNING" to mark yourself runnable
* without the overhead of this.
*/
/*
 * try_to_wake_up - attempt to wake up a task.
 * @p:    the to-be-woken task
 * @sync: nonzero for a "synchronous" wakeup: the waker promises to
 *        give up the CPU soon, so no cross-CPU reschedule is needed
 *
 * Returns 1 if the task was actually activated on a runqueue,
 * 0 if it was already runnable.
 */
static int try_to_wake_up(task_t * p, int sync)
{
	unsigned long flags;
	int success = 0;
	long old_state;
	runqueue_t *rq;

repeat_lock_task:
	rq = task_rq_lock(p, &flags);
	old_state = p->state;
	if (!p->array) {
		/*
		 * Sync wakeup of a task that is not current on this CPU:
		 * pull it over to the waker's CPU, since the waker is
		 * about to sleep anyway.  Migrating means dropping the
		 * runqueue lock and retrying against the new runqueue.
		 */
		if (unlikely(sync && (rq->curr != p))) {
			if (p->thread_info->cpu != smp_processor_id()) {
				p->thread_info->cpu = smp_processor_id();
				task_rq_unlock(rq, &flags);
				goto repeat_lock_task;
			}
		}
		if (old_state == TASK_UNINTERRUPTIBLE)
			rq->nr_uninterruptible--;
		activate_task(p, rq);
		/*
		 * If sync is set, a resched_task() is a NOOP
		 */
		if (p->prio < rq->curr->prio)
			resched_task(rq->curr);
		success = 1;
	}
	/* Mark runnable only after any migration retry has settled. */
	p->state = TASK_RUNNING;
	task_rq_unlock(rq, &flags);

	return success;
}
/*
 * wake_up_process - wake up a task with a plain (non-sync) wakeup.
 * Returns the try_to_wake_up() result: 1 if activated, 0 otherwise.
 */
int wake_up_process(task_t * p)
{
	return try_to_wake_up(p, 0);
}
void
wake_up_forked_process
(
task_t
*
p
)
...
...
@@ -874,7 +886,7 @@ asmlinkage void preempt_schedule(void)
* started to run but is not in state TASK_RUNNING. try_to_wake_up() returns
* zero in this (rare) case, and we handle it by continuing to scan the queue.
*/
static
inline
void
__wake_up_common
(
wait_queue_head_t
*
q
,
unsigned
int
mode
,
int
nr_exclusive
)
static
inline
void
__wake_up_common
(
wait_queue_head_t
*
q
,
unsigned
int
mode
,
int
nr_exclusive
,
int
sync
)
{
struct
list_head
*
tmp
;
unsigned
int
state
;
...
...
@@ -885,7 +897,7 @@ static inline void __wake_up_common(wait_queue_head_t *q, unsigned int mode, int
curr
=
list_entry
(
tmp
,
wait_queue_t
,
task_list
);
p
=
curr
->
task
;
state
=
p
->
state
;
if
((
state
&
mode
)
&&
try_to_wake_up
(
p
)
&&
if
((
state
&
mode
)
&&
try_to_wake_up
(
p
,
sync
)
&&
((
curr
->
flags
&
WQ_FLAG_EXCLUSIVE
)
&&
!--
nr_exclusive
))
break
;
}
...
...
@@ -899,17 +911,36 @@ void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
return
;
spin_lock_irqsave
(
&
q
->
lock
,
flags
);
__wake_up_common
(
q
,
mode
,
nr_exclusive
);
__wake_up_common
(
q
,
mode
,
nr_exclusive
,
0
);
spin_unlock_irqrestore
(
&
q
->
lock
,
flags
);
}
#if CONFIG_SMP

/*
 * __wake_up_sync - wake up threads blocked on a waitqueue, passing the
 * "sync" hint to try_to_wake_up(): the caller is about to give up the
 * CPU, so the wakee need not force a reschedule.  The hint is applied
 * only for exclusive wakeups (nr_exclusive != 0); a wake-all request
 * falls back to a plain wakeup.
 */
void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
{
	unsigned long flags;

	if (unlikely(!q))
		return;

	spin_lock_irqsave(&q->lock, flags);
	if (likely(nr_exclusive))
		__wake_up_common(q, mode, nr_exclusive, 1);
	else
		__wake_up_common(q, mode, nr_exclusive, 0);
	spin_unlock_irqrestore(&q->lock, flags);
}
#endif
/*
 * complete - signal a completion: bump the done count and wake one
 * waiter sleeping in either TASK_UNINTERRUPTIBLE or TASK_INTERRUPTIBLE,
 * using a plain (non-sync) wakeup.
 */
void complete(struct completion *x)
{
	unsigned long flags;

	spin_lock_irqsave(&x->wait.lock, flags);
	x->done++;
	__wake_up_common(&x->wait, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 1, 0);
	spin_unlock_irqrestore(&x->wait.lock, flags);
}
...
...
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment