Commit f2a5155d, authored Sep 17, 2002 by Paul Mackerras

    Merge samba.org:/home/paulus/kernel/linux-2.5
    into samba.org:/home/paulus/kernel/for-linus-ppc

Parents: 81803bc1, 9325c684

Showing 43 changed files with 388 additions and 353 deletions (+388 -353).
arch/i386/kernel/cpu/mtrr/generic.c   +3   -1
arch/i386/kernel/vm86.c               +5   -4
drivers/char/hvc_console.c            +1   -1
drivers/char/sysrq.c                  +1   -1
drivers/char/tty_io.c                 +5   -5
drivers/pnp/pnpbios_core.c            +1   -1
fs/exec.c                             +131 -48
fs/fcntl.c                            +2   -2
fs/lockd/clntlock.c                   +1   -1
fs/locks.c                            +1   -1
fs/namespace.c                        +6   -6
fs/partitions/check.c                 +1   -1
fs/proc/base.c                        +1   -1
fs/proc/inode.c                       +3   -1
include/linux/sched.h                 +26  -10
include/net/llc_c_ev.h                +2   -15
include/net/llc_conn.h                +0   -1
include/net/llc_evnt.h                +2   -13
include/net/llc_if.h                  +3   -15
include/net/llc_main.h                +0   -2
include/net/llc_s_ev.h                +0   -10
include/net/llc_sap.h                 +0   -2
init/main.c                           +1   -1
kernel/capability.c                   +6   -6
kernel/exit.c                         +39  -31
kernel/fork.c                         +11  -12
kernel/sched.c                        +10  -10
kernel/signal.c                       +30  -14
kernel/softirq.c                      +1   -2
kernel/suspend.c                      +7   -6
kernel/sys.c                          +12  -12
mm/oom_kill.c                         +9   -7
mm/pdflush.c                          +1   -2
mm/vmscan.c                           +1   -1
net/802/p8022.c                       +2   -12
net/802/psnap.c                       +2   -12
net/ipv4/netfilter/ipt_owner.c        +9   -8
net/ipv6/netfilter/ip6t_owner.c       +6   -5
net/llc/llc_actn.c                    +0   -3
net/llc/llc_c_ac.c                    +2   -30
net/llc/llc_if.c                      +21  -25
net/llc/llc_sap.c                     +0   -3
net/llc/llc_sock.c                    +23  -9
arch/i386/kernel/cpu/mtrr/generic.c

@@ -230,6 +230,7 @@ static unsigned long set_mtrr_state(u32 deftype_lo, u32 deftype_hi)
 static u32 cr4 = 0;
 static u32 deftype_lo, deftype_hi;
+static spinlock_t set_atomicity_lock = SPIN_LOCK_UNLOCKED;

 static void prepare_set(void)
 {
@@ -238,6 +239,7 @@ static void prepare_set(void)
	/* Note that this is not ideal, since the cache is only flushed/disabled
	   for this CPU while the MTRRs are changed, but changing this requires
	   more invasive changes to the way the kernel boots */
+	spin_lock(&set_atomicity_lock);

	/* Save value of CR4 and clear Page Global Enable (bit 7) */
	if ( cpu_has_pge ) {
@@ -273,7 +275,7 @@ static void post_set(void)
	/* Restore value of CR4 */
	if ( cpu_has_pge )
		write_cr4(cr4);
+	spin_unlock(&set_atomicity_lock);
 }

 static void generic_set_all(void)
arch/i386/kernel/vm86.c

@@ -608,16 +608,17 @@ static inline void free_vm86_irq(int irqnumber)
 static inline int task_valid(struct task_struct *tsk)
 {
-	struct task_struct *p;
+	struct task_struct *g, *p;
	int ret = 0;

	read_lock(&tasklist_lock);
-	for_each_task(p) {
+	do_each_thread(g, p)
		if ((p == tsk) && (p->sig)) {
			ret = 1;
-			break;
+			goto out;
		}
-	}
+	while_each_thread(g, p);
+out:
	read_unlock(&tasklist_lock);
	return ret;
 }
drivers/char/hvc_console.c

@@ -286,7 +286,7 @@ int __init hvc_init(void)
		panic("Couldn't register hvc console driver\n");

	if (hvc_driver.num > 0)
-		kernel_thread(khvcd, NULL, CLONE_FS | CLONE_FILES | CLONE_SIGNAL);
+		kernel_thread(khvcd, NULL, CLONE_KERNEL);
	return 0;
 }
drivers/char/sysrq.c

@@ -299,7 +299,7 @@ static void send_sig_all(int sig)
 {
	struct task_struct *p;

-	for_each_task(p) {
+	for_each_process(p) {
		if (p->mm && p->pid != 1)
			/* Not swapper, init nor kernel thread */
			force_sig(sig, p);
drivers/char/tty_io.c

@@ -496,7 +496,7 @@ void do_tty_hangup(void *data)
	}

	read_lock(&tasklist_lock);
-	for_each_task(p) {
+	for_each_process(p) {
		if ((tty->session > 0) && (p->session == tty->session) &&
		    p->leader) {
			send_sig(SIGHUP, p, 1);
@@ -598,7 +598,7 @@ void disassociate_ctty(int on_exit)
	tty->pgrp = -1;

	read_lock(&tasklist_lock);
-	for_each_task(p)
+	for_each_process(p)
		if (p->session == current->session)
			p->tty = NULL;
	read_unlock(&tasklist_lock);
@@ -1223,7 +1223,7 @@ static void release_dev(struct file * filp)
		struct task_struct *p;

		read_lock(&tasklist_lock);
-		for_each_task(p) {
+		for_each_process(p) {
			if (p->tty == tty || (o_tty && p->tty == o_tty))
				p->tty = NULL;
		}
@@ -1561,7 +1561,7 @@ static int tiocsctty(struct tty_struct *tty, int arg)
		struct task_struct *p;

		read_lock(&tasklist_lock);
-		for_each_task(p)
+		for_each_process(p)
			if (p->tty == tty)
				p->tty = NULL;
		read_unlock(&tasklist_lock);
@@ -1834,7 +1834,7 @@ static void __do_SAK(void *arg)
	if (tty->driver.flush_buffer)
		tty->driver.flush_buffer(tty);
	read_lock(&tasklist_lock);
-	for_each_task(p) {
+	for_each_process(p) {
		if ((p->tty == tty) ||
		    ((session > 0) && (p->session == session))) {
			printk(KERN_NOTICE "SAK: killed process %d"
drivers/pnp/pnpbios_core.c

@@ -1299,7 +1299,7 @@ static int __init pnpbios_thread_init(void)
 {
 #ifdef CONFIG_HOTPLUG
	init_completion(&unload_sem);
-	if (kernel_thread(pnp_dock_thread, NULL, CLONE_FS | CLONE_FILES | CLONE_SIGNAL) > 0)
+	if (kernel_thread(pnp_dock_thread, NULL, CLONE_KERNEL) > 0)
		unloading = 0;
 #endif
	return 0;
fs/exec.c

@@ -40,6 +40,7 @@
 #define __NO_VERSION__
 #include <linux/module.h>
 #include <linux/namei.h>
+#include <linux/proc_fs.h>
 #include <asm/uaccess.h>
 #include <asm/pgalloc.h>
@@ -493,50 +494,149 @@ static int exec_mmap(struct mm_struct *mm)
	return 0;
 }

+static struct dentry *clean_proc_dentry(struct task_struct *p)
+{
+	struct dentry *proc_dentry = p->proc_dentry;
+
+	if (proc_dentry) {
+		spin_lock(&dcache_lock);
+		if (!list_empty(&proc_dentry->d_hash)) {
+			dget_locked(proc_dentry);
+			list_del_init(&proc_dentry->d_hash);
+		} else
+			proc_dentry = NULL;
+		spin_unlock(&dcache_lock);
+	}
+	return proc_dentry;
+}
+
+static inline void put_proc_dentry(struct dentry *dentry)
+{
+	if (dentry) {
+		shrink_dcache_parent(dentry);
+		dput(dentry);
+	}
+}
+
 /*
  * This function makes sure the current process has its own signal table,
  * so that flush_signal_handlers can later reset the handlers without
  * disturbing other processes.  (Other processes might share the signal
- * table via the CLONE_SIGNAL option to clone().)
+ * table via the CLONE_SIGHAND option to clone().)
  */
-static inline int make_private_signals(void)
+static inline int de_thread(struct signal_struct *oldsig)
 {
	struct signal_struct *newsig;
+	int count;

-	remove_thread_group(current, current->sig);
	if (atomic_read(&current->sig->count) <= 1)
		return 0;
	newsig = kmem_cache_alloc(sigact_cachep, GFP_KERNEL);
-	if (newsig == NULL)
+	if (!newsig)
		return -ENOMEM;
+
+	if (list_empty(&current->thread_group))
+		goto out;
+	/*
+	 * Kill all other threads in the thread group:
+	 */
+	spin_lock_irq(&oldsig->siglock);
+	if (oldsig->group_exit) {
+		/*
+		 * Another group action in progress, just
+		 * return so that the signal is processed.
+		 */
+		spin_unlock_irq(&oldsig->siglock);
+		kmem_cache_free(sigact_cachep, newsig);
+		return -EAGAIN;
+	}
+	oldsig->group_exit = 1;
+	__broadcast_thread_group(current, SIGKILL);
+
+	/*
+	 * Account for the thread group leader hanging around:
+	 */
+	count = 2;
+	if (current->pid == current->tgid)
+		count = 1;
+	while (atomic_read(&oldsig->count) > count) {
+		oldsig->group_exit_task = current;
+		current->state = TASK_UNINTERRUPTIBLE;
+		spin_unlock_irq(&oldsig->siglock);
+		schedule();
+		spin_lock_irq(&oldsig->siglock);
+		if (oldsig->group_exit_task)
+			BUG();
+	}
+	spin_unlock_irq(&oldsig->siglock);
+
+	/*
+	 * At this point all other threads have exited, all we have to
+	 * do is to wait for the thread group leader to become inactive,
+	 * and to assume its PID:
+	 */
+	if (current->pid != current->tgid) {
+		struct task_struct *leader = current->group_leader;
+		struct dentry *proc_dentry1, *proc_dentry2;
+		unsigned long state;
+
+		wait_task_inactive(leader);
+
+		write_lock_irq(&tasklist_lock);
+		proc_dentry1 = clean_proc_dentry(current);
+		proc_dentry2 = clean_proc_dentry(leader);
+
+		if (leader->tgid != current->tgid)
+			BUG();
+		if (current->pid == current->tgid)
+			BUG();
+		/*
+		 * An exec() starts a new thread group with the
+		 * TGID of the previous thread group. Rehash the
+		 * two threads with a switched PID, and release
+		 * the former thread group leader:
+		 */
+		unhash_pid(current);
+		unhash_pid(leader);
+		leader->pid = leader->tgid = current->pid;
+		current->pid = current->tgid;
+		hash_pid(current);
+		hash_pid(leader);
+		list_add_tail(&current->tasks, &init_task.tasks);
+
+		state = leader->state;
+
+		write_unlock_irq(&tasklist_lock);
+
+		if (state == TASK_ZOMBIE)
+			release_task(leader);
+		put_proc_dentry(proc_dentry1);
+		put_proc_dentry(proc_dentry2);
+	}
+
+out:
	spin_lock_init(&newsig->siglock);
	atomic_set(&newsig->count, 1);
	newsig->group_exit = 0;
	newsig->group_exit_code = 0;
+	newsig->group_exit_task = NULL;
	memcpy(newsig->action, current->sig->action, sizeof(newsig->action));
	init_sigpending(&newsig->shared_pending);
+
+	remove_thread_group(current, current->sig);
	spin_lock_irq(&current->sigmask_lock);
	current->sig = newsig;
	spin_unlock_irq(&current->sigmask_lock);
+
+	if (atomic_dec_and_test(&oldsig->count))
+		kmem_cache_free(sigact_cachep, oldsig);
+
+	if (!list_empty(&current->thread_group))
+		BUG();
+	if (current->tgid != current->pid)
+		BUG();
	return 0;
 }

-/*
- * If make_private_signals() made a copy of the signal table, decrement the
- * refcount of the original table, and free it if necessary.
- * We don't do that in make_private_signals() so that we can back off
- * in flush_old_exec() if an error occurs after calling make_private_signals().
- */
-static inline void release_old_signals(struct signal_struct *oldsig)
-{
-	if (current->sig == oldsig)
-		return;
-	if (atomic_dec_and_test(&oldsig->count))
-		kmem_cache_free(sigact_cachep, oldsig);
-}
-
 /*
@@ -572,44 +672,27 @@ static inline void flush_old_files(struct files_struct * files)
	write_unlock(&files->file_lock);
 }

-/*
- * An execve() will automatically "de-thread" the process.
- * - if a master thread (PID==TGID) is doing this, then all subsidiary threads
- *   will be killed (otherwise there will end up being two independent thread
- *   groups with the same TGID).
- * - if a subsidary thread is doing this, then it just leaves the thread group
- */
-static void de_thread(struct task_struct *tsk)
-{
-	if (!list_empty(&tsk->thread_group))
-		BUG();
-	/* An exec() starts a new thread group: */
-	tsk->tgid = tsk->pid;
-}
-
 int flush_old_exec(struct linux_binprm * bprm)
 {
	char * name;
	int i, ch, retval;
-	struct signal_struct * oldsig;
-
-	/*
-	 * Make sure we have a private signal table
-	 */
-	oldsig = current->sig;
-	retval = make_private_signals();
-	if (retval) goto flush_failed;
+	struct signal_struct * oldsig = current->sig;

	/*
	 * Release all of the old mmap stuff
	 */
	retval = exec_mmap(bprm->mm);
	if (retval) goto mmap_failed;

+	/*
+	 * Make sure we have a private signal table and that
+	 * we are unassociated from the previous thread group.
+	 */
+	retval = de_thread(oldsig);
+	if (retval) goto flush_failed;
+
	/* This is the point of no return */
-	de_thread(current);
-	release_old_signals(oldsig);

	current->sas_ss_sp = current->sas_ss_size = 0;
fs/fcntl.c

@@ -493,7 +493,7 @@ void send_sigio(struct fown_struct *fown, int fd, int band)
		send_sigio_to_task(p, fown, fd, band);
		goto out_unlock_task;
	}
-	for_each_task(p) {
+	for_each_process(p) {
		int match = p->pid;
		if (pid < 0)
			match = -p->pgrp;
@@ -531,7 +531,7 @@ int send_sigurg(struct fown_struct *fown)
		send_sigurg_to_task(p, fown);
		goto out_unlock_task;
	}
-	for_each_task(p) {
+	for_each_process(p) {
		int match = p->pid;
		if (pid < 0)
			match = -p->pgrp;
fs/lockd/clntlock.c

@@ -188,7 +188,7 @@ nlmclnt_recovery(struct nlm_host *host, u32 newstate)
		nlmclnt_prepare_reclaim(host, newstate);
		nlm_get_host(host);
		MOD_INC_USE_COUNT;
-		kernel_thread(reclaimer, host, CLONE_SIGNAL);
+		kernel_thread(reclaimer, host, CLONE_KERNEL);
	}
 }
fs/locks.c

@@ -1588,7 +1588,7 @@ int fcntl_setlk64(struct file *filp, unsigned int cmd, struct flock64 *l)
	for (;;) {
		error = posix_lock_file(filp, file_lock);
-		if ((error != -EAGAIN) || (cmd == F_SETLK))
+		if ((error != -EAGAIN) || (cmd == F_SETLK64))
			break;
		error = wait_event_interruptible(file_lock->fl_wait,
				!file_lock->fl_next);
fs/namespace.c

@@ -883,11 +883,11 @@ asmlinkage long sys_mount(char * dev_name, char * dir_name, char * type,
 static void chroot_fs_refs(struct nameidata *old_nd, struct nameidata *new_nd)
 {
-	struct task_struct *p;
+	struct task_struct *g, *p;
	struct fs_struct *fs;

	read_lock(&tasklist_lock);
-	for_each_task(p) {
+	do_each_thread(g, p) {
		task_lock(p);
		fs = p->fs;
		if (fs) {
@@ -900,7 +900,7 @@ static void chroot_fs_refs(struct nameidata *old_nd, struct nameidata *new_nd)
			put_fs_struct(fs);
		} else
			task_unlock(p);
-	}
+	} while_each_thread(g, p);
	read_unlock(&tasklist_lock);
 }
@@ -1012,7 +1012,7 @@ static void __init init_mount_tree(void)
 {
	struct vfsmount *mnt;
	struct namespace *namespace;
-	struct task_struct *p;
+	struct task_struct *g, *p;

	mnt = do_kern_mount("rootfs", 0, "rootfs", NULL);
	if (IS_ERR(mnt))
@@ -1028,10 +1028,10 @@ static void __init init_mount_tree(void)
	init_task.namespace = namespace;
	read_lock(&tasklist_lock);
-	for_each_task(p) {
+	do_each_thread(g, p) {
		get_namespace(namespace);
		p->namespace = namespace;
-	}
+	} while_each_thread(g, p);
	read_unlock(&tasklist_lock);
	set_fs_pwd(current->fs, namespace->root, namespace->root->mnt_root);
fs/partitions/check.c

@@ -251,7 +251,7 @@ static void check_partition(struct gendisk *hd, struct block_device *bdev)
		p[j-1].start_sect = state->parts[j].from;
		p[j-1].nr_sects = state->parts[j].size;
 #if CONFIG_BLK_DEV_MD
-		if (!state->parts[j-1].flags)
+		if (!state->parts[j].flags)
			continue;
		md_autodetect_dev(bdev->bd_dev+j);
 #endif
fs/proc/base.c

@@ -1136,7 +1136,7 @@ static int get_pid_list(int index, unsigned int *pids)
	index--;
	read_lock(&tasklist_lock);
-	for_each_task(p) {
+	for_each_process(p) {
		int pid = p->pid;
		if (!pid)
			continue;
fs/proc/inode.c

@@ -235,7 +235,9 @@ int proc_fill_super(struct super_block *s, void *data, int silent)
	 * Fixup the root inode's nlink value
	 */
	read_lock(&tasklist_lock);
-	for_each_task(p) if (p->pid) root_inode->i_nlink++;
+	for_each_process(p)
+		if (p->pid)
+			root_inode->i_nlink++;
	read_unlock(&tasklist_lock);
	s->s_root = d_alloc_root(root_inode);
	if (!s->s_root)
include/linux/sched.h

@@ -51,7 +51,11 @@ struct exec_domain;
 #define CLONE_CLEARTID	0x00200000	/* clear the userspace TID */
 #define CLONE_DETACHED	0x00400000	/* parent wants no child-exit signal */
-#define CLONE_SIGNAL	(CLONE_SIGHAND | CLONE_THREAD)
+
+/*
+ * List of flags we want to share for kernel threads,
+ * if only because they are not used by them anyway.
+ */
+#define CLONE_KERNEL	(CLONE_FS | CLONE_FILES | CLONE_SIGHAND)

 /*
  * These are the constant used to fake the fixed-point load-average
@@ -222,6 +226,8 @@ struct signal_struct {
	/* thread group exit support */
	int			group_exit;
	int			group_exit_code;
+	struct task_struct	*group_exit_task;
 };

 /*
@@ -552,6 +558,7 @@ extern int dequeue_signal(struct sigpending *pending, sigset_t *mask, siginfo_t
 extern void block_all_signals(int (*notifier)(void *priv), void *priv,
			      sigset_t *mask);
 extern void unblock_all_signals(void);
+extern void release_task(struct task_struct * p);
 extern int send_sig_info(int, struct siginfo *, struct task_struct *);
 extern int force_sig_info(int, struct siginfo *, struct task_struct *);
 extern int __kill_pg_info(int sig, struct siginfo *info, pid_t pgrp);
@@ -761,11 +768,13 @@ static inline void remove_wait_queue_locked(wait_queue_head_t *q,
 #define add_parent(p, parent)	list_add_tail(&(p)->sibling,&(parent)->children)

 #define REMOVE_LINKS(p) do {			\
+	if (thread_group_leader(p))		\
		list_del_init(&(p)->tasks);	\
	remove_parent(p);			\
	} while (0)

 #define SET_LINKS(p) do {				\
+	if (thread_group_leader(p))			\
		list_add_tail(&(p)->tasks,&init_task.tasks);	\
	add_parent(p, (p)->parent);			\
	} while (0)
@@ -797,11 +806,18 @@ static inline struct task_struct *younger_sibling(struct task_struct *p)
 #define next_task(p)	list_entry((p)->tasks.next, struct task_struct, tasks)
 #define prev_task(p)	list_entry((p)->tasks.prev, struct task_struct, tasks)

-#define for_each_task(p) \
+#define for_each_process(p) \
	for (p = &init_task ; (p = next_task(p)) != &init_task ; )

-#define for_each_thread(task) \
-	for (task = next_thread(current) ; task != current ; task = next_thread(task))
+/*
+ * Careful: do_each_thread/while_each_thread is a double loop so
+ *          'break' will not work as expected - use goto instead.
+ */
+#define do_each_thread(g, t) \
+	for (g = t = &init_task ; (g = t = next_task(g)) != &init_task ; ) do
+
+#define while_each_thread(g, t) \
+	while ((t = next_thread(t)) != g)

 static inline task_t *next_thread(task_t *p)
 {
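The new do_each_thread/while_each_thread pair expands to an outer loop over processes and an inner loop over the threads of each thread group, which is why the comment above warns that break only leaves the inner loop. A minimal sketch of the usage idiom the rest of this merge converts callers to; find_task_by_comm() is a hypothetical illustration only, not a kernel API:

	/* Sketch only: walk every thread in the system with the new macros,
	 * the way e.g. chroot_fs_refs() does after this merge.
	 * find_task_by_comm() is a made-up example function. */
	static task_t *find_task_by_comm(const char *comm)
	{
		task_t *g, *p, *found = NULL;

		read_lock(&tasklist_lock);
		do_each_thread(g, p) {
			if (strcmp(p->comm, comm) == 0) {
				found = p;
				goto out;	/* 'break' would only exit the inner loop */
			}
		} while_each_thread(g, p);
	out:
		read_unlock(&tasklist_lock);
		return found;
	}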
include/net/llc_c_ev.h

@@ -126,23 +126,10 @@ struct llc_conn_ev_pdu_if {
	u8	  reason;
 };

-/* Event interface for timer-generated events */
-struct llc_conn_ev_tmr_if {
-	struct sock	*sk;
-	u32		 component_handle;
-	void		*timer_specific;
-};
-
-struct llc_conn_ev_rpt_sts_if {
-	u8 status;
-};
-
 union llc_conn_ev_if {
	struct llc_conn_ev_simple_if	a; /* 'a' for simple, easy ... */
	struct llc_conn_ev_prim_if	prim;
	struct llc_conn_ev_pdu_if	pdu;
-	struct llc_conn_ev_tmr_if	tmr;
-	struct llc_conn_ev_rpt_sts_if	rsts; /* report status */
 };

 struct llc_conn_state_ev {
include/net/llc_conn.h

@@ -17,7 +17,6 @@
 struct llc_timer {
	struct timer_list timer;
-	u8		  running;	/* timer is running or no */
	u16		  expire;	/* timer expire time */
 };
include/net/llc_evnt.h

@@ -43,23 +43,12 @@ struct llc_stat_ev_prim_if {
 struct llc_stat_ev_pdu_if {
	u8 reason;
-	struct sk_buff *skb;
-};
-
-struct llc_stat_ev_tmr_if {
-	void *timer_specific;
-};
-
-struct llc_stat_ev_rpt_sts_if {
-	u8 status;
 };

 union llc_stat_ev_if {
	struct llc_stat_ev_simple_if a; /* 'a' for simple, easy ... */
	struct llc_stat_ev_prim_if prim;
	struct llc_stat_ev_pdu_if pdu;
-	struct llc_stat_ev_tmr_if tmr;
-	struct llc_stat_ev_rpt_sts_if rsts; /* report status */
 };

 struct llc_station_state_ev {
include/net/llc_if.h

@@ -122,24 +122,12 @@ extern int llc_establish_connection(struct sock *sk, u8 *lmac,
 extern int llc_build_and_send_pkt(struct sock *sk, struct sk_buff *skb);
 extern void llc_build_and_send_ui_pkt(struct llc_sap *sap,
				      struct sk_buff *skb,
-				      struct sockaddr_llc *addr);
+				      u8 *dmac, u8 dsap);
 extern void llc_build_and_send_xid_pkt(struct llc_sap *sap,
				       struct sk_buff *skb,
-				       struct sockaddr_llc *addr);
+				       u8 *dmac, u8 dsap);
 extern void llc_build_and_send_test_pkt(struct llc_sap *sap,
					struct sk_buff *skb,
-					struct sockaddr_llc *addr);
+					u8 *dmac, u8 dsap);
 extern int llc_send_disc(struct sock *sk);
-
-/**
- *	llc_proto_type - return eth protocol for ARP header type
- *	@arphrd: ARP header type.
- *
- *	Given an ARP header type return the corresponding ethernet protocol.
- */
-static __inline__ u16 llc_proto_type(u16 arphrd)
-{
-	return arphrd == ARPHRD_IEEE802_TR ?
-		         htons(ETH_P_TR_802_2) : htons(ETH_P_802_2);
-}
-
 #endif /* LLC_IF_H */
include/net/llc_main.h

@@ -30,7 +30,6 @@
 *
 * @state - state of station
 * @xid_r_count - XID response PDU counter
- * @ack_tmr_running - 1 or 0
 * @mac_sa - MAC source address
 * @sap_list - list of related SAPs
 * @ev_q - events entering state mach.
@@ -40,7 +39,6 @@ struct llc_station {
	u8			    state;
	u8			    xid_r_count;
	struct timer_list	    ack_timer;
-	u8			    ack_tmr_running;
	u8			    retry_count;
	u8			    maximum_retry;
	u8			    mac_sa[6];
include/net/llc_s_ev.h

@@ -52,20 +52,10 @@ struct llc_sap_ev_pdu_if {
	u8 reason;
 };

-struct llc_sap_ev_tmr_if {
-	void *timer_specific;
-};
-
-struct llc_sap_ev_rpt_sts_if {
-	u8 status;
-};
-
 union llc_sap_ev_if {
	struct llc_sap_ev_simple_if a; /* 'a' for simple, easy ... */
	struct llc_sap_ev_prim_if   prim;
	struct llc_sap_ev_pdu_if    pdu;
-	struct llc_sap_ev_tmr_if    tmr;
-	struct llc_sap_ev_rpt_sts_if rsts; /* report status */
 };

 struct llc_prim_if_block;
include/net/llc_sap.h

@@ -17,7 +17,6 @@
 *
 * @p_bit - only lowest-order bit used
 * @f_bit - only lowest-order bit used
- * @req - provided by LLC layer
 * @ind - provided by network layer
 * @conf - provided by network layer
 * @laddr - SAP value in this 'lsap'
@@ -30,7 +29,6 @@ struct llc_sap {
	u8		 state;
	u8		 p_bit;
	u8		 f_bit;
-	llc_prim_call_t	 req;
	llc_prim_call_t	 ind;
	llc_prim_call_t	 conf;
	struct llc_prim_if_block llc_ind_prim, llc_cfm_prim;
init/main.c

@@ -371,7 +371,7 @@ static void __init smp_init(void)
 static void rest_init(void)
 {
-	kernel_thread(init, NULL, CLONE_FS | CLONE_FILES | CLONE_SIGNAL);
+	kernel_thread(init, NULL, CLONE_KERNEL);
	unlock_kernel();
	cpu_idle();
 }
kernel/capability.c

@@ -83,13 +83,13 @@ static inline void cap_set_pg(int pgrp, kernel_cap_t *effective,
			      kernel_cap_t *inheritable,
			      kernel_cap_t *permitted)
 {
-	task_t *target;
+	task_t *g, *target;

-	for_each_task(target) {
+	do_each_thread(g, target) {
		if (target->pgrp != pgrp)
			continue;
		security_ops->capset_set(target, effective, inheritable, permitted);
-	}
+	} while_each_thread(g, target);
 }

 /*
@@ -100,13 +100,13 @@ static inline void cap_set_all(kernel_cap_t *effective,
			       kernel_cap_t *inheritable,
			       kernel_cap_t *permitted)
 {
-	task_t *target;
+	task_t *g, *target;

-	for_each_task(target) {
+	do_each_thread(g, target) {
		if (target == current || target->pid == 1)
			continue;
		security_ops->capset_set(target, effective, inheritable, permitted);
-	}
+	} while_each_thread(g, target);
 }

 /*
kernel/exit.c

@@ -49,7 +49,7 @@ static struct dentry * __unhash_process(struct task_struct *p)
	return proc_dentry;
 }

-static void release_task(struct task_struct * p)
+void release_task(struct task_struct * p)
 {
	struct dentry *proc_dentry;
@@ -71,19 +71,19 @@ static void release_task(struct task_struct * p)
	write_lock_irq(&tasklist_lock);
	__exit_sighand(p);
	proc_dentry = __unhash_process(p);
+
+	p->parent->cutime += p->utime + p->cutime;
+	p->parent->cstime += p->stime + p->cstime;
+	p->parent->cmin_flt += p->min_flt + p->cmin_flt;
+	p->parent->cmaj_flt += p->maj_flt + p->cmaj_flt;
+	p->parent->cnswap += p->nswap + p->cnswap;
+	sched_exit(p);
	write_unlock_irq(&tasklist_lock);

	if (unlikely(proc_dentry != NULL)) {
		shrink_dcache_parent(proc_dentry);
		dput(proc_dentry);
	}

	release_thread(p);
-	if (p != current) {
-		current->cmin_flt += p->min_flt + p->cmin_flt;
-		current->cmaj_flt += p->maj_flt + p->cmaj_flt;
-		current->cnswap += p->nswap + p->cnswap;
-		sched_exit(p);
-	}
	put_task_struct(p);
 }
@@ -115,7 +115,7 @@ int session_of_pgrp(int pgrp)
	fallback = -1;
	read_lock(&tasklist_lock);
-	for_each_task(p) {
+	for_each_process(p) {
		if (p->session <= 0)
			continue;
		if (p->pgrp == pgrp) {
@@ -141,7 +141,7 @@ static int __will_become_orphaned_pgrp(int pgrp, struct task_struct * ignored_ta
 {
	struct task_struct *p;

-	for_each_task(p) {
+	for_each_process(p) {
		if ((p == ignored_task) || (p->pgrp != pgrp) ||
		    (p->state == TASK_ZOMBIE) ||
		    (p->parent->pid == 1))
@@ -175,7 +175,7 @@ static inline int __has_stopped_jobs(int pgrp)
	int retval = 0;
	struct task_struct *p;

-	for_each_task(p) {
+	for_each_process(p) {
		if (p->pgrp != pgrp)
			continue;
		if (p->state != TASK_STOPPED)
@@ -447,11 +447,7 @@ static inline void forget_original_parent(struct task_struct * father)
	struct task_struct *p, *reaper = father;
	struct list_head *_p;

-	if (father->exit_signal != -1)
-		reaper = prev_thread(reaper);
-	else
-		reaper = child_reaper;
-
+	reaper = father->group_leader;
	if (reaper == father)
		reaper = child_reaper;
@@ -681,6 +677,9 @@ asmlinkage long sys_exit(int error_code)
 asmlinkage long sys_exit_group(int error_code)
 {
+	unsigned int exit_code = (error_code & 0xff) << 8;
+
+	if (!list_empty(&current->thread_group)) {
		struct signal_struct *sig = current->sig;

		spin_lock_irq(&sig->siglock);
@@ -691,11 +690,12 @@ asmlinkage long sys_exit_group(int error_code)
			do_exit(sig->group_exit_code);
		}
		sig->group_exit = 1;
-		sig->group_exit_code = (error_code & 0xff) << 8;
+		sig->group_exit_code = exit_code;
		__broadcast_thread_group(current, SIGKILL);
		spin_unlock_irq(&sig->siglock);
+	}

-	do_exit(sig->group_exit_code);
+	do_exit(exit_code);
 }
@@ -731,7 +731,7 @@ static int eligible_child(pid_t pid, int options, task_t *p)
	 * in a non-empty thread group:
	 */
	if (current->tgid != p->tgid && delay_group_leader(p))
-		return 0;
+		return 2;

	if (security_ops->task_wait(p))
		return 0;
@@ -757,11 +757,16 @@ asmlinkage long sys_wait4(pid_t pid,unsigned int * stat_addr, int options, struc
	do {
		struct task_struct *p;
		struct list_head *_p;
+		int ret;

		list_for_each(_p, &tsk->children) {
			p = list_entry(_p, struct task_struct, sibling);
-			if (!eligible_child(pid, options, p))
+			ret = eligible_child(pid, options, p);
+			if (!ret)
				continue;
			flag = 1;
			switch (p->state) {
			case TASK_STOPPED:
				if (!p->exit_code)
@@ -784,8 +789,11 @@ asmlinkage long sys_wait4(pid_t pid,unsigned int * stat_addr, int options, struc
				}
				goto end_wait4;
			case TASK_ZOMBIE:
-				current->cutime += p->utime + p->cutime;
-				current->cstime += p->stime + p->cstime;
+				/*
+				 * Eligible but we cannot release it yet:
+				 */
+				if (ret == 2)
+					continue;
				read_unlock(&tasklist_lock);
				retval = ru ? getrusage(p, RUSAGE_BOTH, ru) : 0;
				if (!retval && stat_addr) {
kernel/fork.c

@@ -161,7 +161,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
 static int get_pid(unsigned long flags)
 {
-	struct task_struct *p;
+	struct task_struct *g, *p;
	int pid;

	if (flags & CLONE_IDLETASK)
@@ -178,7 +178,7 @@ static int get_pid(unsigned long flags)
		next_safe = pid_max;
		read_lock(&tasklist_lock);
 repeat:
-		for_each_task(p) {
+		do_each_thread(g, p) {
			if (p->pid == last_pid ||
			    p->pgrp == last_pid ||
			    p->session == last_pid) {
@@ -195,7 +195,8 @@ static int get_pid(unsigned long flags)
					next_safe = p->pgrp;
				if (p->session > last_pid && next_safe > p->session)
					next_safe = p->session;
			}
-		}
+		} while_each_thread(g, p);
		read_unlock(&tasklist_lock);
	}
	pid = last_pid;
@@ -632,6 +633,7 @@ static inline int copy_sighand(unsigned long clone_flags, struct task_struct * t
	atomic_set(&sig->count, 1);
	sig->group_exit = 0;
	sig->group_exit_code = 0;
+	sig->group_exit_task = NULL;
	memcpy(sig->action, current->sig->action, sizeof(sig->action));
	sig->curr_target = NULL;
	init_sigpending(&sig->shared_pending);
@@ -671,16 +673,13 @@ static struct task_struct *copy_process(unsigned long clone_flags,
		return ERR_PTR(-EINVAL);

	/*
-	 * Thread groups must share signals as well:
+	 * Thread groups must share signals as well, and detached threads
+	 * can only be started up within the thread group.
	 */
-	if (clone_flags & CLONE_THREAD)
-		clone_flags |= CLONE_SIGHAND;
-
-	/*
-	 * Detached threads can only be started up within the thread
-	 * group.
-	 */
-	if (clone_flags & CLONE_DETACHED)
-		clone_flags |= CLONE_THREAD;
+	if ((clone_flags & CLONE_THREAD) && !(clone_flags & CLONE_SIGHAND))
+		return ERR_PTR(-EINVAL);
+	if ((clone_flags & CLONE_DETACHED) && !(clone_flags & CLONE_THREAD))
+		return ERR_PTR(-EINVAL);

	retval = security_ops->task_create(clone_flags);
	if (retval)
kernel/sched.c

@@ -479,17 +479,17 @@ void sched_exit(task_t * p)
 {
	local_irq_disable();
	if (p->first_time_slice) {
-		current->time_slice += p->time_slice;
-		if (unlikely(current->time_slice > MAX_TIMESLICE))
-			current->time_slice = MAX_TIMESLICE;
+		p->parent->time_slice += p->time_slice;
+		if (unlikely(p->parent->time_slice > MAX_TIMESLICE))
+			p->parent->time_slice = MAX_TIMESLICE;
	}
	local_irq_enable();
	/*
	 * If the child was a (relative-) CPU hog then decrease
	 * the sleep_avg of the parent as well.
	 */
-	if (p->sleep_avg < current->sleep_avg)
-		current->sleep_avg = (current->sleep_avg * EXIT_WEIGHT +
+	if (p->sleep_avg < p->parent->sleep_avg)
+		p->parent->sleep_avg = (p->parent->sleep_avg * EXIT_WEIGHT +
			p->sleep_avg) / (EXIT_WEIGHT + 1);
 }
@@ -1838,7 +1838,7 @@ char * render_sigset_t(sigset_t *set, char *buffer)
 void show_state(void)
 {
-	task_t *p;
+	task_t *g, *p;

 #if (BITS_PER_LONG == 32)
	printk("\n"
@@ -1850,14 +1850,15 @@ void show_state(void)
	printk("  task                 PC  stack   pid father child younger older\n");
 #endif
	read_lock(&tasklist_lock);
-	for_each_task(p) {
+	do_each_thread(g, p) {
		/*
		 * reset the NMI-timeout, listing all files on a slow
		 * console might take alot of time:
		 */
		touch_nmi_watchdog();
		show_task(p);
-	}
+	} while_each_thread(g, p);
	read_unlock(&tasklist_lock);
 }
@@ -2054,8 +2055,7 @@ static int migration_call(struct notifier_block *nfb,
	case CPU_ONLINE:
		printk("Starting migration thread for cpu %li\n",
			(long)hcpu);
-		kernel_thread(migration_thread, hcpu,
-			      CLONE_FS | CLONE_FILES | CLONE_SIGNAL);
+		kernel_thread(migration_thread, hcpu, CLONE_KERNEL);
		while (!cpu_rq((long)hcpu)->migration_thread)
			yield();
		break;
kernel/signal.c

@@ -118,14 +118,18 @@ int max_queued_signals = 1024;
 #define T(sig, mask) \
	((1UL << (sig)) & mask)

-#define sig_user_specific(sig)	T(sig, SIG_USER_SPECIFIC_MASK)
+#define sig_user_specific(sig) \
+		(((sig) < SIGRTMIN)  && T(sig, SIG_USER_SPECIFIC_MASK))
 #define sig_user_load_balance(sig) \
-		(T(sig, SIG_USER_LOAD_BALANCE_MASK) || ((sig) >= SIGRTMIN))
-#define sig_kernel_specific(sig)	T(sig, SIG_KERNEL_SPECIFIC_MASK)
+		(((sig) >= SIGRTMIN) || T(sig, SIG_USER_LOAD_BALANCE_MASK))
+#define sig_kernel_specific(sig) \
+		(((sig) < SIGRTMIN)  && T(sig, SIG_KERNEL_SPECIFIC_MASK))
 #define sig_kernel_broadcast(sig) \
-		(T(sig, SIG_KERNEL_BROADCAST_MASK) || ((sig) >= SIGRTMIN))
-#define sig_kernel_only(sig)		T(sig, SIG_KERNEL_ONLY_MASK)
-#define sig_kernel_coredump(sig)	T(sig, SIG_KERNEL_COREDUMP_MASK)
+		(((sig) >= SIGRTMIN) || T(sig, SIG_KERNEL_BROADCAST_MASK))
+#define sig_kernel_only(sig) \
+		(((sig) < SIGRTMIN)  && T(sig, SIG_KERNEL_ONLY_MASK))
+#define sig_kernel_coredump(sig) \
+		(((sig) < SIGRTMIN)  && T(sig, SIG_KERNEL_COREDUMP_MASK))

 #define sig_user_defined(t, sig) \
	(((t)->sig->action[(sig)-1].sa.sa_handler != SIG_DFL) &&	\
@@ -269,6 +273,15 @@ void __exit_sighand(struct task_struct *tsk)
		kmem_cache_free(sigact_cachep, sig);
	} else {
		struct task_struct *leader = tsk->group_leader;
+
+		/*
+		 * If there is any task waiting for the group exit
+		 * then notify it:
+		 */
+		if (sig->group_exit_task && atomic_read(&sig->count) <= 2) {
+			wake_up_process(sig->group_exit_task);
+			sig->group_exit_task = NULL;
+		}
		/*
		 * If we are the last non-leader member of the thread
		 * group, and the leader is zombie, then notify the
@@ -279,12 +292,15 @@ void __exit_sighand(struct task_struct *tsk)
		 */
		if (atomic_read(&sig->count) == 1 &&
		    leader->state == TASK_ZOMBIE) {
			__remove_thread_group(tsk, sig);
+			spin_unlock(&sig->siglock);
			do_notify_parent(leader, leader->exit_signal);
-		} else
+		} else {
			__remove_thread_group(tsk, sig);
-		spin_unlock(&sig->siglock);
+			spin_unlock(&sig->siglock);
+		}
	}
	clear_tsk_thread_flag(tsk, TIF_SIGPENDING);
	flush_sigqueue(&tsk->pending);
@@ -932,8 +948,8 @@ int __kill_pg_info(int sig, struct siginfo *info, pid_t pgrp)
		struct task_struct *p;

		retval = -ESRCH;
-		for_each_task(p) {
-			if (p->pgrp == pgrp && thread_group_leader(p)) {
+		for_each_process(p) {
+			if (p->pgrp == pgrp) {
				int err = send_sig_info(sig, info, p);
				if (retval)
					retval = err;
@@ -970,7 +986,7 @@ kill_sl_info(int sig, struct siginfo *info, pid_t sess)
	retval = -ESRCH;
	read_lock(&tasklist_lock);
-	for_each_task(p) {
+	for_each_process(p) {
		if (p->leader && p->session == sess) {
			int err = send_sig_info(sig, info, p);
			if (retval)
@@ -1014,8 +1030,8 @@ static int kill_something_info(int sig, struct siginfo *info, int pid)
		struct task_struct * p;

		read_lock(&tasklist_lock);
-		for_each_task(p) {
-			if (p->pid > 1 && p != current && thread_group_leader(p)) {
+		for_each_process(p) {
+			if (p->pid > 1 && p != current) {
				int err = send_sig_info(sig, info, p);
				++count;
				if (err != -EPERM)
@@ -1099,7 +1115,7 @@ void do_notify_parent(struct task_struct *tsk, int sig)
	struct siginfo info;
	int why, status;

-	if (delay_group_leader(tsk))
+	if (!tsk->ptrace && delay_group_leader(tsk))
		return;
	if (sig == -1)
		BUG();
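As a quick illustration of the reworked sig_* classification macros in the first kernel/signal.c hunk above, a minimal sketch of how a caller sees them; the outcomes for real-time signals follow directly from the new SIGRTMIN guards, and the SIGKILL case assumes the usual SIG_KERNEL_ONLY_MASK containing SIGKILL and SIGSTOP:

	/* Sketch only - not part of this commit. */
	static int classify(int sig)
	{
		if (sig_kernel_only(sig))	/* e.g. SIGKILL: sig < SIGRTMIN and in the mask */
			return 1;
		if (sig_kernel_broadcast(sig))	/* any sig >= SIGRTMIN now short-circuits to true */
			return 2;
		return 0;			/* rt signals can never fall into the "_only" class */
	}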
kernel/softirq.c

@@ -395,8 +395,7 @@ static int __devinit cpu_callback(struct notifier_block *nfb,
	int hotcpu = (unsigned long)hcpu;

	if (action == CPU_ONLINE) {
-		if (kernel_thread(ksoftirqd, hcpu,
-				  CLONE_FS | CLONE_FILES | CLONE_SIGNAL) < 0) {
+		if (kernel_thread(ksoftirqd, hcpu, CLONE_KERNEL) < 0) {
			printk("ksoftirqd for %i failed\n", hotcpu);
			return NOTIFY_BAD;
		}
kernel/suspend.c

@@ -204,14 +204,14 @@ void refrigerator(unsigned long flag)
 int freeze_processes(void)
 {
	int todo, start_time;
-	struct task_struct *p;
+	struct task_struct *g, *p;

	printk("Stopping tasks: ");
	start_time = jiffies;
	do {
		todo = 0;
		read_lock(&tasklist_lock);
-		for_each_task(p) {
+		do_each_thread(g, p) {
			unsigned long flags;
			INTERESTING(p);
			if (p->flags & PF_FROZEN)
@@ -224,7 +224,7 @@ int freeze_processes(void)
			signal_wake_up(p);
			spin_unlock_irqrestore(&p->sigmask_lock, flags);
			todo++;
-		}
+		} while_each_thread(g, p);
		read_unlock(&tasklist_lock);
		yield();
		if (time_after(jiffies, start_time + TIMEOUT)) {
@@ -240,18 +240,19 @@ int freeze_processes(void)
 void thaw_processes(void)
 {
-	struct task_struct *p;
+	struct task_struct *g, *p;

	printk("Restarting tasks...");
	read_lock(&tasklist_lock);
-	for_each_task(p) {
+	do_each_thread(g, p) {
		INTERESTING(p);
		if (p->flags & PF_FROZEN)
			p->flags &= ~PF_FROZEN;
		else
			printk(KERN_INFO " Strange, %s not stopped\n", p->comm);
		wake_up_process(p);
-	}
+	} while_each_thread(g, p);
	read_unlock(&tasklist_lock);
	printk(" done\n");
	MDELAY(500);
kernel/sys.c

@@ -227,7 +227,7 @@ static int proc_sel(struct task_struct *p, int which, int who)
 asmlinkage long sys_setpriority(int which, int who, int niceval)
 {
-	struct task_struct *p;
+	struct task_struct *g, *p;
	int error;

	if (which > 2 || which < 0)
@@ -241,7 +241,7 @@ asmlinkage long sys_setpriority(int which, int who, int niceval)
		niceval = 19;
	read_lock(&tasklist_lock);
-	for_each_task(p) {
+	do_each_thread(g, p) {
		int no_nice;
		if (!proc_sel(p, which, who))
			continue;
@@ -262,8 +262,8 @@ asmlinkage long sys_setpriority(int which, int who, int niceval)
			continue;
		}
		set_user_nice(p, niceval);
-	}
+	} while_each_thread(g, p);
	read_unlock(&tasklist_lock);

	return error;
@@ -277,21 +277,21 @@ asmlinkage long sys_setpriority(int which, int who, int niceval)
 */
 asmlinkage long sys_getpriority(int which, int who)
 {
-	struct task_struct *p;
+	struct task_struct *g, *p;
	long retval = -ESRCH;

	if (which > 2 || which < 0)
		return -EINVAL;

	read_lock(&tasklist_lock);
-	for_each_task(p) {
+	do_each_thread(g, p) {
		long niceval;
		if (!proc_sel(p, which, who))
			continue;
		niceval = 20 - task_nice(p);
		if (niceval > retval)
			retval = niceval;
-	}
+	} while_each_thread(g, p);
	read_unlock(&tasklist_lock);

	return retval;
@@ -882,12 +882,12 @@ asmlinkage long sys_setpgid(pid_t pid, pid_t pgid)
	if (p->leader)
		goto out;
	if (pgid != pid) {
-		struct task_struct *tmp;
-		for_each_task(tmp) {
+		struct task_struct *g, *tmp;
+
+		do_each_thread(g, tmp) {
			if (tmp->pgrp == pgid &&
			    tmp->session == current->session)
				goto ok_pgid;
-		}
+		} while_each_thread(g, tmp);
		goto out;
	}
@@ -956,14 +956,14 @@ asmlinkage long sys_getsid(pid_t pid)
 asmlinkage long sys_setsid(void)
 {
-	struct task_struct *p;
+	struct task_struct *g, *p;
	int err = -EPERM;

	read_lock(&tasklist_lock);
-	for_each_task(p) {
+	do_each_thread(g, p)
		if (p->pgrp == current->pid)
			goto out;
-	}
+	while_each_thread(g, p);

	current->leader = 1;
	current->session = current->pgrp = current->pid;
mm/oom_kill.c
...
@@ -116,10 +116,10 @@ static int badness(struct task_struct *p)
static struct task_struct * select_bad_process(void)
{
	int maxpoints = 0;
-	struct task_struct *p = NULL;
+	struct task_struct *g, *p;
	struct task_struct *chosen = NULL;
-	for_each_task(p) {
+	do_each_thread(g, p)
		if (p->pid) {
			int points = badness(p);
			if (points > maxpoints) {
...
@@ -127,7 +127,7 @@ static struct task_struct * select_bad_process(void)
				maxpoints = points;
			}
		}
-	}
+	while_each_thread(g, p);
	return chosen;
}
...
@@ -166,7 +166,7 @@ void oom_kill_task(struct task_struct *p)
 */
static void oom_kill(void)
{
-	struct task_struct *p, *q;
+	struct task_struct *g, *p, *q;
	read_lock(&tasklist_lock);
	p = select_bad_process();
...
@@ -176,9 +176,11 @@ static void oom_kill(void)
		panic("Out of memory and no killable processes...\n");
	/* kill all processes that share the ->mm (i.e. all threads) */
-	for_each_task(q) {
+	do_each_thread(g, q)
		if (q->mm == p->mm)
			oom_kill_task(q);
-	}
+	while_each_thread(g, q);
	read_unlock(&tasklist_lock);
	/*
...
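Two spellings of the new iterator appear in this merge: freeze_processes()-style code attaches braces to the macros, while select_bad_process() and oom_kill() above drop them because the body is a single statement (the pair expands so the body is the statement of a do/while, as the usage here shows). A sketch of the brace-less form; kill_threads_of() and its mm argument are hypothetical names, not part of the patch.

#include <linux/sched.h>

/* Illustrative only: single-statement body, no braces, as in oom_kill() above. */
static void kill_threads_of(struct mm_struct *mm)
{
	struct task_struct *g, *q;

	read_lock(&tasklist_lock);
	do_each_thread(g, q)
		if (q->mm == mm)
			oom_kill_task(q);
	while_each_thread(g, q);
	read_unlock(&tasklist_lock);
}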
mm/pdflush.c
...
@@ -202,8 +202,7 @@ int pdflush_operation(void (*fn)(unsigned long), unsigned long arg0)
static void start_one_pdflush_thread(void)
{
-	kernel_thread(pdflush, NULL,
-			CLONE_FS | CLONE_FILES | CLONE_SIGNAL);
+	kernel_thread(pdflush, NULL, CLONE_KERNEL);
}
static int __init pdflush_init(void)
...
mm/vmscan.c
...
@@ -705,7 +705,7 @@ static int __init kswapd_init(void)
{
	printk("Starting kswapd\n");
	swap_setup();
-	kernel_thread(kswapd, NULL, CLONE_FS | CLONE_FILES | CLONE_SIGNAL);
+	kernel_thread(kswapd, NULL, CLONE_KERNEL);
	return 0;
}
...
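Both kernel-thread launch sites above now pass one CLONE_KERNEL flag set instead of spelling out the flags. A sketch of the idea, assuming CLONE_KERNEL is simply a macro in this commit's include/linux/sched.h change that bundles the three flags it replaces; example_daemon and example_daemon_init are hypothetical names.

#include <linux/sched.h>
#include <linux/init.h>

/* Assumption: CLONE_KERNEL wraps the flag triple the old calls used. */
#ifndef CLONE_KERNEL
#define CLONE_KERNEL	(CLONE_FS | CLONE_FILES | CLONE_SIGNAL)
#endif

static int example_daemon(void *unused)
{
	return 0;	/* daemon body omitted */
}

static int __init example_daemon_init(void)
{
	/* same launch pattern as kswapd_init() and start_one_pdflush_thread() */
	kernel_thread(example_daemon, NULL, CLONE_KERNEL);
	return 0;
}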
net/802/p8022.c
...
@@ -28,18 +28,8 @@
static int p8022_request(struct datalink_proto *dl, struct sk_buff *skb,
			 unsigned char *dest)
{
-	union llc_u_prim_data prim_data;
-	struct llc_prim_if_block prim;
-	prim.data = &prim_data;
-	prim.sap = dl->sap;
-	prim.prim = LLC_DATAUNIT_PRIM;
-	prim_data.test.skb = skb;
-	prim_data.test.saddr.lsap = dl->sap->laddr.lsap;
-	prim_data.test.daddr.lsap = dl->sap->laddr.lsap;
-	memcpy(prim_data.test.saddr.mac, skb->dev->dev_addr, IFHWADDRLEN);
-	memcpy(prim_data.test.daddr.mac, dest, IFHWADDRLEN);
-	return dl->sap->req(&prim);
+	llc_build_and_send_ui_pkt(dl->sap, skb, dest, dl->sap->laddr.lsap);
+	return 0;
}
struct datalink_proto *register_8022_client(unsigned char type,
...
net/802/psnap.c
...
@@ -86,19 +86,9 @@ static int snap_indicate(struct llc_prim_if_block *prim)
static int snap_request(struct datalink_proto *dl,
			struct sk_buff *skb, u8 *dest)
{
-	union llc_u_prim_data prim_data;
-	struct llc_prim_if_block prim;
	memcpy(skb_push(skb, 5), dl->type, 5);
-	prim.data = &prim_data;
-	prim.sap = snap_sap;
-	prim.prim = LLC_DATAUNIT_PRIM;
-	prim_data.test.skb = skb;
-	prim_data.test.saddr.lsap = snap_sap->laddr.lsap;
-	prim_data.test.daddr.lsap = snap_sap->laddr.lsap;
-	memcpy(prim_data.test.saddr.mac, skb->dev->dev_addr, IFHWADDRLEN);
-	memcpy(prim_data.test.daddr.mac, dest, IFHWADDRLEN);
-	return snap_sap->req(&prim);
+	llc_build_and_send_ui_pkt(snap_sap, skb, dest, snap_sap->laddr.lsap);
+	return 0;
}
/*
...
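The two request paths above collapse to one call because llc_build_and_send_ui_pkt() now takes the destination MAC and SAP directly and is exported from net/llc/llc_if.c (see below), so an 802.2 client no longer fills in an llc_prim_if_block and indirects through sap->req(). A sketch of a client transmit hook in the new style; my_dl_request() is a hypothetical name and the header choices are assumptions.

#include <linux/netdevice.h>
#include <net/datalink.h>
#include <net/llc_if.h>

/* Hypothetical client: hand the frame to LLC with destination MAC and SAP.
 * As in p8022_request(), the peer SAP is taken to equal the local SAP. */
static int my_dl_request(struct datalink_proto *dl, struct sk_buff *skb,
			 unsigned char *dest)
{
	llc_build_and_send_ui_pkt(dl->sap, skb, dest, dl->sap->laddr.lsap);
	return 0;
}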
net/ipv4/netfilter/ipt_owner.c
...
@@ -14,12 +14,12 @@
static int
match_comm(const struct sk_buff *skb, const char *comm)
{
-	struct task_struct *p;
+	struct task_struct *g, *p;
	struct files_struct *files;
	int i;
	read_lock(&tasklist_lock);
-	for_each_task(p) {
+	do_each_thread(g, p) {
		if (strncmp(p->comm, comm, sizeof(p->comm)))
			continue;
...
@@ -38,7 +38,7 @@ match_comm(const struct sk_buff *skb, const char *comm)
			read_unlock(&files->file_lock);
		}
		task_unlock(p);
-	}
+	} while_each_thread(g, p);
	read_unlock(&tasklist_lock);
	return 0;
}
...
@@ -77,12 +77,12 @@ match_pid(const struct sk_buff *skb, pid_t pid)
static int
match_sid(const struct sk_buff *skb, pid_t sid)
{
-	struct task_struct *p;
+	struct task_struct *g, *p;
	struct file *file = skb->sk->socket->file;
	int i, found = 0;
	read_lock(&tasklist_lock);
-	for_each_task(p) {
+	do_each_thread(g, p) {
		struct files_struct *files;
		if (p->session != sid)
			continue;
...
@@ -100,9 +100,10 @@ match_sid(const struct sk_buff *skb, pid_t sid)
			read_unlock(&files->file_lock);
		}
		task_unlock(p);
		if (found)
-			break;
-	}
+			goto out;
+	} while_each_thread(g, p);
+out:
	read_unlock(&tasklist_lock);
	return found;
...
net/ipv6/netfilter/ip6t_owner.c
...
@@ -49,12 +49,12 @@ match_pid(const struct sk_buff *skb, pid_t pid)
static int
match_sid(const struct sk_buff *skb, pid_t sid)
{
-	struct task_struct *p;
+	struct task_struct *g, *p;
	struct file *file = skb->sk->socket->file;
	int i, found = 0;
	read_lock(&tasklist_lock);
-	for_each_task(p) {
+	do_each_thread(g, p) {
		struct files_struct *files;
		if (p->session != sid)
			continue;
...
@@ -72,9 +72,10 @@ match_sid(const struct sk_buff *skb, pid_t sid)
			read_unlock(&files->file_lock);
		}
		task_unlock(p);
		if (found)
-			break;
-	}
+			goto out;
+	} while_each_thread(g, p);
+out:
	read_unlock(&tasklist_lock);
	return found;
...
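One behavioural detail in both owner-match conversions above: the new iterator pair is built from two nested loops (an outer walk over thread groups and an inner walk over each group's threads), so the old break; would now only leave the inner loop. That is why both match_sid() variants switch to goto out; past while_each_thread(). An illustrative sketch of the shape; session_has_match() is a hypothetical name.

#include <linux/sched.h>

static int session_has_match(pid_t sid)
{
	struct task_struct *g, *p;
	int found = 0;

	read_lock(&tasklist_lock);
	do_each_thread(g, p) {
		if (p->session == sid) {
			found = 1;
			goto out;	/* a break would only exit the inner thread loop */
		}
	} while_each_thread(g, p);
out:
	read_unlock(&tasklist_lock);
	return found;
}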
net/llc/llc_actn.c
...
@@ -34,7 +34,6 @@ int llc_station_ac_start_ack_timer(struct llc_station *station,
	station->ack_timer.data = (unsigned long)station;
	station->ack_timer.function = llc_station_ack_tmr_callback;
	add_timer(&station->ack_timer);
-	station->ack_tmr_running = 1;
	return 0;
}
...
@@ -136,12 +135,10 @@ static void llc_station_ack_tmr_callback(unsigned long timeout_data)
	struct llc_station *station = (struct llc_station *)timeout_data;
	struct sk_buff *skb = alloc_skb(0, GFP_ATOMIC);
-	station->ack_tmr_running = 0;
	if (skb) {
		struct llc_station_state_ev *ev = llc_station_ev(skb);
		ev->type = LLC_STATION_EV_TYPE_ACK_TMR;
-		ev->data.tmr.timer_specific = NULL;
		llc_station_state_process(station, skb);
	}
}
net/llc/llc_c_ac.c
...
@@ -48,7 +48,6 @@ int llc_conn_ac_clear_remote_busy(struct sock *sk, struct sk_buff *skb)
		llc->remote_busy_flag = 0;
		del_timer(&llc->busy_state_timer.timer);
-		llc->busy_state_timer.running = 0;
		nr = LLC_I_GET_NR(pdu);
		llc_conn_resend_i_pdu_as_cmd(sk, nr, 0);
	}
...
@@ -252,10 +251,8 @@ int llc_conn_ac_stop_rej_tmr_if_data_flag_eq_2(struct sock *sk,
{
	struct llc_opt *llc = llc_sk(sk);
-	if (llc->data_flag == 2) {
+	if (llc->data_flag == 2)
		del_timer(&llc->rej_sent_timer.timer);
-		llc->rej_sent_timer.running = 0;
-	}
	return 0;
}
...
@@ -672,7 +669,6 @@ int llc_conn_ac_set_remote_busy(struct sock *sk, struct sk_buff *skb)
		llc->busy_state_timer.timer.data = (unsigned long)sk;
		llc->busy_state_timer.timer.function = llc_conn_busy_tmr_cb;
		add_timer(&llc->busy_state_timer.timer);
-		llc->busy_state_timer.running = 1;
	}
	return 0;
}
...
@@ -915,7 +911,6 @@ int llc_conn_ac_start_p_timer(struct sock *sk, struct sk_buff *skb)
	llc->pf_cycle_timer.timer.data = (unsigned long)sk;
	llc->pf_cycle_timer.timer.function = llc_conn_pf_cycle_tmr_cb;
	add_timer(&llc->pf_cycle_timer.timer);
-	llc->pf_cycle_timer.running = 1;
	return 0;
}
...
@@ -1162,13 +1157,9 @@ int llc_conn_ac_stop_all_timers(struct sock *sk, struct sk_buff *skb)
	struct llc_opt *llc = llc_sk(sk);
	del_timer(&llc->pf_cycle_timer.timer);
-	llc->pf_cycle_timer.running = 0;
	del_timer(&llc->ack_timer.timer);
-	llc->ack_timer.running = 0;
	del_timer(&llc->rej_sent_timer.timer);
-	llc->rej_sent_timer.running = 0;
	del_timer(&llc->busy_state_timer.timer);
-	llc->busy_state_timer.running = 0;
	llc->ack_must_be_send = 0;
	llc->ack_pf = 0;
	return 0;
...
@@ -1179,11 +1170,8 @@ int llc_conn_ac_stop_other_timers(struct sock *sk, struct sk_buff *skb)
	struct llc_opt *llc = llc_sk(sk);
	del_timer(&llc->rej_sent_timer.timer);
-	llc->rej_sent_timer.running = 0;
	del_timer(&llc->pf_cycle_timer.timer);
-	llc->pf_cycle_timer.running = 0;
	del_timer(&llc->busy_state_timer.timer);
-	llc->busy_state_timer.running = 0;
	llc->ack_must_be_send = 0;
	llc->ack_pf = 0;
	return 0;
...
@@ -1198,7 +1186,6 @@ int llc_conn_ac_start_ack_timer(struct sock *sk, struct sk_buff *skb)
	llc->ack_timer.timer.data = (unsigned long)sk;
	llc->ack_timer.timer.function = llc_conn_ack_tmr_cb;
	add_timer(&llc->ack_timer.timer);
-	llc->ack_timer.running = 1;
	return 0;
}
...
@@ -1212,7 +1199,6 @@ int llc_conn_ac_start_rej_timer(struct sock *sk, struct sk_buff *skb)
	llc->rej_sent_timer.timer.data = (unsigned long)sk;
	llc->rej_sent_timer.timer.function = llc_conn_rej_tmr_cb;
	add_timer(&llc->rej_sent_timer.timer);
-	llc->rej_sent_timer.running = 1;
	return 0;
}
...
@@ -1221,13 +1207,12 @@ int llc_conn_ac_start_ack_tmr_if_not_running(struct sock *sk,
{
	struct llc_opt *llc = llc_sk(sk);
-	if (!llc->ack_timer.running) {
+	if (!timer_pending(&llc->ack_timer.timer)) {
		llc->ack_timer.timer.expires = jiffies +
					       llc->ack_timer.expire * HZ;
		llc->ack_timer.timer.data = (unsigned long)sk;
		llc->ack_timer.timer.function = llc_conn_ack_tmr_cb;
		add_timer(&llc->ack_timer.timer);
-		llc->ack_timer.running = 1;
	}
	return 0;
}
...
@@ -1235,7 +1220,6 @@ int llc_conn_ac_start_ack_tmr_if_not_running(struct sock *sk,
int llc_conn_ac_stop_ack_timer(struct sock *sk, struct sk_buff *skb)
{
	del_timer(&llc_sk(sk)->ack_timer.timer);
-	llc_sk(sk)->ack_timer.running = 0;
	return 0;
}
...
@@ -1244,7 +1228,6 @@ int llc_conn_ac_stop_p_timer(struct sock *sk, struct sk_buff *skb)
	struct llc_opt *llc = llc_sk(sk);
	del_timer(&llc->pf_cycle_timer.timer);
-	llc->pf_cycle_timer.running = 0;
	llc->p_flag = 0;
	return 0;
}
...
@@ -1252,7 +1235,6 @@ int llc_conn_ac_stop_p_timer(struct sock *sk, struct sk_buff *skb)
int llc_conn_ac_stop_rej_timer(struct sock *sk, struct sk_buff *skb)
{
	del_timer(&llc_sk(sk)->rej_sent_timer.timer);
-	llc_sk(sk)->rej_sent_timer.running = 0;
	return 0;
}
...
@@ -1270,7 +1252,6 @@ int llc_conn_ac_upd_nr_received(struct sock *sk, struct sk_buff *skb)
	if (acked > 0 || (llc->dev->flags & IFF_LOOPBACK)) {
		llc->retry_count = 0;
		del_timer(&llc->ack_timer.timer);
-		llc->ack_timer.running = 0;
		if (llc->failed_data_req) {
			/* already, we did not accept data from upper layer
			 * (tx_window full or unacceptable state). Now, we
...
@@ -1285,7 +1266,6 @@ int llc_conn_ac_upd_nr_received(struct sock *sk, struct sk_buff *skb)
			llc->ack_timer.timer.data = (unsigned long)sk;
			llc->ack_timer.timer.function = llc_conn_ack_tmr_cb;
			add_timer(&llc->ack_timer.timer);
-			llc->ack_timer.running = 1;
		}
	} else if (llc->failed_data_req) {
		llc_pdu_decode_pf_bit(skb, &fbit);
...
@@ -1423,13 +1403,11 @@ void llc_conn_pf_cycle_tmr_cb(unsigned long timeout_data)
	struct sk_buff *skb = alloc_skb(0, GFP_ATOMIC);
	bh_lock_sock(sk);
-	llc_sk(sk)->pf_cycle_timer.running = 0;
	if (skb) {
		struct llc_conn_state_ev *ev = llc_conn_ev(skb);
		skb->sk = sk;
		ev->type = LLC_CONN_EV_TYPE_P_TMR;
-		ev->data.tmr.timer_specific = NULL;
		llc_process_tmr_ev(sk, skb);
	}
	bh_unlock_sock(sk);
...
@@ -1441,13 +1419,11 @@ static void llc_conn_busy_tmr_cb(unsigned long timeout_data)
	struct sk_buff *skb = alloc_skb(0, GFP_ATOMIC);
	bh_lock_sock(sk);
-	llc_sk(sk)->busy_state_timer.running = 0;
	if (skb) {
		struct llc_conn_state_ev *ev = llc_conn_ev(skb);
		skb->sk = sk;
		ev->type = LLC_CONN_EV_TYPE_BUSY_TMR;
-		ev->data.tmr.timer_specific = NULL;
		llc_process_tmr_ev(sk, skb);
	}
	bh_unlock_sock(sk);
...
@@ -1459,13 +1435,11 @@ void llc_conn_ack_tmr_cb(unsigned long timeout_data)
	struct sk_buff *skb = alloc_skb(0, GFP_ATOMIC);
	bh_lock_sock(sk);
-	llc_sk(sk)->ack_timer.running = 0;
	if (skb) {
		struct llc_conn_state_ev *ev = llc_conn_ev(skb);
		skb->sk = sk;
		ev->type = LLC_CONN_EV_TYPE_ACK_TMR;
-		ev->data.tmr.timer_specific = NULL;
		llc_process_tmr_ev(sk, skb);
	}
	bh_unlock_sock(sk);
...
@@ -1477,13 +1451,11 @@ static void llc_conn_rej_tmr_cb(unsigned long timeout_data)
	struct sk_buff *skb = alloc_skb(0, GFP_ATOMIC);
	bh_lock_sock(sk);
-	llc_sk(sk)->rej_sent_timer.running = 0;
	if (skb) {
		struct llc_conn_state_ev *ev = llc_conn_ev(skb);
		skb->sk = sk;
		ev->type = LLC_CONN_EV_TYPE_REJ_TMR;
-		ev->data.tmr.timer_specific = NULL;
		llc_process_tmr_ev(sk, skb);
	}
	bh_unlock_sock(sk);
...
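Every llc_c_ac.c hunk above deletes the hand-maintained .running flag that shadowed each struct timer_list; the one place that read it, llc_conn_ac_start_ack_tmr_if_not_running(), now asks the timer core directly with timer_pending(). A condensed sketch of that pattern; start_timer_if_idle() is a hypothetical helper, not part of the patch.

#include <linux/timer.h>
#include <linux/sched.h>	/* jiffies */

/* Arm a timer only if it is not already queued - replaces the old
 * "if (!llc->ack_timer.running)" test. */
static void start_timer_if_idle(struct timer_list *t, unsigned long delay)
{
	if (!timer_pending(t)) {
		t->expires = jiffies + delay;
		add_timer(t);
	}
}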
net/llc/llc_if.c
...
@@ -86,7 +86,8 @@ void llc_sap_close(struct llc_sap *sap)
 * llc_build_and_send_ui_pkt - unitdata request interface for upper layers
 * @sap: sap to use
 * @skb: packet to send
- * @addr: destination address
+ * @dmac: destination mac address
+ * @dsap: destination sap
 *
 * Upper layers calls this function when upper layer wants to send data
 * using connection-less mode communication (UI pdu).
...
@@ -95,25 +96,22 @@ void llc_sap_close(struct llc_sap *sap)
 * less mode communication; timeout/retries handled by network layer;
 * package primitive as an event and send to SAP event handler
 */
void llc_build_and_send_ui_pkt(struct llc_sap *sap,
			       struct sk_buff *skb,
-			       struct sockaddr_llc *addr)
+			       u8 *dmac, u8 dsap)
{
	union llc_u_prim_data prim_data;
	struct llc_prim_if_block prim;
	struct llc_sap_state_ev *ev = llc_sap_ev(skb);
-	skb->protocol = llc_proto_type(addr->sllc_arphrd);
	prim.data = &prim_data;
	prim.sap = sap;
	prim.prim = LLC_DATAUNIT_PRIM;
	prim_data.udata.skb = skb;
	prim_data.udata.saddr.lsap = sap->laddr.lsap;
-	prim_data.udata.daddr.lsap = addr->sllc_dsap;
+	prim_data.udata.daddr.lsap = dsap;
	memcpy(prim_data.udata.saddr.mac, skb->dev->dev_addr, IFHWADDRLEN);
-	memcpy(prim_data.udata.daddr.mac, addr->sllc_dmac, IFHWADDRLEN);
+	memcpy(prim_data.udata.daddr.mac, dmac, IFHWADDRLEN);
	ev->type = LLC_SAP_EV_TYPE_PRIM;
	ev->data.prim.prim = LLC_DATAUNIT_PRIM;
...
@@ -126,30 +124,28 @@ void llc_build_and_send_ui_pkt(struct llc_sap *sap,
 * llc_build_and_send_test_pkt - TEST interface for upper layers.
 * @sap: sap to use
 * @skb: packet to send
- * @addr: destination address
+ * @dmac: destination mac address
+ * @dsap: destination sap
 *
 * This function is called when upper layer wants to send a TEST pdu.
 * Returns 0 for success, 1 otherwise.
 */
void llc_build_and_send_test_pkt(struct llc_sap *sap,
				 struct sk_buff *skb,
-				 struct sockaddr_llc *addr)
+				 u8 *dmac, u8 dsap)
{
	union llc_u_prim_data prim_data;
	struct llc_prim_if_block prim;
	struct llc_sap_state_ev *ev = llc_sap_ev(skb);
-	skb->protocol = llc_proto_type(addr->sllc_arphrd);
	prim.data = &prim_data;
	prim.sap = sap;
	prim.prim = LLC_TEST_PRIM;
	prim_data.test.skb = skb;
	prim_data.test.saddr.lsap = sap->laddr.lsap;
-	prim_data.test.daddr.lsap = addr->sllc_dsap;
+	prim_data.test.daddr.lsap = dsap;
	memcpy(prim_data.test.saddr.mac, skb->dev->dev_addr, IFHWADDRLEN);
-	memcpy(prim_data.test.daddr.mac, addr->sllc_dmac, IFHWADDRLEN);
+	memcpy(prim_data.test.daddr.mac, dmac, IFHWADDRLEN);
	ev->type = LLC_SAP_EV_TYPE_PRIM;
	ev->data.prim.prim = LLC_TEST_PRIM;
...
@@ -162,30 +158,28 @@ void llc_build_and_send_test_pkt(struct llc_sap *sap,
 * llc_build_and_send_xid_pkt - XID interface for upper layers
 * @sap: sap to use
 * @skb: packet to send
- * @addr: destination address
+ * @dmac: destination mac address
+ * @dsap: destination sap
 *
 * This function is called when upper layer wants to send a XID pdu.
 * Returns 0 for success, 1 otherwise.
 */
void llc_build_and_send_xid_pkt(struct llc_sap *sap,
				struct sk_buff *skb,
-				struct sockaddr_llc *addr)
+				u8 *dmac, u8 dsap)
{
	union llc_u_prim_data prim_data;
	struct llc_prim_if_block prim;
	struct llc_sap_state_ev *ev = llc_sap_ev(skb);
-	skb->protocol = llc_proto_type(addr->sllc_arphrd);
	prim.data = &prim_data;
	prim.sap = sap;
	prim.prim = LLC_XID_PRIM;
	prim_data.xid.skb = skb;
	prim_data.xid.saddr.lsap = sap->laddr.lsap;
-	prim_data.xid.daddr.lsap = addr->sllc_dsap;
+	prim_data.xid.daddr.lsap = dsap;
	memcpy(prim_data.xid.saddr.mac, skb->dev->dev_addr, IFHWADDRLEN);
-	memcpy(prim_data.xid.daddr.mac, addr->sllc_dmac, IFHWADDRLEN);
+	memcpy(prim_data.xid.daddr.mac, dmac, IFHWADDRLEN);
	ev->type = LLC_SAP_EV_TYPE_PRIM;
	ev->data.prim.prim = LLC_XID_PRIM;
...
@@ -196,7 +190,8 @@ void llc_build_and_send_xid_pkt(struct llc_sap *sap,
/**
 * llc_build_and_send_pkt - Connection data sending for upper layers.
- * @prim: pointer to structure that contains service parameters
+ * @sk: connection
+ * @skb: packet to send
 *
 * This function is called when upper layer wants to send data using
 * connection oriented communication mode. During sending data, connection
...
@@ -352,3 +347,4 @@ int llc_build_and_send_reset_pkt(struct sock *sk,
EXPORT_SYMBOL(llc_sap_open);
EXPORT_SYMBOL(llc_sap_close);
+EXPORT_SYMBOL(llc_build_and_send_ui_pkt);
net/llc/llc_sap.c
...
@@ -78,9 +78,6 @@ void llc_sap_state_process(struct llc_sap *sap, struct sk_buff *skb)
		sap->ind(ev->prim);
	else if (ev->type == LLC_SAP_EV_TYPE_PDU)
		kfree_skb(skb);
-	else
-		printk(KERN_INFO ":%s !kfree_skb & it is %s in a list\n",
-		       __FUNCTION__, skb->list ? "" : "NOT");
}
/**
...
net/llc/llc_sock.c
...
@@ -77,6 +77,18 @@ static __inline__ u16 llc_ui_next_link_no(int sap)
	return llc_ui_sap_link_no_max[sap]++;
}
+/**
+ * llc_proto_type - return eth protocol for ARP header type
+ * @arphrd: ARP header type.
+ *
+ * Given an ARP header type return the corresponding ethernet protocol.
+ */
+static __inline__ u16 llc_proto_type(u16 arphrd)
+{
+	return arphrd == ARPHRD_IEEE802_TR ?
+			 htons(ETH_P_TR_802_2) : htons(ETH_P_802_2);
+}
/**
 * llc_ui_addr_null - determines if a address structure is null
 * @addr: Address to test if null.
...
@@ -117,13 +129,11 @@ static __inline__ u8 llc_ui_header_len(struct sock *sk,
 * Send data via reliable llc2 connection.
 * Returns 0 upon success, non-zero if action did not succeed.
 */
static int llc_ui_send_data(struct sock *sk, struct sk_buff *skb,
-			    struct sockaddr_llc *addr, int noblock)
+			    int noblock)
{
	struct llc_opt *llc = llc_sk(sk);
	int rc = 0;
-	skb->protocol = llc_proto_type(addr->sllc_arphrd);
	if (llc_data_accept_state(llc->state) || llc->p_flag) {
		int timeout = sock_sndtimeo(sk, noblock);
...
@@ -942,26 +952,30 @@ static int llc_ui_sendmsg(struct socket *sock, struct msghdr *msg, int len,
		goto release;
	skb->sk = sk;
	skb->dev = dev;
+	skb->protocol = llc_proto_type(addr->sllc_arphrd);
	skb_reserve(skb, dev->hard_header_len + llc_ui_header_len(sk, addr));
	rc = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
	if (rc)
		goto out;
	if (addr->sllc_test) {
-		llc_build_and_send_test_pkt(llc->sap, skb, addr);
+		llc_build_and_send_test_pkt(llc->sap, skb, addr->sllc_dmac,
+					    addr->sllc_dsap);
		goto out;
	}
	if (addr->sllc_xid) {
-		llc_build_and_send_xid_pkt(llc->sap, skb, addr);
+		llc_build_and_send_xid_pkt(llc->sap, skb, addr->sllc_dmac,
+					   addr->sllc_dsap);
		goto out;
	}
	if (sk->type == SOCK_DGRAM || addr->sllc_ua) {
-		llc_build_and_send_ui_pkt(llc->sap, skb, addr);
+		llc_build_and_send_ui_pkt(llc->sap, skb, addr->sllc_dmac,
+					  addr->sllc_dsap);
		goto out;
	}
	rc = -ENOPROTOOPT;
	if (!(sk->type == SOCK_STREAM && !addr->sllc_ua))
		goto out;
-	rc = llc_ui_send_data(sk, skb, addr, noblock);
+	rc = llc_ui_send_data(sk, skb, noblock);
	if (rc)
		dprintk("%s: llc_ui_send_data failed: %d\n", __FUNCTION__, rc);
out:
...
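With the sockaddr_llc argument gone from the llc_if.c builders, llc_ui_sendmsg() above is now the single place where skb->protocol is derived from the socket's ARP header type, via the llc_proto_type() inline added at the top of this file. A small stand-alone sketch of that mapping; proto_for() and tag_skb() are hypothetical names, and the values are the ones visible in the hunk.

#include <linux/types.h>
#include <linux/if_arp.h>	/* ARPHRD_IEEE802_TR */
#include <linux/if_ether.h>	/* ETH_P_802_2, ETH_P_TR_802_2 */
#include <linux/skbuff.h>

static inline u16 proto_for(u16 arphrd)
{
	/* token ring maps to ETH_P_TR_802_2, everything else to ETH_P_802_2 */
	return arphrd == ARPHRD_IEEE802_TR ? htons(ETH_P_TR_802_2)
					   : htons(ETH_P_802_2);
}

static void tag_skb(struct sk_buff *skb, u16 arphrd)
{
	skb->protocol = proto_for(arphrd);
}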