Commit e7858f52
authored May 08, 2010 by Ingo Molnar

    Merge branch 'cpu_stop' of
    git://git.kernel.org/pub/scm/linux/kernel/git/tj/misc into sched/core

parents 27a9da65 bbf1bb3e
Showing 13 changed files with 604 additions and 439 deletions.
Documentation/RCU/torture.txt    +0   -10
arch/s390/kernel/time.c          +0   -1
drivers/xen/manage.c             +2   -12
include/linux/rcutiny.h          +0   -2
include/linux/rcutree.h          +0   -1
include/linux/stop_machine.h     +95  -27
kernel/Makefile                  +1   -1
kernel/cpu.c                     +0   -8
kernel/module.c                  +2   -12
kernel/rcutorture.c              +1   -1
kernel/sched.c                   +60  -225
kernel/sched_fair.c              +34  -14
kernel/stop_machine.c            +409 -125
Documentation/RCU/torture.txt

@@ -182,16 +182,6 @@ Similarly, sched_expedited RCU provides the following:
 	sched_expedited-torture: Reader Pipe:  12660320201 95875 0 0 0 0 0 0 0 0 0
 	sched_expedited-torture: Reader Batch:  12660424885 0 0 0 0 0 0 0 0 0 0
 	sched_expedited-torture: Free-Block Circulation:  1090795 1090795 1090794 1090793 1090792 1090791 1090790 1090789 1090788 1090787 0
-	state: -1 / 0:0 3:0 4:0
-
-As before, the first four lines are similar to those for RCU.
-The last line shows the task-migration state.  The first number is
--1 if synchronize_sched_expedited() is idle, -2 if in the process of
-posting wakeups to the migration kthreads, and N when waiting on CPU N.
-Each of the colon-separated fields following the "/" is a CPU:state pair.
-Valid states are "0" for idle, "1" for waiting for quiescent state,
-"2" for passed through quiescent state, and "3" when a race with a
-CPU-hotplug event forces use of the synchronize_sched() primitive.
 
 USAGE
arch/s390/kernel/time.c

@@ -391,7 +391,6 @@ static void __init time_init_wq(void)
 	if (time_sync_wq)
 		return;
 	time_sync_wq = create_singlethread_workqueue("timesync");
-	stop_machine_create();
 }
 
 /*
drivers/xen/manage.c

@@ -80,12 +80,6 @@ static void do_suspend(void)
 
 	shutting_down = SHUTDOWN_SUSPEND;
 
-	err = stop_machine_create();
-	if (err) {
-		printk(KERN_ERR "xen suspend: failed to setup stop_machine %d\n", err);
-		goto out;
-	}
-
 #ifdef CONFIG_PREEMPT
 	/* If the kernel is preemptible, we need to freeze all the processes
 	   to prevent them from being in the middle of a pagetable update

@@ -93,7 +87,7 @@ static void do_suspend(void)
 	err = freeze_processes();
 	if (err) {
 		printk(KERN_ERR "xen suspend: freeze failed %d\n", err);
-		goto out_destroy_sm;
+		goto out;
 	}
 #endif

@@ -136,12 +130,8 @@ static void do_suspend(void)
 out_thaw:
 #ifdef CONFIG_PREEMPT
 	thaw_processes();
-
-out_destroy_sm:
-#endif
-	stop_machine_destroy();
-
 out:
+#endif
 	shutting_down = SHUTDOWN_INVALID;
 }
 
 #endif /* CONFIG_PM_SLEEP */
include/linux/rcutiny.h

@@ -60,8 +60,6 @@ static inline long rcu_batches_completed_bh(void)
 	return 0;
 }
 
-extern int rcu_expedited_torture_stats(char *page);
-
 static inline void rcu_force_quiescent_state(void)
 {
 }
include/linux/rcutree.h

@@ -35,7 +35,6 @@ struct notifier_block;
 extern void rcu_sched_qs(int cpu);
 extern void rcu_bh_qs(int cpu);
 extern int rcu_needs_cpu(int cpu);
-extern int rcu_expedited_torture_stats(char *page);
 
 #ifdef CONFIG_TREE_PREEMPT_RCU
include/linux/stop_machine.h

 #ifndef _LINUX_STOP_MACHINE
 #define _LINUX_STOP_MACHINE
-/* "Bogolock": stop the entire machine, disable interrupts.  This is a
-   very heavy lock, which is equivalent to grabbing every spinlock
-   (and more).  So the "read" side to such a lock is anything which
-   disables preeempt. */
+
 #include <linux/cpu.h>
 #include <linux/cpumask.h>
+#include <linux/list.h>
 #include <asm/system.h>
 
+/*
+ * stop_cpu[s]() is simplistic per-cpu maximum priority cpu
+ * monopolization mechanism.  The caller can specify a non-sleeping
+ * function to be executed on a single or multiple cpus preempting all
+ * other processes and monopolizing those cpus until it finishes.
+ *
+ * Resources for this mechanism are preallocated when a cpu is brought
+ * up and requests are guaranteed to be served as long as the target
+ * cpus are online.
+ */
+typedef int (*cpu_stop_fn_t)(void *arg);
+
+#ifdef CONFIG_SMP
+
+struct cpu_stop_work {
+	struct list_head	list;		/* cpu_stopper->works */
+	cpu_stop_fn_t		fn;
+	void			*arg;
+	struct cpu_stop_done	*done;
+};
+
+int stop_one_cpu(unsigned int cpu, cpu_stop_fn_t fn, void *arg);
+void stop_one_cpu_nowait(unsigned int cpu, cpu_stop_fn_t fn, void *arg,
+			 struct cpu_stop_work *work_buf);
+int stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg);
+int try_stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg);
+
+#else	/* CONFIG_SMP */
+
+#include <linux/workqueue.h>
+
+struct cpu_stop_work {
+	struct work_struct	work;
+	cpu_stop_fn_t		fn;
+	void			*arg;
+};
+
+static inline int stop_one_cpu(unsigned int cpu, cpu_stop_fn_t fn, void *arg)
+{
+	int ret = -ENOENT;
+	preempt_disable();
+	if (cpu == smp_processor_id())
+		ret = fn(arg);
+	preempt_enable();
+	return ret;
+}
+
+static void stop_one_cpu_nowait_workfn(struct work_struct *work)
+{
+	struct cpu_stop_work *stwork =
+		container_of(work, struct cpu_stop_work, work);
+	preempt_disable();
+	stwork->fn(stwork->arg);
+	preempt_enable();
+}
+
+static inline void stop_one_cpu_nowait(unsigned int cpu,
+				       cpu_stop_fn_t fn, void *arg,
+				       struct cpu_stop_work *work_buf)
+{
+	if (cpu == smp_processor_id()) {
+		INIT_WORK(&work_buf->work, stop_one_cpu_nowait_workfn);
+		work_buf->fn = fn;
+		work_buf->arg = arg;
+		schedule_work(&work_buf->work);
+	}
+}
+
+static inline int stop_cpus(const struct cpumask *cpumask,
+			    cpu_stop_fn_t fn, void *arg)
+{
+	if (cpumask_test_cpu(raw_smp_processor_id(), cpumask))
+		return stop_one_cpu(raw_smp_processor_id(), fn, arg);
+	return -ENOENT;
+}
+
+static inline int try_stop_cpus(const struct cpumask *cpumask,
+				cpu_stop_fn_t fn, void *arg)
+{
+	return stop_cpus(cpumask, fn, arg);
+}
+
+#endif	/* CONFIG_SMP */
+
+/*
+ * stop_machine "Bogolock": stop the entire machine, disable
+ * interrupts.  This is a very heavy lock, which is equivalent to
+ * grabbing every spinlock (and more).  So the "read" side to such a
+ * lock is anything which disables preeempt.
+ */
 #if defined(CONFIG_STOP_MACHINE) && defined(CONFIG_SMP)
 
 /**

@@ -36,24 +124,7 @@ int stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus);
  */
 int __stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus);
 
-/**
- * stop_machine_create: create all stop_machine threads
- *
- * Description: This causes all stop_machine threads to be created before
- * stop_machine actually gets called. This can be used by subsystems that
- * need a non failing stop_machine infrastructure.
- */
-int stop_machine_create(void);
-
-/**
- * stop_machine_destroy: destroy all stop_machine threads
- *
- * Description: This causes all stop_machine threads which were created with
- * stop_machine_create to be destroyed again.
- */
-void stop_machine_destroy(void);
-
-#else
+#else	/* CONFIG_STOP_MACHINE && CONFIG_SMP */
 
 static inline int stop_machine(int (*fn)(void *), void *data,
 			       const struct cpumask *cpus)

@@ -65,8 +136,5 @@ static inline int stop_machine(int (*fn)(void *), void *data,
 	return ret;
 }
 
-static inline int stop_machine_create(void) { return 0; }
-static inline void stop_machine_destroy(void) { }
-
-#endif				/* CONFIG_SMP */
-#endif /* _LINUX_STOP_MACHINE */
+#endif	/* CONFIG_STOP_MACHINE && CONFIG_SMP */
+#endif	/* _LINUX_STOP_MACHINE */
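
The header above is the whole client-side API of the new cpu_stop mechanism. As a rough illustration of how a subsystem might call it, here is a minimal, hypothetical sketch (not part of this commit; the module and callback names are invented) that runs a non-sleeping callback on one online CPU via stop_one_cpu():

	/*
	 * Hypothetical usage sketch -- not from this commit.  Runs a
	 * non-sleeping callback on one online CPU with that CPU monopolized.
	 */
	#include <linux/kernel.h>
	#include <linux/module.h>
	#include <linux/smp.h>
	#include <linux/stop_machine.h>

	/* cpu_stop_fn_t callback: must not sleep; it preempts everything
	 * else on the target cpu until it returns. */
	static int poke_cpu(void *arg)
	{
		int *token = arg;

		(*token)++;
		pr_info("cpu_stop callback on cpu %d\n", smp_processor_id());
		return 0;
	}

	static int __init cpu_stop_demo_init(void)
	{
		static int token;
		int ret;

		/*
		 * Blocks until poke_cpu() has run on cpu 0 and returns its
		 * return value; -ENOENT if that cpu is not online.
		 */
		ret = stop_one_cpu(0, poke_cpu, &token);
		pr_info("stop_one_cpu() = %d, token = %d\n", ret, token);
		return 0;
	}
	module_init(cpu_stop_demo_init);

	static void __exit cpu_stop_demo_exit(void)
	{
	}
	module_exit(cpu_stop_demo_exit);

	MODULE_LICENSE("GPL");

Note how the comment block in the header motivates this: because the stopper resources are preallocated at CPU-up time, callers like cpu_down() and delete_module() below no longer need the failing stop_machine_create()/stop_machine_destroy() pair.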
kernel/Makefile

@@ -68,7 +68,7 @@ obj-$(CONFIG_USER_NS) += user_namespace.o
 obj-$(CONFIG_PID_NS) += pid_namespace.o
 obj-$(CONFIG_IKCONFIG) += configs.o
 obj-$(CONFIG_RESOURCE_COUNTERS) += res_counter.o
-obj-$(CONFIG_STOP_MACHINE) += stop_machine.o
+obj-$(CONFIG_SMP) += stop_machine.o
 obj-$(CONFIG_KPROBES_SANITY_TEST) += test_kprobes.o
 obj-$(CONFIG_AUDIT) += audit.o auditfilter.o audit_watch.o
 obj-$(CONFIG_AUDITSYSCALL) += auditsc.o
kernel/cpu.c

@@ -266,9 +266,6 @@ int __ref cpu_down(unsigned int cpu)
 {
 	int err;
 
-	err = stop_machine_create();
-	if (err)
-		return err;
 
 	cpu_maps_update_begin();
 
 	if (cpu_hotplug_disabled) {

@@ -280,7 +277,6 @@ int __ref cpu_down(unsigned int cpu)
 out:
 	cpu_maps_update_done();
-	stop_machine_destroy();
 	return err;
 }
 EXPORT_SYMBOL(cpu_down);

@@ -361,9 +357,6 @@ int disable_nonboot_cpus(void)
 {
 	int cpu, first_cpu, error;
 
-	error = stop_machine_create();
-	if (error)
-		return error;
 	cpu_maps_update_begin();
 	first_cpu = cpumask_first(cpu_online_mask);
 	/*

@@ -394,7 +387,6 @@ int disable_nonboot_cpus(void)
 		printk(KERN_ERR "Non-boot CPUs are not disabled\n");
 	}
 	cpu_maps_update_done();
-	stop_machine_destroy();
 	return error;
 }
kernel/module.c

@@ -723,16 +723,8 @@ SYSCALL_DEFINE2(delete_module, const char __user *, name_user,
 		return -EFAULT;
 	name[MODULE_NAME_LEN-1] = '\0';
 
-	/* Create stop_machine threads since free_module relies on
-	 * a non-failing stop_machine call. */
-	ret = stop_machine_create();
-	if (ret)
-		return ret;
-
-	if (mutex_lock_interruptible(&module_mutex) != 0) {
-		ret = -EINTR;
-		goto out_stop;
-	}
+	if (mutex_lock_interruptible(&module_mutex) != 0)
+		return -EINTR;
 
 	mod = find_module(name);
 	if (!mod) {

@@ -792,8 +784,6 @@ SYSCALL_DEFINE2(delete_module, const char __user *, name_user,
 out:
 	mutex_unlock(&module_mutex);
-out_stop:
-	stop_machine_destroy();
 	return ret;
 }
kernel/rcutorture.c

@@ -669,7 +669,7 @@ static struct rcu_torture_ops sched_expedited_ops = {
 	.sync		= synchronize_sched_expedited,
 	.cb_barrier	= NULL,
 	.fqs		= rcu_sched_force_quiescent_state,
-	.stats		= rcu_expedited_torture_stats,
+	.stats		= NULL,
 	.irq_capable	= 1,
 	.name		= "sched_expedited"
 };
kernel/sched.c

This diff is collapsed.
kernel/sched_fair.c

@@ -2798,6 +2798,8 @@ static int need_active_balance(struct sched_domain *sd, int sd_idle, int idle)
 	return unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2);
 }
 
+static int active_load_balance_cpu_stop(void *data);
+
 /*
  * Check this_cpu to ensure it is balanced within domain. Attempt to move
  * tasks if there is an imbalance.

@@ -2887,8 +2889,9 @@ static int load_balance(int this_cpu, struct rq *this_rq,
 		if (need_active_balance(sd, sd_idle, idle)) {
 			raw_spin_lock_irqsave(&busiest->lock, flags);
 
-			/* don't kick the migration_thread, if the curr
-			 * task on busiest cpu can't be moved to this_cpu
+			/* don't kick the active_load_balance_cpu_stop,
+			 * if the curr task on busiest cpu can't be
+			 * moved to this_cpu
 			 */
 			if (!cpumask_test_cpu(this_cpu,
 					      &busiest->curr->cpus_allowed)) {

@@ -2898,14 +2901,22 @@ static int load_balance(int this_cpu, struct rq *this_rq,
 				goto out_one_pinned;
 			}
 
+			/*
+			 * ->active_balance synchronizes accesses to
+			 * ->active_balance_work.  Once set, it's cleared
+			 * only after active load balance is finished.
+			 */
 			if (!busiest->active_balance) {
 				busiest->active_balance = 1;
 				busiest->push_cpu = this_cpu;
 				active_balance = 1;
 			}
 			raw_spin_unlock_irqrestore(&busiest->lock, flags);
+
 			if (active_balance)
-				wake_up_process(busiest->migration_thread);
+				stop_one_cpu_nowait(cpu_of(busiest),
+					active_load_balance_cpu_stop, busiest,
+					&busiest->active_balance_work);
 
 			/*
 			 * We've kicked active balancing, reset the failure

@@ -3012,24 +3023,29 @@ static void idle_balance(int this_cpu, struct rq *this_rq)
 }
 
 /*
- * active_load_balance is run by migration threads. It pushes running tasks
- * off the busiest CPU onto idle CPUs. It requires at least 1 task to be
- * running on each physical CPU where possible, and avoids physical /
- * logical imbalances.
- *
- * Called with busiest_rq locked.
+ * active_load_balance_cpu_stop is run by cpu stopper. It pushes
+ * running tasks off the busiest CPU onto idle CPUs. It requires at
+ * least 1 task to be running on each physical CPU where possible, and
+ * avoids physical / logical imbalances.
  */
-static void active_load_balance(struct rq *busiest_rq, int busiest_cpu)
+static int active_load_balance_cpu_stop(void *data)
 {
+	struct rq *busiest_rq = data;
+	int busiest_cpu = cpu_of(busiest_rq);
 	int target_cpu = busiest_rq->push_cpu;
+	struct rq *target_rq = cpu_rq(target_cpu);
 	struct sched_domain *sd;
-	struct rq *target_rq;
+
+	raw_spin_lock_irq(&busiest_rq->lock);
+
+	/* make sure the requested cpu hasn't gone down in the meantime */
+	if (unlikely(busiest_cpu != smp_processor_id() ||
+		     !busiest_rq->active_balance))
+		goto out_unlock;
 
 	/* Is there any task to move? */
 	if (busiest_rq->nr_running <= 1)
-		return;
-
-	target_rq = cpu_rq(target_cpu);
+		goto out_unlock;
 
 	/*
 	 * This condition is "impossible", if it occurs

@@ -3058,6 +3074,10 @@ static void active_load_balance(struct rq *busiest_rq, int busiest_cpu)
 			schedstat_inc(sd, alb_failed);
 	}
 	double_unlock_balance(busiest_rq, target_rq);
+out_unlock:
+	busiest_rq->active_balance = 0;
+	raw_spin_unlock_irq(&busiest_rq->lock);
+	return 0;
 }
 
 #ifdef CONFIG_NO_HZ
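
The ->active_balance comment added above is the heart of the conversion: the flag is set under the runqueue lock before queueing and cleared by the callback when it finishes, so the preallocated ->active_balance_work buffer is never queued while still in flight. A standalone, hypothetical sketch of that flag-plus-preallocated-cpu_stop_work pattern (invented names; not the scheduler code itself) looks like this:

	/*
	 * Hypothetical sketch of the pattern used above -- not scheduler
	 * code.  One preallocated cpu_stop_work per object, guarded by a
	 * flag so the work buffer is never queued while still in use.
	 */
	#include <linux/spinlock.h>
	#include <linux/stop_machine.h>

	struct balancer {
		raw_spinlock_t		lock;	/* init with raw_spin_lock_init() */
		int			active;	/* 1 while work is in flight */
		struct cpu_stop_work	work;	/* preallocated, reused */
	};

	static int balancer_cpu_stop(void *data)
	{
		struct balancer *b = data;

		/* ... do the work on the stopped cpu ... */

		raw_spin_lock_irq(&b->lock);
		b->active = 0;			/* work buffer reusable again */
		raw_spin_unlock_irq(&b->lock);
		return 0;
	}

	static void balancer_kick(struct balancer *b, unsigned int cpu)
	{
		int kicked = 0;

		raw_spin_lock_irq(&b->lock);
		if (!b->active) {		/* at most one request in flight */
			b->active = 1;
			kicked = 1;
		}
		raw_spin_unlock_irq(&b->lock);

		/* queue outside the lock, as load_balance() does above */
		if (kicked)
			stop_one_cpu_nowait(cpu, balancer_cpu_stop, b, &b->work);
	}

This is also why the function now returns int and re-checks smp_processor_id() on entry: a cpu_stop callback can be invoked after the target CPU was requested offline, so it must validate its context rather than assume it, as the old migration-thread version could.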
kernel/stop_machine.c

This diff is collapsed.