Commit 4eb86765 authored by Ingo Molnar

Merge branch 'smp/hotplug' into sched/core, to resolve conflicts

Conflicts:
	kernel/sched/core.c
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parents eb60b3e5 e5ef27d0
arch/powerpc/kernel/smp.c
@@ -565,7 +565,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle)
 		smp_ops->give_timebase();
 
 	/* Wait until cpu puts itself in the online & active maps */
-	while (!cpu_online(cpu) || !cpu_active(cpu))
+	while (!cpu_online(cpu))
 		cpu_relax();
 
 	return 0;
arch/s390/kernel/smp.c
@@ -832,7 +832,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle)
 	pcpu_attach_task(pcpu, tidle);
 	pcpu_start_fn(pcpu, smp_start_secondary, NULL);
 	/* Wait until cpu puts itself in the online & active maps */
-	while (!cpu_online(cpu) || !cpu_active(cpu))
+	while (!cpu_online(cpu))
 		cpu_relax();
 	return 0;
 }
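Both architectures stop waiting for the active bit for the same reason: set_cpu_online() no longer sets it (see the cpumask.h hunk below). Activation now happens in the new CPUHP_AP_ACTIVE hotplug state, which only runs after __cpu_up() has returned, so spinning on cpu_active() here could never make progress.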
include/linux/cpu.h
@@ -59,25 +59,7 @@ struct notifier_block;
  * CPU notifier priorities.
  */
 enum {
-	/*
-	 * SCHED_ACTIVE marks a cpu which is coming up active during
-	 * CPU_ONLINE and CPU_DOWN_FAILED and must be the first
-	 * notifier.  CPUSET_ACTIVE adjusts cpuset according to
-	 * cpu_active mask right after SCHED_ACTIVE.  During
-	 * CPU_DOWN_PREPARE, SCHED_INACTIVE and CPUSET_INACTIVE are
-	 * ordered in the similar way.
-	 *
-	 * This ordering guarantees consistent cpu_active mask and
-	 * migration behavior to all cpu notifiers.
-	 */
-	CPU_PRI_SCHED_ACTIVE	= INT_MAX,
-	CPU_PRI_CPUSET_ACTIVE	= INT_MAX - 1,
-	CPU_PRI_SCHED_INACTIVE	= INT_MIN + 1,
-	CPU_PRI_CPUSET_INACTIVE	= INT_MIN,
-
-	/* migration should happen before other stuff but after perf */
 	CPU_PRI_PERF		= 20,
-	CPU_PRI_MIGRATION	= 10,
 
 	/* bring up workqueues before normal notifiers and down after */
 	CPU_PRI_WORKQUEUE_UP	= 5,
include/linux/cpuhotplug.h
@@ -8,6 +8,7 @@ enum cpuhp_state {
 	CPUHP_BRINGUP_CPU,
 	CPUHP_AP_IDLE_DEAD,
 	CPUHP_AP_OFFLINE,
+	CPUHP_AP_SCHED_STARTING,
 	CPUHP_AP_NOTIFY_STARTING,
 	CPUHP_AP_ONLINE,
 	CPUHP_TEARDOWN_CPU,
@@ -16,6 +17,7 @@ enum cpuhp_state {
 	CPUHP_AP_NOTIFY_ONLINE,
 	CPUHP_AP_ONLINE_DYN,
 	CPUHP_AP_ONLINE_DYN_END		= CPUHP_AP_ONLINE_DYN + 30,
+	CPUHP_AP_ACTIVE,
 	CPUHP_ONLINE,
 };
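The position of the two new entries is the point of the patch: the hotplug core invokes states in ascending enum order during bringup and in descending order during teardown, so CPUHP_AP_SCHED_STARTING runs before any STARTING notifier and CPUHP_AP_ACTIVE runs after everything else, replacing the INT_MAX/INT_MIN priority games removed from cpu.h above. A minimal sketch of that walk (illustrative only, kernel context assumed; the real logic lives in kernel/cpu.c):

	/*
	 * Sketch: startup callbacks fire in ascending enum order, teardown
	 * callbacks in descending order, so enum position replaces the old
	 * notifier priorities.
	 */
	static int sketch_bringup(struct cpuhp_step *steps,
				  enum cpuhp_state target, unsigned int cpu)
	{
		enum cpuhp_state st;

		for (st = CPUHP_OFFLINE + 1; st <= target; st++) {
			if (steps[st].startup && steps[st].startup(cpu))
				return -EAGAIN;	/* real code rolls back via teardown */
		}
		return 0;
	}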
include/linux/cpumask.h
@@ -743,12 +743,10 @@ set_cpu_present(unsigned int cpu, bool present)
 static inline void
 set_cpu_online(unsigned int cpu, bool online)
 {
-	if (online) {
+	if (online)
 		cpumask_set_cpu(cpu, &__cpu_online_mask);
-		cpumask_set_cpu(cpu, &__cpu_active_mask);
-	} else {
+	else
 		cpumask_clear_cpu(cpu, &__cpu_online_mask);
-	}
 }
 
 static inline void
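For reference, the sibling helper that manipulates the active mask lives in the same header and is untouched here; after this change it is driven from the new sched_cpu_activate()/sched_cpu_deactivate() callbacks instead of from set_cpu_online(). Its shape at the time, quoted from memory, so treat as approximate:

	static inline void
	set_cpu_active(unsigned int cpu, bool active)
	{
		if (active)
			cpumask_set_cpu(cpu, &__cpu_active_mask);
		else
			cpumask_clear_cpu(cpu, &__cpu_active_mask);
	}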
include/linux/sched.h
@@ -374,6 +374,15 @@ extern void cpu_init (void);
 extern void trap_init(void);
 extern void update_process_times(int user);
 extern void scheduler_tick(void);
+extern int sched_cpu_starting(unsigned int cpu);
+extern int sched_cpu_activate(unsigned int cpu);
+extern int sched_cpu_deactivate(unsigned int cpu);
+
+#ifdef CONFIG_HOTPLUG_CPU
+extern int sched_cpu_dying(unsigned int cpu);
+#else
+# define sched_cpu_dying	NULL
+#endif
 
 extern void sched_show_task(struct task_struct *p);
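These prototypes are the glue for the two new states: sched_cpu_starting()/sched_cpu_dying() back CPUHP_AP_SCHED_STARTING and sched_cpu_activate()/sched_cpu_deactivate() back CPUHP_AP_ACTIVE, wired up in the cpuhp_ap_states[] hunks below. The #else branch makes sched_cpu_dying a plain NULL on !CONFIG_HOTPLUG_CPU builds, which the state table accepts as "no teardown callback".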
kernel/cpu.c
@@ -703,21 +703,6 @@ static int takedown_cpu(unsigned int cpu)
 	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
 	int err;
 
-	/*
-	 * By now we've cleared cpu_active_mask, wait for all preempt-disabled
-	 * and RCU users of this state to go away such that all new such users
-	 * will observe it.
-	 *
-	 * For CONFIG_PREEMPT we have preemptible RCU and its sync_rcu() might
-	 * not imply sync_sched(), so wait for both.
-	 *
-	 * Do sync before park smpboot threads to take care the rcu boost case.
-	 */
-	if (IS_ENABLED(CONFIG_PREEMPT))
-		synchronize_rcu_mult(call_rcu, call_rcu_sched);
-	else
-		synchronize_rcu();
-
 	/* Park the smpboot threads */
 	kthread_park(per_cpu_ptr(&cpuhp_state, cpu)->thread);
 	smpboot_park_threads(cpu);
@@ -923,8 +908,6 @@ void cpuhp_online_idle(enum cpuhp_state state)
 
 	st->state = CPUHP_AP_ONLINE_IDLE;
 
-	/* The cpu is marked online, set it active now */
-	set_cpu_active(cpu, true);
 	/* Unpark the stopper thread and the hotplug thread of this cpu */
 	stop_machine_unpark(cpu);
 	kthread_unpark(st->thread);
@@ -1236,6 +1219,12 @@ static struct cpuhp_step cpuhp_ap_states[] = {
 		.name			= "ap:offline",
 		.cant_stop		= true,
 	},
+	/* First state is scheduler control. Interrupts are disabled */
+	[CPUHP_AP_SCHED_STARTING] = {
+		.name			= "sched:starting",
+		.startup		= sched_cpu_starting,
+		.teardown		= sched_cpu_dying,
+	},
 	/*
 	 * Low level startup/teardown notifiers. Run with interrupts
 	 * disabled. Will be removed once the notifiers are converted to
@@ -1274,6 +1263,15 @@ static struct cpuhp_step cpuhp_ap_states[] = {
 	 * The dynamically registered state space is here
 	 */
 
+#ifdef CONFIG_SMP
+	/* Last state is scheduler control setting the cpu active */
+	[CPUHP_AP_ACTIVE] = {
+		.name			= "sched:active",
+		.startup		= sched_cpu_activate,
+		.teardown		= sched_cpu_deactivate,
+	},
+#endif
+
 	/* CPU is fully up and running. */
 	[CPUHP_ONLINE] = {
 		.name			= "online",
kernel/sched/core.c (diff collapsed)
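The collapsed core.c diff is where the pieces removed elsewhere land. In particular, the RCU synchronization deleted from takedown_cpu() above moves next to set_cpu_active(false), so the wait happens as soon as the CPU leaves the active mask. A sketch of the expected shape of the new callback, reconstructed from the removed comment (approximate; cpuset and scheduler-domain handling elided):

	int sched_cpu_deactivate(unsigned int cpu)
	{
		set_cpu_active(cpu, false);

		/*
		 * We've cleared cpu_active_mask: wait for all preempt-disabled
		 * and RCU users of this state to go away so that all new such
		 * users will observe it. With CONFIG_PREEMPT, sync_rcu() does
		 * not imply sync_sched(), so wait for both.
		 */
		if (IS_ENABLED(CONFIG_PREEMPT))
			synchronize_rcu_mult(call_rcu, call_rcu_sched);
		else
			synchronize_rcu();

		return 0;
	}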
kernel/sched/fair.c
@@ -7814,7 +7814,7 @@ static void nohz_balancer_kick(void)
 	return;
 }
 
-static inline void nohz_balance_exit_idle(int cpu)
+void nohz_balance_exit_idle(unsigned int cpu)
 {
 	if (unlikely(test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))) {
 		/*
@@ -7887,18 +7887,6 @@ void nohz_balance_enter_idle(int cpu)
 	atomic_inc(&nohz.nr_cpus);
 	set_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
 }
-
-static int sched_ilb_notifier(struct notifier_block *nfb,
-			      unsigned long action, void *hcpu)
-{
-	switch (action & ~CPU_TASKS_FROZEN) {
-	case CPU_DYING:
-		nohz_balance_exit_idle(smp_processor_id());
-		return NOTIFY_OK;
-	default:
-		return NOTIFY_DONE;
-	}
-}
 #endif
 
 static DEFINE_SPINLOCK(balancing);
@@ -8704,7 +8692,6 @@ __init void init_sched_fair_class(void)
 
 #ifdef CONFIG_NO_HZ_COMMON
 	nohz.next_balance = jiffies;
 	zalloc_cpumask_var(&nohz.idle_cpus_mask, GFP_NOWAIT);
-	cpu_notifier(sched_ilb_notifier, 0);
 #endif
 #endif /* SMP */
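Dropping static inline (and switching the parameter to unsigned int) turns nohz_balance_exit_idle() into a real symbol so the scheduler's hotplug path can call it directly; the self-registered CPU_DYING notifier becomes an explicit call, presumably from sched_cpu_dying() in the collapsed core.c diff. Hypothetical placement:

	int sched_cpu_dying(unsigned int cpu)
	{
		/* rq/migration teardown elided */
		nohz_balance_exit_idle(cpu);
		return 0;
	}

Note that the old notifier used smp_processor_id(), since CPU_DYING notifiers run on the dying CPU; the state callback receives the cpu argument explicitly.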
kernel/sched/sched.h
@@ -1700,6 +1700,10 @@ enum rq_nohz_flag_bits {
 };
 
 #define nohz_flags(cpu)	(&cpu_rq(cpu)->nohz_flags)
+
+extern void nohz_balance_exit_idle(unsigned int cpu);
+#else
+static inline void nohz_balance_exit_idle(unsigned int cpu) { }
 #endif
 
 #ifdef CONFIG_IRQ_TIME_ACCOUNTING
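The #else stub is the usual config pattern: CONFIG_NO_HZ_COMMON=n builds get an empty inline, so the new caller in the hotplug path compiles without needing an #ifdef of its own.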