Commit b24746c7 authored by Linus Torvalds

Merge branch 'core-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'core-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  rcu: Teach RCU that idle task is not quiescent state at boot
parents f41bf2ab a6826048
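
In outline, the fix adds an rcu_scheduler_active flag that is set by a new rcu_scheduler_starting() hook called from rest_init() just before the first schedule(). Until then only the boot CPU is online and no context switch has occurred, so synchronize_rcu() may return immediately (blocking is itself a grace period), while scheduling-clock ticks taken from the not-yet-real idle loop must not be reported as quiescent states. A minimal standalone sketch of that gating logic follows; the helpers, globals and printf reporting are userspace stand-ins for illustration, not the kernel implementation.

/*
 * Toy model of the early-boot fast path added by this commit.
 * num_online_cpus(), rcu_blocking_is_gp() and rcu_scheduler_starting()
 * mirror the kernel names, but everything here is a stand-in.
 */
#include <stdio.h>

static int rcu_scheduler_active;        /* set once the scheduler starts   */
static int online_cpus = 1;             /* only the boot CPU early in boot */

static int num_online_cpus(void)
{
        return online_cpus;
}

/* Preemptible-RCU flavour: fast path applies only before the scheduler runs. */
static int rcu_blocking_is_gp(void)
{
        return num_online_cpus() == 1 && !rcu_scheduler_active;
}

static void synchronize_rcu(void)
{
        if (rcu_blocking_is_gp()) {
                printf("early boot: blocking is itself a grace period\n");
                return;
        }
        printf("after boot: queue a callback and wait for a real grace period\n");
}

/* Mirrors the hook rest_init() now calls before the first schedule(). */
static void rcu_scheduler_starting(void)
{
        rcu_scheduler_active = 1;
}

int main(void)
{
        synchronize_rcu();              /* returns immediately */
        rcu_scheduler_starting();
        synchronize_rcu();              /* must now really wait */
        return 0;
}

Note that the flavours differ: classic and tree RCU keep only the single-CPU test in their rcu_blocking_is_gp(), while the preemptible flavour additionally requires !rcu_scheduler_active, as the comment added to rcupreempt.h below explains.
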
include/linux/rcuclassic.h
@@ -181,4 +181,10 @@ extern long rcu_batches_completed_bh(void);
 #define rcu_enter_nohz()        do { } while (0)
 #define rcu_exit_nohz()         do { } while (0)
 
+/* A context switch is a grace period for rcuclassic. */
+static inline int rcu_blocking_is_gp(void)
+{
+        return num_online_cpus() == 1;
+}
+
 #endif /* __LINUX_RCUCLASSIC_H */
include/linux/rcupdate.h
@@ -52,6 +52,9 @@ struct rcu_head {
         void (*func)(struct rcu_head *head);
 };
 
+/* Internal to kernel, but needed by rcupreempt.h. */
+extern int rcu_scheduler_active;
+
 #if defined(CONFIG_CLASSIC_RCU)
 #include <linux/rcuclassic.h>
 #elif defined(CONFIG_TREE_RCU)
@@ -265,6 +268,7 @@ extern void rcu_barrier_sched(void);
 
 /* Internal to kernel */
 extern void rcu_init(void);
+extern void rcu_scheduler_starting(void);
 extern int rcu_needs_cpu(int cpu);
 
 #endif /* __LINUX_RCUPDATE_H */
include/linux/rcupreempt.h
@@ -142,4 +142,19 @@ static inline void rcu_exit_nohz(void)
 #define rcu_exit_nohz()         do { } while (0)
 #endif /* CONFIG_NO_HZ */
 
+/*
+ * A context switch is a grace period for rcupreempt synchronize_rcu()
+ * only during early boot, before the scheduler has been initialized.
+ * So, how the heck do we get a context switch?  Well, if the caller
+ * invokes synchronize_rcu(), they are willing to accept a context
+ * switch, so we simply pretend that one happened.
+ *
+ * After boot, there might be a blocked or preempted task in an RCU
+ * read-side critical section, so we cannot then take the fastpath.
+ */
+static inline int rcu_blocking_is_gp(void)
+{
+        return num_online_cpus() == 1 && !rcu_scheduler_active;
+}
+
 #endif /* __LINUX_RCUPREEMPT_H */
include/linux/rcutree.h
@@ -326,4 +326,10 @@ static inline void rcu_exit_nohz(void)
 }
 #endif /* CONFIG_NO_HZ */
 
+/* A context switch is a grace period for rcutree. */
+static inline int rcu_blocking_is_gp(void)
+{
+        return num_online_cpus() == 1;
+}
+
 #endif /* __LINUX_RCUTREE_H */
init/main.c
@@ -97,7 +97,7 @@ static inline void mark_rodata_ro(void) { }
 extern void tc_init(void);
 #endif
 
-enum system_states system_state;
+enum system_states system_state __read_mostly;
 EXPORT_SYMBOL(system_state);
 
 /*
@@ -463,6 +463,7 @@ static noinline void __init_refok rest_init(void)
          * at least once to get things moving:
          */
         init_idle_bootup_task(current);
+        rcu_scheduler_starting();
         preempt_enable_no_resched();
         schedule();
         preempt_disable();
kernel/rcuclassic.c
@@ -679,8 +679,8 @@ int rcu_needs_cpu(int cpu)
 void rcu_check_callbacks(int cpu, int user)
 {
         if (user ||
-            (idle_cpu(cpu) && !in_softirq() &&
-                                hardirq_count() <= (1 << HARDIRQ_SHIFT))) {
+            (idle_cpu(cpu) && rcu_scheduler_active &&
+             !in_softirq() && hardirq_count() <= (1 << HARDIRQ_SHIFT))) {
 
                 /*
                  * Get here if this CPU took its interrupt from user
kernel/rcupdate.c
@@ -44,6 +44,7 @@
 #include <linux/cpu.h>
 #include <linux/mutex.h>
 #include <linux/module.h>
+#include <linux/kernel_stat.h>
 
 enum rcu_barrier {
         RCU_BARRIER_STD,
@@ -55,6 +56,7 @@ static DEFINE_PER_CPU(struct rcu_head, rcu_barrier_head) = {NULL};
 static atomic_t rcu_barrier_cpu_count;
 static DEFINE_MUTEX(rcu_barrier_mutex);
 static struct completion rcu_barrier_completion;
+int rcu_scheduler_active __read_mostly;
 
 /*
  * Awaken the corresponding synchronize_rcu() instance now that a
@@ -80,6 +82,10 @@ void wakeme_after_rcu(struct rcu_head *head)
 void synchronize_rcu(void)
 {
         struct rcu_synchronize rcu;
 
+        if (rcu_blocking_is_gp())
+                return;
+
         init_completion(&rcu.completion);
         /* Will wake me after RCU finished. */
         call_rcu(&rcu.head, wakeme_after_rcu);
@@ -175,3 +181,9 @@ void __init rcu_init(void)
         __rcu_init();
 }
 
+void rcu_scheduler_starting(void)
+{
+        WARN_ON(num_online_cpus() != 1);
+        WARN_ON(nr_context_switches() > 0);
+        rcu_scheduler_active = 1;
+}
kernel/rcupreempt.c
@@ -1181,6 +1181,9 @@ void __synchronize_sched(void)
 {
         struct rcu_synchronize rcu;
 
+        if (num_online_cpus() == 1)
+                return;  /* blocking is gp if only one CPU! */
+
         init_completion(&rcu.completion);
         /* Will wake me after RCU finished. */
         call_rcu_sched(&rcu.head, wakeme_after_rcu);
kernel/rcutree.c
@@ -948,8 +948,8 @@ static void rcu_do_batch(struct rcu_data *rdp)
 void rcu_check_callbacks(int cpu, int user)
 {
         if (user ||
-            (idle_cpu(cpu) && !in_softirq() &&
-                                hardirq_count() <= (1 << HARDIRQ_SHIFT))) {
+            (idle_cpu(cpu) && rcu_scheduler_active &&
+             !in_softirq() && hardirq_count() <= (1 << HARDIRQ_SHIFT))) {
 
                 /*
                  * Get here if this CPU took its interrupt from user
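
The rcu_check_callbacks() change in the classic and tree flavours is the "idle task is not a quiescent state at boot" half of the fix: until rcu_scheduler_starting() has run, the boot thread still does real initialization work in what looks like the idle task, so a scheduling-clock tick taken from it must not be reported as a quiescent state. A toy decision function modelling just that test; the parameters stand in for idle_cpu(), in_softirq() and the hardirq count and are not kernel APIs.

#include <stdio.h>

static int rcu_scheduler_active;

/* Toy model of the quiescent-state test in rcu_check_callbacks(). */
static int tick_is_quiescent_state(int from_user, int cpu_is_idle,
                                   int in_softirq, int hardirq_nesting)
{
        if (from_user)
                return 1;       /* tick interrupted user-mode execution */
        /* Idle-loop ticks count only once the scheduler really runs. */
        return cpu_is_idle && rcu_scheduler_active &&
               !in_softirq && hardirq_nesting <= 1;
}

int main(void)
{
        /* Tick from the "idle" boot thread, scheduler not yet started. */
        printf("before rcu_scheduler_starting(): %d\n",
               tick_is_quiescent_state(0, 1, 0, 1));    /* 0: not quiescent */

        rcu_scheduler_active = 1;       /* as rcu_scheduler_starting() does */
        printf("after  rcu_scheduler_starting(): %d\n",
               tick_is_quiescent_state(0, 1, 0, 1));    /* 1: quiescent */
        return 0;
}
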