Commit 9864f5b5 authored by Peter Zijlstra

cpuidle: Move trace_cpu_idle() into generic code

Remove trace_cpu_idle() from the arch_cpu_idle() implementations and
put it in the generic code, right before disabling RCU. Gets rid of
more trace_*_rcuidle() users.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Tested-by: Marco Elver <elver@google.com>
Link: https://lkml.kernel.org/r/20200821085348.428433395@infradead.org
parent bf9282dc
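
For orientation, the net effect in the generic idle path is visible in the kernel/sched/idle.c hunk at the end of this diff: the tracepoint now fires while RCU is still watching, which is why a plain trace_cpu_idle() suffices and the trace_*_rcuidle() calls in the architecture code can go. A condensed sketch of the resulting default_idle_call(), assembled from that hunk and not a verbatim copy of the whole function:

	void __cpuidle default_idle_call(void)
	{
		if (current_clr_polling_and_test()) {
			local_irq_enable();
		} else {
			/* RCU is still watching: a plain tracepoint is safe here */
			trace_cpu_idle(1, smp_processor_id());
			stop_critical_timings();
			rcu_idle_enter();	/* RCU stops watching */
			arch_cpu_idle();	/* arch code no longer needs to trace */
			rcu_idle_exit();	/* RCU is watching again */
			start_critical_timings();
			trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());
		}
	}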
arch/arm/mach-omap2/pm34xx.c
@@ -298,11 +298,7 @@ static void omap3_pm_idle(void)
 	if (omap_irq_pending())
 		return;
 
-	trace_cpu_idle_rcuidle(1, smp_processor_id());
-
 	omap_sram_idle();
-
-	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
 }
 
 #ifdef CONFIG_SUSPEND
arch/arm64/kernel/process.c
@@ -123,10 +123,8 @@ void arch_cpu_idle(void)
 	 * This should do all the clock switching and wait for interrupt
 	 * tricks
 	 */
-	trace_cpu_idle_rcuidle(1, smp_processor_id());
 	cpu_do_idle();
 	local_irq_enable();
-	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
arch/s390/kernel/idle.c
@@ -33,14 +33,13 @@ void enabled_wait(void)
 		PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK;
 	clear_cpu_flag(CIF_NOHZ_DELAY);
 
-	trace_cpu_idle_rcuidle(1, smp_processor_id());
 	local_irq_save(flags);
 	/* Call the assembler magic in entry.S */
 	psw_idle(idle, psw_mask);
 	local_irq_restore(flags);
-	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
 
 	/* Account time spent with enabled wait psw loaded as idle time. */
+	/* XXX seqcount has tracepoints that require RCU */
 	write_seqcount_begin(&idle->seqcount);
 	idle_time = idle->clock_idle_exit - idle->clock_idle_enter;
 	idle->clock_idle_enter = idle->clock_idle_exit = 0ULL;
arch/x86/kernel/process.c
@@ -684,9 +684,7 @@ void arch_cpu_idle(void)
  */
 void __cpuidle default_idle(void)
 {
-	trace_cpu_idle_rcuidle(1, smp_processor_id());
 	safe_halt();
-	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
 }
 #if defined(CONFIG_APM_MODULE) || defined(CONFIG_HALTPOLL_CPUIDLE_MODULE)
 EXPORT_SYMBOL(default_idle);
@@ -792,7 +790,6 @@ static int prefer_mwait_c1_over_halt(const struct cpuinfo_x86 *c)
 static __cpuidle void mwait_idle(void)
 {
 	if (!current_set_polling_and_test()) {
-		trace_cpu_idle_rcuidle(1, smp_processor_id());
 		if (this_cpu_has(X86_BUG_CLFLUSH_MONITOR)) {
 			mb(); /* quirk */
 			clflush((void *)&current_thread_info()->flags);
@@ -804,7 +801,6 @@ static __cpuidle void mwait_idle(void)
 			__sti_mwait(0, 0);
 		else
 			local_irq_enable();
-		trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
 	} else {
 		local_irq_enable();
 	}
kernel/sched/idle.c
@@ -91,11 +91,14 @@ void __cpuidle default_idle_call(void)
 	if (current_clr_polling_and_test()) {
 		local_irq_enable();
 	} else {
+
+		trace_cpu_idle(1, smp_processor_id());
 		stop_critical_timings();
 		rcu_idle_enter();
 		arch_cpu_idle();
 		rcu_idle_exit();
 		start_critical_timings();
+		trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());
 	}
 }