Commit b19ad3b9 authored by Rafael J. Wysocki

Merge branch 'pm-cpuidle'

* pm-cpuidle:
  cpuidle: Add a kerneldoc comment to cpuidle_use_deepest_state()
  cpuidle: fix improper return value on error
  intel_idle: Convert to hotplug state machine
  intel_idle: Remove superfluous SMP fuction call
  MAINTAINERS: Add Jacob Pan as a new intel_idle maintainer
  MAINTAINERS: Add bug tracking system location entries for cpuidle
  x86/intel_idle: Add Knights Mill CPUID
  x86/intel_idle: Add CPU model 0x4a (Atom Z34xx series)
  thermal/intel_powerclamp: stop sched tick in forced idle
  thermal/intel_powerclamp: Convert to CPU hotplug state
  thermal/intel_powerclamp: Convert the kthread to kthread worker API
  thermal/intel_powerclamp: Remove duplicated code that starts the kthread
  sched/idle: Add support for tasks that inject idle
  cpuidle: Allow enforcing deepest idle state selection
  cpuidle/powernv: staticise powernv_idle_driver
  cpuidle: dt: assign ->enter_freeze to same as ->enter callback function
  cpuidle: governors: Remove remaining old module code
parents fecc8c0e 404ea9f1
......@@ -3389,6 +3389,7 @@ M: Daniel Lezcano <daniel.lezcano@linaro.org>
L: linux-pm@vger.kernel.org
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm.git
B: https://bugzilla.kernel.org
F: drivers/cpuidle/*
F: include/linux/cpuidle.h
......@@ -6298,9 +6299,11 @@ S: Maintained
F: drivers/platform/x86/intel-vbtn.c
INTEL IDLE DRIVER
M: Jacob Pan <jacob.jun.pan@linux.intel.com>
M: Len Brown <lenb@kernel.org>
L: linux-pm@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/lenb/linux.git
B: https://bugzilla.kernel.org
S: Supported
F: drivers/idle/intel_idle.c
......
......@@ -22,7 +22,7 @@
#define POWERNV_THRESHOLD_LATENCY_NS 200000
struct cpuidle_driver powernv_idle_driver = {
static struct cpuidle_driver powernv_idle_driver = {
.name = "powernv_idle",
.owner = THIS_MODULE,
};
......
......@@ -97,7 +97,23 @@ static int find_deepest_state(struct cpuidle_driver *drv,
return ret;
}
#ifdef CONFIG_SUSPEND
/**
* cpuidle_use_deepest_state - Set/clear governor override flag.
* @enable: New value of the flag.
*
* If @enable is set, make the current CPU use the deepest available idle
* state from now on, overriding the governors; otherwise clear the override.
*/
void cpuidle_use_deepest_state(bool enable)
{
struct cpuidle_device *dev;
preempt_disable();
dev = cpuidle_get_device();
dev->use_deepest_state = enable;
preempt_enable();
}
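For context (not part of this diff), the flag is meant to be set around a forced-idle section and cleared again afterwards; play_idle() in kernel/sched/idle.c, added later in this series, follows exactly this pattern:

	cpuidle_use_deepest_state(true);
	/* ... run the idle loop for the injection period ... */
	cpuidle_use_deepest_state(false);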
/**
* cpuidle_find_deepest_state - Find the deepest available idle state.
* @drv: cpuidle driver for the given CPU.
......@@ -109,6 +125,7 @@ int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
return find_deepest_state(drv, dev, UINT_MAX, 0, false);
}
#ifdef CONFIG_SUSPEND
static void enter_freeze_proper(struct cpuidle_driver *drv,
struct cpuidle_device *dev, int index)
{
......
......@@ -38,6 +38,12 @@ static int init_state_node(struct cpuidle_state *idle_state,
* state enter function.
*/
idle_state->enter = match_id->data;
/*
* Since this is not a "coupled" state, it's safe to assume interrupts
* won't be enabled when it exits, allowing the tick to be frozen
* safely. The enter() callback can therefore also serve as the
* enter_freeze() callback.
*/
idle_state->enter_freeze = match_id->data;
err = of_property_read_u32(state_node, "wakeup-latency-us",
&idle_state->exit_latency);
......
......@@ -9,7 +9,6 @@
*/
#include <linux/mutex.h>
#include <linux/module.h>
#include <linux/cpuidle.h>
#include "cpuidle.h"
......@@ -53,14 +52,11 @@ int cpuidle_switch_governor(struct cpuidle_governor *gov)
if (cpuidle_curr_governor) {
list_for_each_entry(dev, &cpuidle_detected_devices, device_list)
cpuidle_disable_device(dev);
module_put(cpuidle_curr_governor->owner);
}
cpuidle_curr_governor = gov;
if (gov) {
if (!try_module_get(cpuidle_curr_governor->owner))
return -EINVAL;
list_for_each_entry(dev, &cpuidle_detected_devices, device_list)
cpuidle_enable_device(dev);
cpuidle_install_idle_handler();
......
......@@ -15,7 +15,6 @@
#include <linux/kernel.h>
#include <linux/cpuidle.h>
#include <linux/pm_qos.h>
#include <linux/module.h>
#include <linux/jiffies.h>
#include <linux/tick.h>
......@@ -177,7 +176,6 @@ static struct cpuidle_governor ladder_governor = {
.enable = ladder_enable_device,
.select = ladder_select_state,
.reflect = ladder_reflect,
.owner = THIS_MODULE,
};
/**
......
......@@ -19,7 +19,6 @@
#include <linux/tick.h>
#include <linux/sched.h>
#include <linux/math64.h>
#include <linux/module.h>
/*
* Please note when changing the tuning values:
......@@ -484,7 +483,6 @@ static struct cpuidle_governor menu_governor = {
.enable = menu_enable_device,
.select = menu_select,
.reflect = menu_reflect,
.owner = THIS_MODULE,
};
/**
......
......@@ -403,8 +403,10 @@ static int cpuidle_add_state_sysfs(struct cpuidle_device *device)
/* state statistics */
for (i = 0; i < drv->state_count; i++) {
kobj = kzalloc(sizeof(struct cpuidle_state_kobj), GFP_KERNEL);
if (!kobj)
if (!kobj) {
ret = -ENOMEM;
goto error_state;
}
kobj->state = &drv->states[i];
kobj->state_usage = &device->states_usage[i];
init_completion(&kobj->kobj_unregister);
......
......@@ -98,8 +98,6 @@ static int intel_idle(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int index);
static void intel_idle_freeze(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int index);
static int intel_idle_cpu_init(int cpu);
static struct cpuidle_state *cpuidle_state_table;
/*
......@@ -724,6 +722,50 @@ static struct cpuidle_state atom_cstates[] = {
{
.enter = NULL }
};
static struct cpuidle_state tangier_cstates[] = {
{
.name = "C1-TNG",
.desc = "MWAIT 0x00",
.flags = MWAIT2flg(0x00),
.exit_latency = 1,
.target_residency = 4,
.enter = &intel_idle,
.enter_freeze = intel_idle_freeze, },
{
.name = "C4-TNG",
.desc = "MWAIT 0x30",
.flags = MWAIT2flg(0x30) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 100,
.target_residency = 400,
.enter = &intel_idle,
.enter_freeze = intel_idle_freeze, },
{
.name = "C6-TNG",
.desc = "MWAIT 0x52",
.flags = MWAIT2flg(0x52) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 140,
.target_residency = 560,
.enter = &intel_idle,
.enter_freeze = intel_idle_freeze, },
{
.name = "C7-TNG",
.desc = "MWAIT 0x60",
.flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 1200,
.target_residency = 4000,
.enter = &intel_idle,
.enter_freeze = intel_idle_freeze, },
{
.name = "C9-TNG",
.desc = "MWAIT 0x64",
.flags = MWAIT2flg(0x64) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 10000,
.target_residency = 20000,
.enter = &intel_idle,
.enter_freeze = intel_idle_freeze, },
{
.enter = NULL }
};
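A note on the .flags values above (shown for reference, not part of this hunk): intel_idle stores the raw MWAIT hint in the upper byte of the state flags, using the helpers defined near the top of intel_idle.c:

	#define MWAIT2flg(eax)   ((eax & 0xFF) << 24)
	#define flg2MWAIT(flags) (((flags) >> 24) & 0xFF)

So, for example, "C6-TNG" with MWAIT2flg(0x52) requests MWAIT hint 0x52 when the state is entered.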
static struct cpuidle_state avn_cstates[] = {
{
.name = "C1-AVN",
......@@ -907,51 +949,15 @@ static void intel_idle_freeze(struct cpuidle_device *dev,
mwait_idle_with_hints(eax, ecx);
}
static void __setup_broadcast_timer(void *arg)
static void __setup_broadcast_timer(bool on)
{
unsigned long on = (unsigned long)arg;
if (on)
tick_broadcast_enable();
else
tick_broadcast_disable();
}
static int cpu_hotplug_notify(struct notifier_block *n,
unsigned long action, void *hcpu)
{
int hotcpu = (unsigned long)hcpu;
struct cpuidle_device *dev;
switch (action & ~CPU_TASKS_FROZEN) {
case CPU_ONLINE:
if (lapic_timer_reliable_states != LAPIC_TIMER_ALWAYS_RELIABLE)
smp_call_function_single(hotcpu, __setup_broadcast_timer,
(void *)true, 1);
/*
* Some systems can hotplug a cpu at runtime after
* the kernel has booted, we have to initialize the
* driver in this case
*/
dev = per_cpu_ptr(intel_idle_cpuidle_devices, hotcpu);
if (dev->registered)
break;
if (intel_idle_cpu_init(hotcpu))
return NOTIFY_BAD;
break;
}
return NOTIFY_OK;
}
static struct notifier_block cpu_hotplug_notifier = {
.notifier_call = cpu_hotplug_notify,
};
static void auto_demotion_disable(void *dummy)
static void auto_demotion_disable(void)
{
unsigned long long msr_bits;
......@@ -959,7 +965,7 @@ static void auto_demotion_disable(void *dummy)
msr_bits &= ~(icpu->auto_demotion_disable_flags);
wrmsrl(MSR_NHM_SNB_PKG_CST_CFG_CTL, msr_bits);
}
static void c1e_promotion_disable(void *dummy)
static void c1e_promotion_disable(void)
{
unsigned long long msr_bits;
......@@ -978,6 +984,10 @@ static const struct idle_cpu idle_cpu_atom = {
.state_table = atom_cstates,
};
static const struct idle_cpu idle_cpu_tangier = {
.state_table = tangier_cstates,
};
static const struct idle_cpu idle_cpu_lincroft = {
.state_table = atom_cstates,
.auto_demotion_disable_flags = ATM_LNC_C6_AUTO_DEMOTE,
......@@ -1066,6 +1076,7 @@ static const struct x86_cpu_id intel_idle_ids[] __initconst = {
ICPU(INTEL_FAM6_SANDYBRIDGE_X, idle_cpu_snb),
ICPU(INTEL_FAM6_ATOM_CEDARVIEW, idle_cpu_atom),
ICPU(INTEL_FAM6_ATOM_SILVERMONT1, idle_cpu_byt),
ICPU(INTEL_FAM6_ATOM_MERRIFIELD, idle_cpu_tangier),
ICPU(INTEL_FAM6_ATOM_AIRMONT, idle_cpu_cht),
ICPU(INTEL_FAM6_IVYBRIDGE, idle_cpu_ivb),
ICPU(INTEL_FAM6_IVYBRIDGE_X, idle_cpu_ivt),
......@@ -1084,6 +1095,7 @@ static const struct x86_cpu_id intel_idle_ids[] __initconst = {
ICPU(INTEL_FAM6_KABYLAKE_DESKTOP, idle_cpu_skl),
ICPU(INTEL_FAM6_SKYLAKE_X, idle_cpu_skx),
ICPU(INTEL_FAM6_XEON_PHI_KNL, idle_cpu_knl),
ICPU(INTEL_FAM6_XEON_PHI_KNM, idle_cpu_knl),
ICPU(INTEL_FAM6_ATOM_GOLDMONT, idle_cpu_bxt),
ICPU(INTEL_FAM6_ATOM_DENVERTON, idle_cpu_dnv),
{}
......@@ -1373,12 +1385,11 @@ static void __init intel_idle_cpuidle_driver_init(void)
* allocate, initialize, register cpuidle_devices
* @cpu: cpu/core to initialize
*/
static int intel_idle_cpu_init(int cpu)
static int intel_idle_cpu_init(unsigned int cpu)
{
struct cpuidle_device *dev;
dev = per_cpu_ptr(intel_idle_cpuidle_devices, cpu);
dev->cpu = cpu;
if (cpuidle_register_device(dev)) {
......@@ -1387,17 +1398,36 @@ static int intel_idle_cpu_init(int cpu)
}
if (icpu->auto_demotion_disable_flags)
smp_call_function_single(cpu, auto_demotion_disable, NULL, 1);
auto_demotion_disable();
if (icpu->disable_promotion_to_c1e)
smp_call_function_single(cpu, c1e_promotion_disable, NULL, 1);
c1e_promotion_disable();
return 0;
}
static int intel_idle_cpu_online(unsigned int cpu)
{
struct cpuidle_device *dev;
if (lapic_timer_reliable_states != LAPIC_TIMER_ALWAYS_RELIABLE)
__setup_broadcast_timer(true);
/*
* Some systems can hotplug a CPU at runtime after the kernel has
* booted; in that case the driver has to be initialized here.
*/
dev = per_cpu_ptr(intel_idle_cpuidle_devices, cpu);
if (!dev->registered)
return intel_idle_cpu_init(cpu);
return 0;
}
static int __init intel_idle_init(void)
{
int retval, i;
int retval;
/* Do not load intel_idle at all for now if idle= is passed */
if (boot_option_idle_override != IDLE_NO_OVERRIDE)
......@@ -1417,35 +1447,29 @@ static int __init intel_idle_init(void)
struct cpuidle_driver *drv = cpuidle_get_driver();
printk(KERN_DEBUG PREFIX "intel_idle yielding to %s",
drv ? drv->name : "none");
free_percpu(intel_idle_cpuidle_devices);
return retval;
goto init_driver_fail;
}
cpu_notifier_register_begin();
for_each_online_cpu(i) {
retval = intel_idle_cpu_init(i);
if (retval) {
intel_idle_cpuidle_devices_uninit();
cpu_notifier_register_done();
cpuidle_unregister_driver(&intel_idle_driver);
free_percpu(intel_idle_cpuidle_devices);
return retval;
}
}
__register_cpu_notifier(&cpu_hotplug_notifier);
if (boot_cpu_has(X86_FEATURE_ARAT)) /* Always Reliable APIC Timer */
lapic_timer_reliable_states = LAPIC_TIMER_ALWAYS_RELIABLE;
else
on_each_cpu(__setup_broadcast_timer, (void *)true, 1);
cpu_notifier_register_done();
retval = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "idle/intel:online",
intel_idle_cpu_online, NULL);
if (retval < 0)
goto hp_setup_fail;
pr_debug(PREFIX "lapic_timer_reliable_states 0x%x\n",
lapic_timer_reliable_states);
return 0;
hp_setup_fail:
intel_idle_cpuidle_devices_uninit();
cpuidle_unregister_driver(&intel_idle_driver);
init_driver_fail:
free_percpu(intel_idle_cpuidle_devices);
return retval;
}
device_initcall(intel_idle_init);
......
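Background on the conversion above: a dynamic hotplug state registered with CPUHP_AP_ONLINE_DYN runs its online callback on every CPU that is already online at registration time and on each CPU that comes online later, which is why the explicit for_each_online_cpu() loop and the CPU notifier could be dropped. A minimal sketch of the generic pattern, with illustrative names rather than this driver's code:

	#include <linux/cpuhotplug.h>
	#include <linux/init.h>

	static enum cpuhp_state example_hp_state;

	static int example_online(unsigned int cpu)
	{
		/* per-CPU setup; runs for already-online and future CPUs */
		return 0;
	}

	static int __init example_init(void)
	{
		int ret;

		/* NULL teardown callback, as in intel_idle above */
		ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "example:online",
					example_online, NULL);
		if (ret < 0)
			return ret;
		example_hp_state = ret;	/* dynamic states return the allocated state number */
		return 0;
	}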
This diff is collapsed.
......@@ -245,6 +245,8 @@ void arch_cpu_idle_dead(void);
int cpu_report_state(int cpu);
int cpu_check_up_prepare(int cpu);
void cpu_set_state_online(int cpu);
void play_idle(unsigned long duration_ms);
#ifdef CONFIG_HOTPLUG_CPU
bool cpu_wait_death(unsigned int cpu, int seconds);
bool cpu_report_death(void);
......
......@@ -74,6 +74,7 @@ struct cpuidle_driver_kobj;
struct cpuidle_device {
unsigned int registered:1;
unsigned int enabled:1;
unsigned int use_deepest_state:1;
unsigned int cpu;
int last_residency;
......@@ -192,11 +193,12 @@ static inline struct cpuidle_driver *cpuidle_get_cpu_driver(
static inline struct cpuidle_device *cpuidle_get_device(void) {return NULL; }
#endif
#if defined(CONFIG_CPU_IDLE) && defined(CONFIG_SUSPEND)
#ifdef CONFIG_CPU_IDLE
extern int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
struct cpuidle_device *dev);
extern int cpuidle_enter_freeze(struct cpuidle_driver *drv,
struct cpuidle_device *dev);
extern void cpuidle_use_deepest_state(bool enable);
#else
static inline int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
struct cpuidle_device *dev)
......@@ -204,6 +206,9 @@ static inline int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
static inline int cpuidle_enter_freeze(struct cpuidle_driver *drv,
struct cpuidle_device *dev)
{return -ENODEV; }
static inline void cpuidle_use_deepest_state(bool enable)
{
}
#endif
/* kernel/sched/idle.c */
......@@ -235,8 +240,6 @@ struct cpuidle_governor {
int (*select) (struct cpuidle_driver *drv,
struct cpuidle_device *dev);
void (*reflect) (struct cpuidle_device *dev, int index);
struct module *owner;
};
#ifdef CONFIG_CPU_IDLE
......
......@@ -2254,6 +2254,7 @@ extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut,
/*
* Per process flags
*/
#define PF_IDLE 0x00000002 /* I am an IDLE thread */
#define PF_EXITING 0x00000004 /* getting shut down */
#define PF_EXITPIDONE 0x00000008 /* pi exit done on shut down */
#define PF_VCPU 0x00000010 /* I'm a virtual CPU */
......@@ -2611,7 +2612,7 @@ extern struct task_struct *idle_task(int cpu);
*/
static inline bool is_idle_task(const struct task_struct *p)
{
return p->pid == 0;
return !!(p->flags & PF_IDLE);
}
extern struct task_struct *curr_task(int cpu);
extern void ia64_set_curr_task(int cpu, struct task_struct *p);
......
......@@ -1540,7 +1540,7 @@ static __latent_entropy struct task_struct *copy_process(
goto bad_fork_cleanup_count;
delayacct_tsk_init(p); /* Must remain after dup_task_struct() */
p->flags &= ~(PF_SUPERPRIV | PF_WQ_WORKER);
p->flags &= ~(PF_SUPERPRIV | PF_WQ_WORKER | PF_IDLE);
p->flags |= PF_FORKNOEXEC;
INIT_LIST_HEAD(&p->children);
INIT_LIST_HEAD(&p->sibling);
......
......@@ -5279,6 +5279,7 @@ void init_idle(struct task_struct *idle, int cpu)
__sched_fork(0, idle);
idle->state = TASK_RUNNING;
idle->se.exec_start = sched_clock();
idle->flags |= PF_IDLE;
kasan_unpoison_task_stack(idle);
......
......@@ -164,11 +164,14 @@ static void cpuidle_idle_call(void)
* timekeeping to prevent timer interrupts from kicking us out of idle
* until a proper wakeup interrupt happens.
*/
if (idle_should_freeze()) {
entered_state = cpuidle_enter_freeze(drv, dev);
if (entered_state > 0) {
local_irq_enable();
goto exit_idle;
if (idle_should_freeze() || dev->use_deepest_state) {
if (idle_should_freeze()) {
entered_state = cpuidle_enter_freeze(drv, dev);
if (entered_state > 0) {
local_irq_enable();
goto exit_idle;
}
}
next_state = cpuidle_find_deepest_state(drv, dev);
......@@ -202,76 +205,65 @@ static void cpuidle_idle_call(void)
*
* Called with polling cleared.
*/
static void cpu_idle_loop(void)
static void do_idle(void)
{
int cpu = smp_processor_id();
while (1) {
/*
* If the arch has a polling bit, we maintain an invariant:
*
* Our polling bit is clear if we're not scheduled (i.e. if
* rq->curr != rq->idle). This means that, if rq->idle has
* the polling bit set, then setting need_resched is
* guaranteed to cause the cpu to reschedule.
*/
__current_set_polling();
quiet_vmstat();
tick_nohz_idle_enter();
/*
* If the arch has a polling bit, we maintain an invariant:
*
* Our polling bit is clear if we're not scheduled (i.e. if rq->curr !=
* rq->idle). This means that, if rq->idle has the polling bit set,
* then setting need_resched is guaranteed to cause the CPU to
* reschedule.
*/
while (!need_resched()) {
check_pgt_cache();
rmb();
__current_set_polling();
tick_nohz_idle_enter();
if (cpu_is_offline(cpu)) {
cpuhp_report_idle_dead();
arch_cpu_idle_dead();
}
while (!need_resched()) {
check_pgt_cache();
rmb();
local_irq_disable();
arch_cpu_idle_enter();
/*
* In poll mode we reenable interrupts and spin.
*
* Also if we detected in the wakeup from idle
* path that the tick broadcast device expired
* for us, we don't want to go deep idle as we
* know that the IPI is going to arrive right
* away
*/
if (cpu_idle_force_poll || tick_check_broadcast_expired())
cpu_idle_poll();
else
cpuidle_idle_call();
arch_cpu_idle_exit();
if (cpu_is_offline(smp_processor_id())) {
cpuhp_report_idle_dead();
arch_cpu_idle_dead();
}
/*
* Since we fell out of the loop above, we know
* TIF_NEED_RESCHED must be set, propagate it into
* PREEMPT_NEED_RESCHED.
*
* This is required because for polling idle loops we will
* not have had an IPI to fold the state for us.
*/
preempt_set_need_resched();
tick_nohz_idle_exit();
__current_clr_polling();
local_irq_disable();
arch_cpu_idle_enter();
/*
* We promise to call sched_ttwu_pending and reschedule
* if need_resched is set while polling is set. That
* means that clearing polling needs to be visible
* before doing these things.
* In poll mode we reenable interrupts and spin. Also if we
* detected in the wakeup from idle path that the tick
* broadcast device expired for us, we don't want to go deep
* idle as we know that the IPI is going to arrive right away.
*/
smp_mb__after_atomic();
sched_ttwu_pending();
schedule_preempt_disabled();
if (cpu_idle_force_poll || tick_check_broadcast_expired())
cpu_idle_poll();
else
cpuidle_idle_call();
arch_cpu_idle_exit();
}
/*
* Since we fell out of the loop above, we know TIF_NEED_RESCHED must
* be set, propagate it into PREEMPT_NEED_RESCHED.
*
* This is required because for polling idle loops we will not have had
* an IPI to fold the state for us.
*/
preempt_set_need_resched();
tick_nohz_idle_exit();
__current_clr_polling();
/*
* We promise to call sched_ttwu_pending() and reschedule if
* need_resched() is set while polling is set. That means that clearing
* polling needs to be visible before doing these things.
*/
smp_mb__after_atomic();
sched_ttwu_pending();
schedule_preempt_disabled();
}
bool cpu_in_idle(unsigned long pc)
......@@ -280,6 +272,56 @@ bool cpu_in_idle(unsigned long pc)
pc < (unsigned long)__cpuidle_text_end;
}
struct idle_timer {
struct hrtimer timer;
int done;
};
static enum hrtimer_restart idle_inject_timer_fn(struct hrtimer *timer)
{
struct idle_timer *it = container_of(timer, struct idle_timer, timer);
WRITE_ONCE(it->done, 1);
set_tsk_need_resched(current);
return HRTIMER_NORESTART;
}
void play_idle(unsigned long duration_ms)
{
struct idle_timer it;
/*
* Only FIFO tasks can disable the tick since they don't need the forced
* preemption.
*/
WARN_ON_ONCE(current->policy != SCHED_FIFO);
WARN_ON_ONCE(current->nr_cpus_allowed != 1);
WARN_ON_ONCE(!(current->flags & PF_KTHREAD));
WARN_ON_ONCE(!(current->flags & PF_NO_SETAFFINITY));
WARN_ON_ONCE(!duration_ms);
rcu_sleep_check();
preempt_disable();
current->flags |= PF_IDLE;
cpuidle_use_deepest_state(true);
it.done = 0;
hrtimer_init_on_stack(&it.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
it.timer.function = idle_inject_timer_fn;
hrtimer_start(&it.timer, ms_to_ktime(duration_ms), HRTIMER_MODE_REL_PINNED);
while (!READ_ONCE(it.done))
do_idle();
cpuidle_use_deepest_state(false);
current->flags &= ~PF_IDLE;
preempt_fold_need_resched();
preempt_enable();
}
EXPORT_SYMBOL_GPL(play_idle);
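A minimal usage sketch (assumed, not part of this diff) of the new API from an idle-injection kthread such as the intel_powerclamp worker converted elsewhere in this series; per the WARN_ON_ONCE() checks above, the caller must be a per-CPU SCHED_FIFO kthread (e.g. created with kthread_create_on_cpu(), which binds it to one CPU and sets PF_NO_SETAFFINITY):

	#include <linux/cpu.h>		/* play_idle() */
	#include <linux/delay.h>
	#include <linux/kthread.h>
	#include <linux/sched.h>

	static int idle_inject_fn(void *data)
	{
		struct sched_param param = { .sched_priority = MAX_USER_RT_PRIO / 2 };

		sched_setscheduler(current, SCHED_FIFO, &param);

		while (!kthread_should_stop()) {
			play_idle(10);	/* inject 10 ms of forced idle */
			msleep(40);	/* then run/sleep normally for a while */
		}
		return 0;
	}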
void cpu_startup_entry(enum cpuhp_state state)
{
/*
......@@ -299,5 +341,6 @@ void cpu_startup_entry(enum cpuhp_state state)
#endif
arch_cpu_idle_prepare();
cpuhp_online_idle(state);
cpu_idle_loop();
while (1)
do_idle();
}