Commit 1bc27dec authored by Linus Torvalds's avatar Linus Torvalds

Merge tag 'pm-5.19-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm

Pull power management fixes from Rafael Wysocki:
 "These fix an intel_idle issue introduced during the 5.16 development
  cycle and two recent regressions in the system reboot/poweroff code.

  Specifics:

   - Fix CPUIDLE_FLAG_IRQ_ENABLE handling in intel_idle (Peter Zijlstra)

   - Allow all platforms to use the global poweroff handler and make
     non-syscall poweroff code paths work again (Dmitry Osipenko)"

* tag 'pm-5.19-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm:
  cpuidle,intel_idle: Fix CPUIDLE_FLAG_IRQ_ENABLE
  kernel/reboot: Fix powering off using a non-syscall code paths
  kernel/reboot: Use static handler for register_platform_power_off()
parents d56fd986 67e59f8d
...@@ -115,6 +115,18 @@ static unsigned int mwait_substates __initdata; ...@@ -115,6 +115,18 @@ static unsigned int mwait_substates __initdata;
#define flg2MWAIT(flags) (((flags) >> 24) & 0xFF) #define flg2MWAIT(flags) (((flags) >> 24) & 0xFF)
#define MWAIT2flg(eax) ((eax & 0xFF) << 24) #define MWAIT2flg(eax) ((eax & 0xFF) << 24)
/*
 * __intel_idle - Common MWAIT-based idle entry shared by the enter callbacks.
 * @dev: cpuidle device of the target CPU (unused here, kept for the
 *       cpuidle enter-callback signature).
 * @drv: cpuidle driver owning @index.
 * @index: index of the target idle state in @drv->states.
 *
 * Derives the MWAIT hint from the state's flags and idles the CPU.
 * Returns @index, as required by the cpuidle ->enter() contract.
 */
static __always_inline int __intel_idle(struct cpuidle_device *dev,
					struct cpuidle_driver *drv, int index)
{
	unsigned long mwait_hint = flg2MWAIT(drv->states[index].flags);

	/* ECX bit 0: break out of MWAIT when an interrupt is pending. */
	mwait_idle_with_hints(mwait_hint, 1UL);

	return index;
}
/** /**
* intel_idle - Ask the processor to enter the given idle state. * intel_idle - Ask the processor to enter the given idle state.
* @dev: cpuidle device of the target CPU. * @dev: cpuidle device of the target CPU.
...@@ -132,16 +144,19 @@ static unsigned int mwait_substates __initdata; ...@@ -132,16 +144,19 @@ static unsigned int mwait_substates __initdata;
static __cpuidle int intel_idle(struct cpuidle_device *dev, static __cpuidle int intel_idle(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int index) struct cpuidle_driver *drv, int index)
{ {
struct cpuidle_state *state = &drv->states[index]; return __intel_idle(dev, drv, index);
unsigned long eax = flg2MWAIT(state->flags); }
unsigned long ecx = 1; /* break on interrupt flag */
if (state->flags & CPUIDLE_FLAG_IRQ_ENABLE) static __cpuidle int intel_idle_irq(struct cpuidle_device *dev,
local_irq_enable(); struct cpuidle_driver *drv, int index)
{
int ret;
mwait_idle_with_hints(eax, ecx); raw_local_irq_enable();
ret = __intel_idle(dev, drv, index);
raw_local_irq_disable();
return index; return ret;
} }
/** /**
...@@ -1801,6 +1816,9 @@ static void __init intel_idle_init_cstates_icpu(struct cpuidle_driver *drv) ...@@ -1801,6 +1816,9 @@ static void __init intel_idle_init_cstates_icpu(struct cpuidle_driver *drv)
/* Structure copy. */ /* Structure copy. */
drv->states[drv->state_count] = cpuidle_state_table[cstate]; drv->states[drv->state_count] = cpuidle_state_table[cstate];
if (cpuidle_state_table[cstate].flags & CPUIDLE_FLAG_IRQ_ENABLE)
drv->states[drv->state_count].enter = intel_idle_irq;
if ((disabled_states_mask & BIT(drv->state_count)) || if ((disabled_states_mask & BIT(drv->state_count)) ||
((icpu->use_acpi || force_use_acpi) && ((icpu->use_acpi || force_use_acpi) &&
intel_idle_off_by_default(mwait_hint) && intel_idle_off_by_default(mwait_hint) &&
......
...@@ -315,6 +315,43 @@ static int sys_off_notify(struct notifier_block *nb, ...@@ -315,6 +315,43 @@ static int sys_off_notify(struct notifier_block *nb,
return handler->sys_off_cb(&data); return handler->sys_off_cb(&data);
} }
static struct sys_off_handler platform_sys_off_handler;

/*
 * alloc_sys_off_handler - Obtain storage for a sys-off handler.
 * @priority: registration priority of the handler being created.
 *
 * Platforms like m68k register their platform power-off handler so early
 * in boot that the memory allocator is not available yet, so the single
 * SYS_OFF_PRIO_PLATFORM handler is served from a static slot instead of
 * the heap.
 *
 * Returns a zeroed handler, or ERR_PTR(-EBUSY) if the static platform
 * slot is already in use, or ERR_PTR(-ENOMEM) on allocation failure.
 */
static struct sys_off_handler *alloc_sys_off_handler(int priority)
{
	struct sys_off_handler *h;
	gfp_t gfp;

	if (priority == SYS_OFF_PRIO_PLATFORM) {
		/* cb_data doubles as the "slot occupied" marker. */
		if (platform_sys_off_handler.cb_data)
			return ERR_PTR(-EBUSY);
		return &platform_sys_off_handler;
	}

	/* Late in shutdown we may not be able to sleep for an allocation. */
	gfp = system_state > SYSTEM_RUNNING ? GFP_ATOMIC : GFP_KERNEL;

	h = kzalloc(sizeof(*h), gfp);
	return h ? h : ERR_PTR(-ENOMEM);
}
/*
 * free_sys_off_handler - Release a handler obtained from alloc_sys_off_handler().
 * @handler: handler to release; must have come from alloc_sys_off_handler().
 *
 * The static platform slot is recycled by zeroing it for the next user;
 * everything else was kzalloc'd and is returned to the heap.
 */
static void free_sys_off_handler(struct sys_off_handler *handler)
{
	if (handler != &platform_sys_off_handler) {
		kfree(handler);
		return;
	}

	memset(handler, 0, sizeof(*handler));
}
/** /**
* register_sys_off_handler - Register sys-off handler * register_sys_off_handler - Register sys-off handler
* @mode: Sys-off mode * @mode: Sys-off mode
...@@ -345,9 +382,9 @@ register_sys_off_handler(enum sys_off_mode mode, ...@@ -345,9 +382,9 @@ register_sys_off_handler(enum sys_off_mode mode,
struct sys_off_handler *handler; struct sys_off_handler *handler;
int err; int err;
handler = kzalloc(sizeof(*handler), GFP_KERNEL); handler = alloc_sys_off_handler(priority);
if (!handler) if (IS_ERR(handler))
return ERR_PTR(-ENOMEM); return handler;
switch (mode) { switch (mode) {
case SYS_OFF_MODE_POWER_OFF_PREPARE: case SYS_OFF_MODE_POWER_OFF_PREPARE:
...@@ -364,7 +401,7 @@ register_sys_off_handler(enum sys_off_mode mode, ...@@ -364,7 +401,7 @@ register_sys_off_handler(enum sys_off_mode mode,
break; break;
default: default:
kfree(handler); free_sys_off_handler(handler);
return ERR_PTR(-EINVAL); return ERR_PTR(-EINVAL);
} }
...@@ -391,7 +428,7 @@ register_sys_off_handler(enum sys_off_mode mode, ...@@ -391,7 +428,7 @@ register_sys_off_handler(enum sys_off_mode mode,
} }
if (err) { if (err) {
kfree(handler); free_sys_off_handler(handler);
return ERR_PTR(err); return ERR_PTR(err);
} }
...@@ -409,7 +446,7 @@ void unregister_sys_off_handler(struct sys_off_handler *handler) ...@@ -409,7 +446,7 @@ void unregister_sys_off_handler(struct sys_off_handler *handler)
{ {
int err; int err;
if (!handler) if (IS_ERR_OR_NULL(handler))
return; return;
if (handler->blocking) if (handler->blocking)
...@@ -422,7 +459,7 @@ void unregister_sys_off_handler(struct sys_off_handler *handler) ...@@ -422,7 +459,7 @@ void unregister_sys_off_handler(struct sys_off_handler *handler)
/* sanity check, shall never happen */ /* sanity check, shall never happen */
WARN_ON(err); WARN_ON(err);
kfree(handler); free_sys_off_handler(handler);
} }
EXPORT_SYMBOL_GPL(unregister_sys_off_handler); EXPORT_SYMBOL_GPL(unregister_sys_off_handler);
...@@ -584,7 +621,23 @@ static void do_kernel_power_off_prepare(void) ...@@ -584,7 +621,23 @@ static void do_kernel_power_off_prepare(void)
*/ */
void do_kernel_power_off(void) void do_kernel_power_off(void)
{ {
struct sys_off_handler *sys_off = NULL;
/*
* Register sys-off handlers for legacy PM callback. This allows
* legacy PM callbacks temporary co-exist with the new sys-off API.
*
* TODO: Remove legacy handlers once all legacy PM users will be
* switched to the sys-off based APIs.
*/
if (pm_power_off)
sys_off = register_sys_off_handler(SYS_OFF_MODE_POWER_OFF,
SYS_OFF_PRIO_DEFAULT,
legacy_pm_power_off, NULL);
atomic_notifier_call_chain(&power_off_handler_list, 0, NULL); atomic_notifier_call_chain(&power_off_handler_list, 0, NULL);
unregister_sys_off_handler(sys_off);
} }
/** /**
...@@ -595,7 +648,8 @@ void do_kernel_power_off(void) ...@@ -595,7 +648,8 @@ void do_kernel_power_off(void)
*/ */
bool kernel_can_power_off(void) bool kernel_can_power_off(void)
{ {
return !atomic_notifier_call_chain_is_empty(&power_off_handler_list); return !atomic_notifier_call_chain_is_empty(&power_off_handler_list) ||
pm_power_off;
} }
EXPORT_SYMBOL_GPL(kernel_can_power_off); EXPORT_SYMBOL_GPL(kernel_can_power_off);
...@@ -630,7 +684,6 @@ SYSCALL_DEFINE4(reboot, int, magic1, int, magic2, unsigned int, cmd, ...@@ -630,7 +684,6 @@ SYSCALL_DEFINE4(reboot, int, magic1, int, magic2, unsigned int, cmd,
void __user *, arg) void __user *, arg)
{ {
struct pid_namespace *pid_ns = task_active_pid_ns(current); struct pid_namespace *pid_ns = task_active_pid_ns(current);
struct sys_off_handler *sys_off = NULL;
char buffer[256]; char buffer[256];
int ret = 0; int ret = 0;
...@@ -655,21 +708,6 @@ SYSCALL_DEFINE4(reboot, int, magic1, int, magic2, unsigned int, cmd, ...@@ -655,21 +708,6 @@ SYSCALL_DEFINE4(reboot, int, magic1, int, magic2, unsigned int, cmd,
if (ret) if (ret)
return ret; return ret;
/*
* Register sys-off handlers for legacy PM callback. This allows
* legacy PM callbacks temporary co-exist with the new sys-off API.
*
* TODO: Remove legacy handlers once all legacy PM users will be
* switched to the sys-off based APIs.
*/
if (pm_power_off) {
sys_off = register_sys_off_handler(SYS_OFF_MODE_POWER_OFF,
SYS_OFF_PRIO_DEFAULT,
legacy_pm_power_off, NULL);
if (IS_ERR(sys_off))
return PTR_ERR(sys_off);
}
/* Instead of trying to make the power_off code look like /* Instead of trying to make the power_off code look like
* halt when pm_power_off is not set do it the easy way. * halt when pm_power_off is not set do it the easy way.
*/ */
...@@ -727,7 +765,6 @@ SYSCALL_DEFINE4(reboot, int, magic1, int, magic2, unsigned int, cmd, ...@@ -727,7 +765,6 @@ SYSCALL_DEFINE4(reboot, int, magic1, int, magic2, unsigned int, cmd,
break; break;
} }
mutex_unlock(&system_transition_mutex); mutex_unlock(&system_transition_mutex);
unregister_sys_off_handler(sys_off);
return ret; return ret;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment