Commit 002acf1f authored by Linus Torvalds

Merge tag 'pm-3.13-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm

Pull power management fixes from Rafael Wysocki:

 - cpufreq regression fix from Bjørn Mork restoring the pre-3.12
   behavior of the framework during system suspend/hibernation so that
   garbage sysfs files are not left behind in case of a suspend error

 - PNP regression fix to restore the correct states of devices after
   resume from hibernation (broken in 3.12).  From Dmitry Torokhov.

 - cpuidle fix to prevent cpuidle device unregistration from crashing
   due to a NULL pointer dereference if cpuidle has been disabled from
   the kernel command line.  From Konrad Rzeszutek Wilk.

 - intel_idle fix for the C6 state definition on Intel Avoton/Rangeley
   processors from Arne Bockholdt.

 - Power capping framework fix to make the energy_uj sysfs attribute
   work in accordance with the documentation.  From Srinivas Pandruvada.

 - epoll fix to make it ignore the EPOLLWAKEUP flag if the kernel has
   been compiled with CONFIG_PM_SLEEP unset (in which case that flag
   should not have any effect); see the userspace sketch after this
   list.  From Amit Pundir.

 - cpufreq fix to prevent governor sysfs files from being lost over
   system suspend/resume in some (arguably unusual) situations.  From
   Viresh Kumar.
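
A quick userspace illustration of the epoll fix above: the sketch below
requests EPOLLWAKEUP on an eventfd.  The kernel silently clears the flag
when the caller lacks CAP_BLOCK_SUSPEND (and, with this fix, also when
CONFIG_PM_SLEEP is unset), so epoll_ctl() succeeds either way.  The
eventfd setup and the program itself are illustrative only, not part of
this series; EPOLLWAKEUP needs glibc 2.16+ headers.

```c
/* Sketch: ask epoll to arm a wakeup source for an eventfd.  If the
 * process lacks CAP_BLOCK_SUSPEND, or the kernel was built without
 * CONFIG_PM_SLEEP, EPOLLWAKEUP is silently dropped and the call
 * still succeeds.
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/epoll.h>
#include <sys/eventfd.h>
#include <unistd.h>

int main(void)
{
	int efd = eventfd(0, 0);
	int epfd = epoll_create1(0);
	struct epoll_event ev;

	if (efd < 0 || epfd < 0) {
		perror("setup");
		return EXIT_FAILURE;
	}

	ev.events = EPOLLIN | EPOLLWAKEUP;	/* wakeup-source request */
	ev.data.fd = efd;

	if (epoll_ctl(epfd, EPOLL_CTL_ADD, efd, &ev) < 0) {
		perror("epoll_ctl");
		return EXIT_FAILURE;
	}

	printf("registered eventfd %d with EPOLLWAKEUP requested\n", efd);
	close(epfd);
	close(efd);
	return EXIT_SUCCESS;
}
```
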

* tag 'pm-3.13-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm:
  PowerCap: Fix mode for energy counter
  PNP: fix restoring devices after hibernation
  cpuidle: Check for dev before deregistering it.
  epoll: drop EPOLLWAKEUP if PM_SLEEP is disabled
  cpufreq: fix garbage kobjects on errors during suspend/resume
  cpufreq: suspend governors on system suspend/hibernate
  intel_idle: Fixed C6 state on Avoton/Rangeley processors
parents b52b342d 8e703009
@@ -29,6 +29,7 @@
 #include <linux/async.h>
 #include <linux/suspend.h>
 #include <trace/events/power.h>
+#include <linux/cpufreq.h>
 #include <linux/cpuidle.h>
 #include <linux/timer.h>
@@ -540,6 +541,7 @@ static void dpm_resume_noirq(pm_message_t state)
 	dpm_show_time(starttime, state, "noirq");
 	resume_device_irqs();
 	cpuidle_resume();
+	cpufreq_resume();
 }

 /**
@@ -955,6 +957,7 @@ static int dpm_suspend_noirq(pm_message_t state)
 	ktime_t starttime = ktime_get();
 	int error = 0;

+	cpufreq_suspend();
 	cpuidle_pause();
 	suspend_device_irqs();
 	mutex_lock(&dpm_list_mtx);
...
@@ -26,6 +26,7 @@
 #include <linux/module.h>
 #include <linux/mutex.h>
 #include <linux/slab.h>
+#include <linux/suspend.h>
 #include <linux/syscore_ops.h>
 #include <linux/tick.h>
 #include <trace/events/power.h>
@@ -47,6 +48,9 @@ static LIST_HEAD(cpufreq_policy_list);
 static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor);
 #endif

+/* Flag to suspend/resume CPUFreq governors */
+static bool cpufreq_suspended;
+
 static inline bool has_target(void)
 {
 	return cpufreq_driver->target_index || cpufreq_driver->target;
@@ -1462,6 +1466,41 @@ static struct subsys_interface cpufreq_interface = {
 	.remove_dev = cpufreq_remove_dev,
 };

+void cpufreq_suspend(void)
+{
+	struct cpufreq_policy *policy;
+
+	if (!has_target())
+		return;
+
+	pr_debug("%s: Suspending Governors\n", __func__);
+
+	list_for_each_entry(policy, &cpufreq_policy_list, policy_list)
+		if (__cpufreq_governor(policy, CPUFREQ_GOV_STOP))
+			pr_err("%s: Failed to stop governor for policy: %p\n",
+				__func__, policy);
+
+	cpufreq_suspended = true;
+}
+
+void cpufreq_resume(void)
+{
+	struct cpufreq_policy *policy;
+
+	if (!has_target())
+		return;
+
+	pr_debug("%s: Resuming Governors\n", __func__);
+
+	cpufreq_suspended = false;
+
+	list_for_each_entry(policy, &cpufreq_policy_list, policy_list)
+		if (__cpufreq_governor(policy, CPUFREQ_GOV_START)
+		    || __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))
+			pr_err("%s: Failed to start governor for policy: %p\n",
+				__func__, policy);
+}
+
 /**
  * cpufreq_bp_suspend - Prepare the boot CPU for system suspend.
  *
@@ -1764,6 +1803,10 @@ static int __cpufreq_governor(struct cpufreq_policy *policy,
 	struct cpufreq_governor *gov = NULL;
 #endif

+	/* Don't start any governor operations if we are entering suspend */
+	if (cpufreq_suspended)
+		return 0;
+
 	if (policy->governor->max_transition_latency &&
 	    policy->cpuinfo.transition_latency >
 	    policy->governor->max_transition_latency) {
@@ -2076,9 +2119,6 @@ static int cpufreq_cpu_callback(struct notifier_block *nfb,
 	dev = get_cpu_device(cpu);
 	if (dev) {
-		if (action & CPU_TASKS_FROZEN)
-			frozen = true;
-
 		switch (action & ~CPU_TASKS_FROZEN) {
 		case CPU_ONLINE:
 			__cpufreq_add_dev(dev, NULL, frozen);
...
@@ -400,7 +400,7 @@ EXPORT_SYMBOL_GPL(cpuidle_register_device);
  */
 void cpuidle_unregister_device(struct cpuidle_device *dev)
 {
-	if (dev->registered == 0)
+	if (!dev || dev->registered == 0)
 		return;

 	cpuidle_pause_and_lock();
...
@@ -329,7 +329,7 @@ static struct cpuidle_state atom_cstates[] __initdata = {
 	{
 		.enter = NULL }
 };
-static struct cpuidle_state avn_cstates[CPUIDLE_STATE_MAX] = {
+static struct cpuidle_state avn_cstates[] __initdata = {
 	{
 		.name = "C1-AVN",
 		.desc = "MWAIT 0x00",
@@ -340,7 +340,7 @@ static struct cpuidle_state avn_cstates[CPUIDLE_STATE_MAX] = {
 	{
 		.name = "C6-AVN",
 		.desc = "MWAIT 0x51",
-		.flags = MWAIT2flg(0x58) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
+		.flags = MWAIT2flg(0x51) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
 		.exit_latency = 15,
 		.target_residency = 45,
 		.enter = &intel_idle },
...
@@ -197,6 +197,11 @@ static int pnp_bus_freeze(struct device *dev)
 	return __pnp_bus_suspend(dev, PMSG_FREEZE);
 }

+static int pnp_bus_poweroff(struct device *dev)
+{
+	return __pnp_bus_suspend(dev, PMSG_HIBERNATE);
+}
+
 static int pnp_bus_resume(struct device *dev)
 {
 	struct pnp_dev *pnp_dev = to_pnp_dev(dev);
@@ -234,9 +239,14 @@ static int pnp_bus_resume(struct device *dev)
 }

 static const struct dev_pm_ops pnp_bus_dev_pm_ops = {
+	/* Suspend callbacks */
 	.suspend = pnp_bus_suspend,
-	.freeze = pnp_bus_freeze,
 	.resume = pnp_bus_resume,
+	/* Hibernate callbacks */
+	.freeze = pnp_bus_freeze,
+	.thaw = pnp_bus_resume,
+	.poweroff = pnp_bus_poweroff,
+	.restore = pnp_bus_resume,
 };

 struct bus_type pnp_bus_type = {
...
@@ -377,9 +377,14 @@ static void create_power_zone_common_attributes(
 	if (power_zone->ops->get_max_energy_range_uj)
 		power_zone->zone_dev_attrs[count++] =
				&dev_attr_max_energy_range_uj.attr;
-	if (power_zone->ops->get_energy_uj)
+	if (power_zone->ops->get_energy_uj) {
+		if (power_zone->ops->reset_energy_uj)
+			dev_attr_energy_uj.attr.mode = S_IWUSR | S_IRUGO;
+		else
+			dev_attr_energy_uj.attr.mode = S_IRUGO;
 		power_zone->zone_dev_attrs[count++] =
				&dev_attr_energy_uj.attr;
+	}
 	if (power_zone->ops->get_power_uw)
 		power_zone->zone_dev_attrs[count++] =
				&dev_attr_power_uw.attr;
...
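
The mode logic above only changes how the existing energy_uj attribute
is exposed: it is always world-readable, and becomes writable (to reset
the counter) only when the zone implements reset_energy_uj, matching
the documented behavior.  A minimal userspace read of the counter,
sketched against an assumed intel-rapl zone path (real zone names vary
by platform and are not part of this commit):

```c
/* Sketch: read a powercap zone's energy counter from sysfs.
 * The zone path below is an assumed example; real systems expose
 * zones under /sys/class/powercap/ with platform-specific names.
 */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	const char *path = "/sys/class/powercap/intel-rapl:0/energy_uj";
	unsigned long long energy_uj;
	FILE *f = fopen(path, "r");

	if (!f) {
		perror(path);
		return EXIT_FAILURE;
	}
	if (fscanf(f, "%llu", &energy_uj) != 1) {
		fprintf(stderr, "unexpected contents in %s\n", path);
		fclose(f);
		return EXIT_FAILURE;
	}
	fclose(f);

	printf("energy consumed: %llu uJ\n", energy_uj);
	return EXIT_SUCCESS;
}
```

Writing "0" to the same file resets the counter, but S_IWUSR restricts
that to root, and only on zones whose driver provides reset_energy_uj.
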
@@ -1852,8 +1852,7 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
 		goto error_tgt_fput;

 	/* Check if EPOLLWAKEUP is allowed */
-	if ((epds.events & EPOLLWAKEUP) && !capable(CAP_BLOCK_SUSPEND))
-		epds.events &= ~EPOLLWAKEUP;
+	ep_take_care_of_epollwakeup(&epds);

 	/*
	 * We have to check that the file structure underneath the file descriptor
...
@@ -280,6 +280,14 @@ cpufreq_verify_within_cpu_limits(struct cpufreq_policy *policy)
			policy->cpuinfo.max_freq);
 }

+#ifdef CONFIG_CPU_FREQ
+void cpufreq_suspend(void);
+void cpufreq_resume(void);
+#else
+static inline void cpufreq_suspend(void) {}
+static inline void cpufreq_resume(void) {}
+#endif
+
 /*********************************************************************
  *                     CPUFREQ NOTIFIER INTERFACE                    *
 *********************************************************************/
...
@@ -61,5 +61,16 @@ struct epoll_event {
 	__u64 data;
 } EPOLL_PACKED;

+#ifdef CONFIG_PM_SLEEP
+static inline void ep_take_care_of_epollwakeup(struct epoll_event *epev)
+{
+	if ((epev->events & EPOLLWAKEUP) && !capable(CAP_BLOCK_SUSPEND))
+		epev->events &= ~EPOLLWAKEUP;
+}
+#else
+static inline void ep_take_care_of_epollwakeup(struct epoll_event *epev)
+{
+	epev->events &= ~EPOLLWAKEUP;
+}
+#endif
+
 #endif /* _UAPI_LINUX_EVENTPOLL_H */