Commit c9d26423 authored by Linus Torvalds's avatar Linus Torvalds

Merge tag 'pm+acpi-3.17-rc1-2' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm

Pull more ACPI and power management updates from Rafael Wysocki:
 "These are a couple of regression fixes, cpuidle menu governor
  optimizations, fixes for ACPI processor and battery drivers,
  hibernation fix to avoid problems related to the e820 memory map,
  fixes for a few cpufreq drivers and a new version of the suspend
  profiling tool analyze_suspend.py.

  Specifics:

   - Fix for an ACPI-based device hotplug regression introduced in 3.14
     that causes a kernel panic to trigger when memory hot-remove is
     attempted with CONFIG_ACPI_HOTPLUG_MEMORY unset from Tang Chen

   - Fix for a cpufreq regression introduced in 3.16 that triggers a
     "sleeping function called from invalid context" bug in
     dev_pm_opp_init_cpufreq_table() from Stephen Boyd

   - ACPI battery driver fix for a warning message added in 3.16 that
     prints silly stuff sometimes from Mariusz Ceier

   - Hibernation fix for safer handling of mismatches in the e820 memory
     map between the configurations during image creation and during the
     subsequent restore from Chun-Yi Lee

   - ACPI processor driver fix to handle CPU hotplug notifications
     correctly during system suspend/resume from Lan Tianyu

   - Series of four cpuidle menu governor cleanups that also should
     speed it up a bit from Mel Gorman

   - Fixes for the speedstep-smi, integrator, cpu0 and arm_big_little
     cpufreq drivers from Hans Wennborg, Himangi Saraogi, Markus
     Pargmann and Uwe Kleine-König

   - Version 3.0 of the analyze_suspend.py suspend profiling tool from
     Todd E Brandt"

* tag 'pm+acpi-3.17-rc1-2' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm:
  ACPI / battery: Fix warning message in acpi_battery_get_state()
  PM / tools: analyze_suspend.py: update to v3.0
  cpufreq: arm_big_little: fix module license spec
  cpufreq: speedstep-smi: fix decimal printf specifiers
  ACPI / hotplug: Check scan handlers in acpi_scan_hot_remove()
  cpufreq: OPP: Avoid sleeping while atomic
  cpufreq: cpu0: Do not print error message when deferring
  cpufreq: integrator: Use set_cpus_allowed_ptr
  PM / hibernate: avoid unsafe pages in e820 reserved regions
  ACPI / processor: Make acpi_cpu_soft_notify() process CPU FROZEN events
  cpuidle: menu: Lookup CPU runqueues less
  cpuidle: menu: Call nr_iowait_cpu less times
  cpuidle: menu: Use ktime_to_us instead of reinventing the wheel
  cpuidle: menu: Use shifts when calculating averages where possible
parents a11c5c9e af5b7e84
...@@ -540,12 +540,12 @@ static int acpi_battery_get_state(struct acpi_battery *battery) ...@@ -540,12 +540,12 @@ static int acpi_battery_get_state(struct acpi_battery *battery)
*/ */
if (battery->capacity_now > battery->full_charge_capacity if (battery->capacity_now > battery->full_charge_capacity
&& battery->full_charge_capacity != ACPI_BATTERY_VALUE_UNKNOWN) { && battery->full_charge_capacity != ACPI_BATTERY_VALUE_UNKNOWN) {
battery->capacity_now = battery->full_charge_capacity;
if (battery->capacity_now != battery->design_capacity) if (battery->capacity_now != battery->design_capacity)
printk_once(KERN_WARNING FW_BUG printk_once(KERN_WARNING FW_BUG
"battery: reported current charge level (%d) " "battery: reported current charge level (%d) "
"is higher than reported maximum charge level (%d).\n", "is higher than reported maximum charge level (%d).\n",
battery->capacity_now, battery->full_charge_capacity); battery->capacity_now, battery->full_charge_capacity);
battery->capacity_now = battery->full_charge_capacity;
} }
if (test_bit(ACPI_BATTERY_QUIRK_PERCENTAGE_CAPACITY, &battery->flags) if (test_bit(ACPI_BATTERY_QUIRK_PERCENTAGE_CAPACITY, &battery->flags)
......
...@@ -120,6 +120,7 @@ static int acpi_cpu_soft_notify(struct notifier_block *nfb, ...@@ -120,6 +120,7 @@ static int acpi_cpu_soft_notify(struct notifier_block *nfb,
unsigned int cpu = (unsigned long)hcpu; unsigned int cpu = (unsigned long)hcpu;
struct acpi_processor *pr = per_cpu(processors, cpu); struct acpi_processor *pr = per_cpu(processors, cpu);
struct acpi_device *device; struct acpi_device *device;
action &= ~CPU_TASKS_FROZEN;
/* /*
* CPU_STARTING and CPU_DYING must not sleep. Return here since * CPU_STARTING and CPU_DYING must not sleep. Return here since
......
...@@ -353,7 +353,8 @@ static int acpi_scan_hot_remove(struct acpi_device *device) ...@@ -353,7 +353,8 @@ static int acpi_scan_hot_remove(struct acpi_device *device)
unsigned long long sta; unsigned long long sta;
acpi_status status; acpi_status status;
if (device->handler->hotplug.demand_offline && !acpi_force_hot_remove) { if (device->handler && device->handler->hotplug.demand_offline
&& !acpi_force_hot_remove) {
if (!acpi_scan_is_offline(device, true)) if (!acpi_scan_is_offline(device, true))
return -EBUSY; return -EBUSY;
} else { } else {
......
...@@ -24,6 +24,7 @@ ...@@ -24,6 +24,7 @@
#include <linux/cpufreq.h> #include <linux/cpufreq.h>
#include <linux/cpumask.h> #include <linux/cpumask.h>
#include <linux/export.h> #include <linux/export.h>
#include <linux/module.h>
#include <linux/mutex.h> #include <linux/mutex.h>
#include <linux/of_platform.h> #include <linux/of_platform.h>
#include <linux/pm_opp.h> #include <linux/pm_opp.h>
...@@ -593,3 +594,7 @@ void bL_cpufreq_unregister(struct cpufreq_arm_bL_ops *ops) ...@@ -593,3 +594,7 @@ void bL_cpufreq_unregister(struct cpufreq_arm_bL_ops *ops)
arm_bL_ops = NULL; arm_bL_ops = NULL;
} }
EXPORT_SYMBOL_GPL(bL_cpufreq_unregister); EXPORT_SYMBOL_GPL(bL_cpufreq_unregister);
MODULE_AUTHOR("Viresh Kumar <viresh.kumar@linaro.org>");
MODULE_DESCRIPTION("Generic ARM big LITTLE cpufreq driver");
MODULE_LICENSE("GPL v2");
...@@ -114,4 +114,4 @@ module_platform_driver(generic_bL_platdrv); ...@@ -114,4 +114,4 @@ module_platform_driver(generic_bL_platdrv);
MODULE_AUTHOR("Viresh Kumar <viresh.kumar@linaro.org>"); MODULE_AUTHOR("Viresh Kumar <viresh.kumar@linaro.org>");
MODULE_DESCRIPTION("Generic ARM big LITTLE cpufreq driver via DT"); MODULE_DESCRIPTION("Generic ARM big LITTLE cpufreq driver via DT");
MODULE_LICENSE("GPL"); MODULE_LICENSE("GPL v2");
...@@ -137,7 +137,7 @@ static int cpu0_cpufreq_probe(struct platform_device *pdev) ...@@ -137,7 +137,7 @@ static int cpu0_cpufreq_probe(struct platform_device *pdev)
* not yet registered, we should try defering probe. * not yet registered, we should try defering probe.
*/ */
if (PTR_ERR(cpu_reg) == -EPROBE_DEFER) { if (PTR_ERR(cpu_reg) == -EPROBE_DEFER) {
dev_err(cpu_dev, "cpu0 regulator not ready, retry\n"); dev_dbg(cpu_dev, "cpu0 regulator not ready, retry\n");
ret = -EPROBE_DEFER; ret = -EPROBE_DEFER;
goto out_put_node; goto out_put_node;
} }
......
...@@ -60,7 +60,7 @@ int dev_pm_opp_init_cpufreq_table(struct device *dev, ...@@ -60,7 +60,7 @@ int dev_pm_opp_init_cpufreq_table(struct device *dev,
goto out; goto out;
} }
freq_table = kzalloc(sizeof(*freq_table) * (max_opps + 1), GFP_KERNEL); freq_table = kcalloc(sizeof(*freq_table), (max_opps + 1), GFP_ATOMIC);
if (!freq_table) { if (!freq_table) {
ret = -ENOMEM; ret = -ENOMEM;
goto out; goto out;
......
...@@ -92,7 +92,7 @@ static int integrator_set_target(struct cpufreq_policy *policy, ...@@ -92,7 +92,7 @@ static int integrator_set_target(struct cpufreq_policy *policy,
* Bind to the specified CPU. When this call returns, * Bind to the specified CPU. When this call returns,
* we should be running on the right CPU. * we should be running on the right CPU.
*/ */
set_cpus_allowed(current, cpumask_of_cpu(cpu)); set_cpus_allowed_ptr(current, cpumask_of(cpu));
BUG_ON(cpu != smp_processor_id()); BUG_ON(cpu != smp_processor_id());
/* get current setting */ /* get current setting */
...@@ -118,7 +118,7 @@ static int integrator_set_target(struct cpufreq_policy *policy, ...@@ -118,7 +118,7 @@ static int integrator_set_target(struct cpufreq_policy *policy,
freqs.new = icst_hz(&cclk_params, vco) / 1000; freqs.new = icst_hz(&cclk_params, vco) / 1000;
if (freqs.old == freqs.new) { if (freqs.old == freqs.new) {
set_cpus_allowed(current, cpus_allowed); set_cpus_allowed_ptr(current, &cpus_allowed);
return 0; return 0;
} }
...@@ -141,7 +141,7 @@ static int integrator_set_target(struct cpufreq_policy *policy, ...@@ -141,7 +141,7 @@ static int integrator_set_target(struct cpufreq_policy *policy,
/* /*
* Restore the CPUs allowed mask. * Restore the CPUs allowed mask.
*/ */
set_cpus_allowed(current, cpus_allowed); set_cpus_allowed_ptr(current, &cpus_allowed);
cpufreq_freq_transition_end(policy, &freqs, 0); cpufreq_freq_transition_end(policy, &freqs, 0);
...@@ -157,7 +157,7 @@ static unsigned int integrator_get(unsigned int cpu) ...@@ -157,7 +157,7 @@ static unsigned int integrator_get(unsigned int cpu)
cpus_allowed = current->cpus_allowed; cpus_allowed = current->cpus_allowed;
set_cpus_allowed(current, cpumask_of_cpu(cpu)); set_cpus_allowed_ptr(current, cpumask_of(cpu));
BUG_ON(cpu != smp_processor_id()); BUG_ON(cpu != smp_processor_id());
/* detect memory etc. */ /* detect memory etc. */
...@@ -173,7 +173,7 @@ static unsigned int integrator_get(unsigned int cpu) ...@@ -173,7 +173,7 @@ static unsigned int integrator_get(unsigned int cpu)
current_freq = icst_hz(&cclk_params, vco) / 1000; /* current freq */ current_freq = icst_hz(&cclk_params, vco) / 1000; /* current freq */
set_cpus_allowed(current, cpus_allowed); set_cpus_allowed_ptr(current, &cpus_allowed);
return current_freq; return current_freq;
} }
......
...@@ -324,8 +324,8 @@ static int __init speedstep_init(void) ...@@ -324,8 +324,8 @@ static int __init speedstep_init(void)
return -ENODEV; return -ENODEV;
} }
pr_debug("signature:0x%.8ulx, command:0x%.8ulx, " pr_debug("signature:0x%.8x, command:0x%.8x, "
"event:0x%.8ulx, perf_level:0x%.8ulx.\n", "event:0x%.8x, perf_level:0x%.8x.\n",
ist_info.signature, ist_info.command, ist_info.signature, ist_info.command,
ist_info.event, ist_info.perf_level); ist_info.event, ist_info.perf_level);
......
...@@ -31,7 +31,8 @@ ...@@ -31,7 +31,8 @@
* The default values do not overflow. * The default values do not overflow.
*/ */
#define BUCKETS 12 #define BUCKETS 12
#define INTERVALS 8 #define INTERVAL_SHIFT 3
#define INTERVALS (1UL << INTERVAL_SHIFT)
#define RESOLUTION 1024 #define RESOLUTION 1024
#define DECAY 8 #define DECAY 8
#define MAX_INTERESTING 50000 #define MAX_INTERESTING 50000
...@@ -133,15 +134,12 @@ struct menu_device { ...@@ -133,15 +134,12 @@ struct menu_device {
#define LOAD_INT(x) ((x) >> FSHIFT) #define LOAD_INT(x) ((x) >> FSHIFT)
#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100) #define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100)
static int get_loadavg(void) static inline int get_loadavg(unsigned long load)
{ {
unsigned long this = this_cpu_load(); return LOAD_INT(load) * 10 + LOAD_FRAC(load) / 10;
return LOAD_INT(this) * 10 + LOAD_FRAC(this) / 10;
} }
static inline int which_bucket(unsigned int duration) static inline int which_bucket(unsigned int duration, unsigned long nr_iowaiters)
{ {
int bucket = 0; int bucket = 0;
...@@ -151,7 +149,7 @@ static inline int which_bucket(unsigned int duration) ...@@ -151,7 +149,7 @@ static inline int which_bucket(unsigned int duration)
* This allows us to calculate * This allows us to calculate
* E(duration)|iowait * E(duration)|iowait
*/ */
if (nr_iowait_cpu(smp_processor_id())) if (nr_iowaiters)
bucket = BUCKETS/2; bucket = BUCKETS/2;
if (duration < 10) if (duration < 10)
...@@ -174,16 +172,16 @@ static inline int which_bucket(unsigned int duration) ...@@ -174,16 +172,16 @@ static inline int which_bucket(unsigned int duration)
* to be, the higher this multiplier, and thus the higher * to be, the higher this multiplier, and thus the higher
* the barrier to go to an expensive C state. * the barrier to go to an expensive C state.
*/ */
static inline int performance_multiplier(void) static inline int performance_multiplier(unsigned long nr_iowaiters, unsigned long load)
{ {
int mult = 1; int mult = 1;
/* for higher loadavg, we are more reluctant */ /* for higher loadavg, we are more reluctant */
mult += 2 * get_loadavg(); mult += 2 * get_loadavg(load);
/* for IO wait tasks (per cpu!) we add 5x each */ /* for IO wait tasks (per cpu!) we add 5x each */
mult += 10 * nr_iowait_cpu(smp_processor_id()); mult += 10 * nr_iowaiters;
return mult; return mult;
} }
...@@ -227,6 +225,9 @@ static void get_typical_interval(struct menu_device *data) ...@@ -227,6 +225,9 @@ static void get_typical_interval(struct menu_device *data)
max = value; max = value;
} }
} }
if (divisor == INTERVALS)
avg >>= INTERVAL_SHIFT;
else
do_div(avg, divisor); do_div(avg, divisor);
/* Then try to determine standard deviation */ /* Then try to determine standard deviation */
...@@ -238,7 +239,11 @@ static void get_typical_interval(struct menu_device *data) ...@@ -238,7 +239,11 @@ static void get_typical_interval(struct menu_device *data)
stddev += diff * diff; stddev += diff * diff;
} }
} }
if (divisor == INTERVALS)
stddev >>= INTERVAL_SHIFT;
else
do_div(stddev, divisor); do_div(stddev, divisor);
/* /*
* The typical interval is obtained when standard deviation is small * The typical interval is obtained when standard deviation is small
* or standard deviation is small compared to the average interval. * or standard deviation is small compared to the average interval.
...@@ -288,7 +293,7 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev) ...@@ -288,7 +293,7 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
int latency_req = pm_qos_request(PM_QOS_CPU_DMA_LATENCY); int latency_req = pm_qos_request(PM_QOS_CPU_DMA_LATENCY);
int i; int i;
unsigned int interactivity_req; unsigned int interactivity_req;
struct timespec t; unsigned long nr_iowaiters, cpu_load;
if (data->needs_update) { if (data->needs_update) {
menu_update(drv, dev); menu_update(drv, dev);
...@@ -302,12 +307,10 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev) ...@@ -302,12 +307,10 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
return 0; return 0;
/* determine the expected residency time, round up */ /* determine the expected residency time, round up */
t = ktime_to_timespec(tick_nohz_get_sleep_length()); data->next_timer_us = ktime_to_us(tick_nohz_get_sleep_length());
data->next_timer_us =
t.tv_sec * USEC_PER_SEC + t.tv_nsec / NSEC_PER_USEC;
data->bucket = which_bucket(data->next_timer_us); get_iowait_load(&nr_iowaiters, &cpu_load);
data->bucket = which_bucket(data->next_timer_us, nr_iowaiters);
/* /*
* Force the result of multiplication to be 64 bits even if both * Force the result of multiplication to be 64 bits even if both
...@@ -325,7 +328,7 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev) ...@@ -325,7 +328,7 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
* duration / latency ratio. Adjust the latency limit if * duration / latency ratio. Adjust the latency limit if
* necessary. * necessary.
*/ */
interactivity_req = data->predicted_us / performance_multiplier(); interactivity_req = data->predicted_us / performance_multiplier(nr_iowaiters, cpu_load);
if (latency_req > interactivity_req) if (latency_req > interactivity_req)
latency_req = interactivity_req; latency_req = interactivity_req;
......
...@@ -169,8 +169,7 @@ extern int nr_processes(void); ...@@ -169,8 +169,7 @@ extern int nr_processes(void);
extern unsigned long nr_running(void); extern unsigned long nr_running(void);
extern unsigned long nr_iowait(void); extern unsigned long nr_iowait(void);
extern unsigned long nr_iowait_cpu(int cpu); extern unsigned long nr_iowait_cpu(int cpu);
extern unsigned long this_cpu_load(void); extern void get_iowait_load(unsigned long *nr_waiters, unsigned long *load);
extern void calc_global_load(unsigned long ticks); extern void calc_global_load(unsigned long ticks);
extern void update_cpu_load_nohz(void); extern void update_cpu_load_nohz(void);
......
...@@ -954,6 +954,25 @@ static void mark_nosave_pages(struct memory_bitmap *bm) ...@@ -954,6 +954,25 @@ static void mark_nosave_pages(struct memory_bitmap *bm)
} }
} }
static bool is_nosave_page(unsigned long pfn)
{
struct nosave_region *region;
list_for_each_entry(region, &nosave_regions, list) {
if (pfn >= region->start_pfn && pfn < region->end_pfn) {
pr_err("PM: %#010llx in e820 nosave region: "
"[mem %#010llx-%#010llx]\n",
(unsigned long long) pfn << PAGE_SHIFT,
(unsigned long long) region->start_pfn << PAGE_SHIFT,
((unsigned long long) region->end_pfn << PAGE_SHIFT)
- 1);
return true;
}
}
return false;
}
/** /**
* create_basic_memory_bitmaps - create bitmaps needed for marking page * create_basic_memory_bitmaps - create bitmaps needed for marking page
* frames that should not be saved and free page frames. The pointers * frames that should not be saved and free page frames. The pointers
...@@ -2015,7 +2034,7 @@ static int mark_unsafe_pages(struct memory_bitmap *bm) ...@@ -2015,7 +2034,7 @@ static int mark_unsafe_pages(struct memory_bitmap *bm)
do { do {
pfn = memory_bm_next_pfn(bm); pfn = memory_bm_next_pfn(bm);
if (likely(pfn != BM_END_OF_MAP)) { if (likely(pfn != BM_END_OF_MAP)) {
if (likely(pfn_valid(pfn))) if (likely(pfn_valid(pfn)) && !is_nosave_page(pfn))
swsusp_set_page_free(pfn_to_page(pfn)); swsusp_set_page_free(pfn_to_page(pfn));
else else
return -EFAULT; return -EFAULT;
......
...@@ -2393,6 +2393,13 @@ unsigned long nr_iowait_cpu(int cpu) ...@@ -2393,6 +2393,13 @@ unsigned long nr_iowait_cpu(int cpu)
return atomic_read(&this->nr_iowait); return atomic_read(&this->nr_iowait);
} }
void get_iowait_load(unsigned long *nr_waiters, unsigned long *load)
{
struct rq *this = this_rq();
*nr_waiters = atomic_read(&this->nr_iowait);
*load = this->cpu_load[0];
}
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
/* /*
......
...@@ -8,13 +8,6 @@ ...@@ -8,13 +8,6 @@
#include "sched.h" #include "sched.h"
unsigned long this_cpu_load(void)
{
struct rq *this = this_rq();
return this->cpu_load[0];
}
/* /*
* Global load-average calculations * Global load-average calculations
* *
......
This source diff could not be displayed because it is too large. You can view the blob instead.
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment