Commit c89b148f authored by Linus Torvalds's avatar Linus Torvalds

Merge tag 'pm+acpi-3.9-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm

Pull ACPI and power management fixes from Rafael J Wysocki:

 - Two fixes for the new intel_pstate driver from Dirk Brandewie.

 - Fix for incorrect usage of the .find_bridge() callback from struct
   acpi_bus_type in the USB core and subsequent removal of that callback
   from Rafael J Wysocki.

 - ACPI processor driver cleanups from Chen Gang and Syam Sidhardhan.

 - ACPI initialization and error messages fix from Joe Perches.

 - Operating Performance Points documentation improvement from Nishanth
   Menon.

 - Fixes for memory leaks, potential concurrency issues, and sysfs
   attribute leaks during device removal in the core device PM QoS code
   from Rafael J Wysocki.

 - Calxeda Highbank cpufreq driver simplification from Emilio López.

 - cpufreq comment cleanup from Namhyung Kim.

 - Fix for a section mismatch in Calxeda Highbank interprocessor
   communication code from Mark Langsdorf (this is not a PM fix strictly
   speaking, but the code in question went in through the PM tree).

* tag 'pm+acpi-3.9-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm:
  cpufreq / intel_pstate: Do not load on VM that does not report max P state.
  cpufreq / intel_pstate: Fix intel_pstate_init() error path
  ACPI / glue: Drop .find_bridge() callback from struct acpi_bus_type
  ACPI / glue: Add .match() callback to struct acpi_bus_type
  ACPI / processor: Beautify code, pr->id is u32 which is never < 0
  ACPI / processor: Remove redundant NULL check before kfree
  ACPI / Sleep: Avoid interleaved message on errors
  PM / QoS: Remove device PM QoS sysfs attributes at the right place
  PM / QoS: Fix concurrency issues and memory leaks in device PM QoS
  cpufreq: highbank: do not initialize array with a loop
  PM / OPP: improve introductory documentation
  cpufreq: Fix a typo in comment
  mailbox, pl320-ipc: remove __init from probe function
parents 19cf3edb 6bd51658
*=============* Operating Performance Points (OPP) Library
* OPP Library * ==========================================
*=============*
(C) 2009-2010 Nishanth Menon <nm@ti.com>, Texas Instruments Incorporated (C) 2009-2010 Nishanth Menon <nm@ti.com>, Texas Instruments Incorporated
...@@ -16,15 +15,31 @@ Contents ...@@ -16,15 +15,31 @@ Contents
1. Introduction 1. Introduction
=============== ===============
1.1 What is an Operating Performance Point (OPP)?
Complex SoCs of today consists of a multiple sub-modules working in conjunction. Complex SoCs of today consists of a multiple sub-modules working in conjunction.
In an operational system executing varied use cases, not all modules in the SoC In an operational system executing varied use cases, not all modules in the SoC
need to function at their highest performing frequency all the time. To need to function at their highest performing frequency all the time. To
facilitate this, sub-modules in a SoC are grouped into domains, allowing some facilitate this, sub-modules in a SoC are grouped into domains, allowing some
domains to run at lower voltage and frequency while other domains are loaded domains to run at lower voltage and frequency while other domains run at
more. The set of discrete tuples consisting of frequency and voltage pairs that voltage/frequency pairs that are higher.
The set of discrete tuples consisting of frequency and voltage pairs that
the device will support per domain are called Operating Performance Points or the device will support per domain are called Operating Performance Points or
OPPs. OPPs.
As an example:
Let us consider an MPU device which supports the following:
{300MHz at minimum voltage of 1V}, {800MHz at minimum voltage of 1.2V},
{1GHz at minimum voltage of 1.3V}
We can represent these as three OPPs as the following {Hz, uV} tuples:
{300000000, 1000000}
{800000000, 1200000}
{1000000000, 1300000}
1.2 Operating Performance Points Library
OPP library provides a set of helper functions to organize and query the OPP OPP library provides a set of helper functions to organize and query the OPP
information. The library is located in drivers/base/power/opp.c and the header information. The library is located in drivers/base/power/opp.c and the header
is located in include/linux/opp.h. OPP library can be enabled by enabling is located in include/linux/opp.h. OPP library can be enabled by enabling
......
...@@ -36,12 +36,11 @@ int register_acpi_bus_type(struct acpi_bus_type *type) ...@@ -36,12 +36,11 @@ int register_acpi_bus_type(struct acpi_bus_type *type)
{ {
if (acpi_disabled) if (acpi_disabled)
return -ENODEV; return -ENODEV;
if (type && type->bus && type->find_device) { if (type && type->match && type->find_device) {
down_write(&bus_type_sem); down_write(&bus_type_sem);
list_add_tail(&type->list, &bus_type_list); list_add_tail(&type->list, &bus_type_list);
up_write(&bus_type_sem); up_write(&bus_type_sem);
printk(KERN_INFO PREFIX "bus type %s registered\n", printk(KERN_INFO PREFIX "bus type %s registered\n", type->name);
type->bus->name);
return 0; return 0;
} }
return -ENODEV; return -ENODEV;
...@@ -56,24 +55,21 @@ int unregister_acpi_bus_type(struct acpi_bus_type *type) ...@@ -56,24 +55,21 @@ int unregister_acpi_bus_type(struct acpi_bus_type *type)
down_write(&bus_type_sem); down_write(&bus_type_sem);
list_del_init(&type->list); list_del_init(&type->list);
up_write(&bus_type_sem); up_write(&bus_type_sem);
printk(KERN_INFO PREFIX "ACPI bus type %s unregistered\n", printk(KERN_INFO PREFIX "bus type %s unregistered\n",
type->bus->name); type->name);
return 0; return 0;
} }
return -ENODEV; return -ENODEV;
} }
EXPORT_SYMBOL_GPL(unregister_acpi_bus_type); EXPORT_SYMBOL_GPL(unregister_acpi_bus_type);
static struct acpi_bus_type *acpi_get_bus_type(struct bus_type *type) static struct acpi_bus_type *acpi_get_bus_type(struct device *dev)
{ {
struct acpi_bus_type *tmp, *ret = NULL; struct acpi_bus_type *tmp, *ret = NULL;
if (!type)
return NULL;
down_read(&bus_type_sem); down_read(&bus_type_sem);
list_for_each_entry(tmp, &bus_type_list, list) { list_for_each_entry(tmp, &bus_type_list, list) {
if (tmp->bus == type) { if (tmp->match(dev)) {
ret = tmp; ret = tmp;
break; break;
} }
...@@ -82,22 +78,6 @@ static struct acpi_bus_type *acpi_get_bus_type(struct bus_type *type) ...@@ -82,22 +78,6 @@ static struct acpi_bus_type *acpi_get_bus_type(struct bus_type *type)
return ret; return ret;
} }
static int acpi_find_bridge_device(struct device *dev, acpi_handle * handle)
{
struct acpi_bus_type *tmp;
int ret = -ENODEV;
down_read(&bus_type_sem);
list_for_each_entry(tmp, &bus_type_list, list) {
if (tmp->find_bridge && !tmp->find_bridge(dev, handle)) {
ret = 0;
break;
}
}
up_read(&bus_type_sem);
return ret;
}
static acpi_status do_acpi_find_child(acpi_handle handle, u32 lvl_not_used, static acpi_status do_acpi_find_child(acpi_handle handle, u32 lvl_not_used,
void *addr_p, void **ret_p) void *addr_p, void **ret_p)
{ {
...@@ -261,29 +241,12 @@ static int acpi_unbind_one(struct device *dev) ...@@ -261,29 +241,12 @@ static int acpi_unbind_one(struct device *dev)
static int acpi_platform_notify(struct device *dev) static int acpi_platform_notify(struct device *dev)
{ {
struct acpi_bus_type *type; struct acpi_bus_type *type = acpi_get_bus_type(dev);
acpi_handle handle; acpi_handle handle;
int ret; int ret;
ret = acpi_bind_one(dev, NULL); ret = acpi_bind_one(dev, NULL);
if (ret && (!dev->bus || !dev->parent)) { if (ret && type) {
/* bridge devices genernally haven't bus or parent */
ret = acpi_find_bridge_device(dev, &handle);
if (!ret) {
ret = acpi_bind_one(dev, handle);
if (ret)
goto out;
}
}
type = acpi_get_bus_type(dev->bus);
if (ret) {
if (!type || !type->find_device) {
DBG("No ACPI bus support for %s\n", dev_name(dev));
ret = -EINVAL;
goto out;
}
ret = type->find_device(dev, &handle); ret = type->find_device(dev, &handle);
if (ret) { if (ret) {
DBG("Unable to get handle for %s\n", dev_name(dev)); DBG("Unable to get handle for %s\n", dev_name(dev));
...@@ -316,7 +279,7 @@ static int acpi_platform_notify_remove(struct device *dev) ...@@ -316,7 +279,7 @@ static int acpi_platform_notify_remove(struct device *dev)
{ {
struct acpi_bus_type *type; struct acpi_bus_type *type;
type = acpi_get_bus_type(dev->bus); type = acpi_get_bus_type(dev);
if (type && type->cleanup) if (type && type->cleanup)
type->cleanup(dev); type->cleanup(dev);
......
...@@ -158,8 +158,7 @@ static int map_mat_entry(acpi_handle handle, int type, u32 acpi_id) ...@@ -158,8 +158,7 @@ static int map_mat_entry(acpi_handle handle, int type, u32 acpi_id)
} }
exit: exit:
if (buffer.pointer) kfree(buffer.pointer);
kfree(buffer.pointer);
return apic_id; return apic_id;
} }
......
...@@ -559,7 +559,7 @@ static int __cpuinit acpi_processor_add(struct acpi_device *device) ...@@ -559,7 +559,7 @@ static int __cpuinit acpi_processor_add(struct acpi_device *device)
return 0; return 0;
#endif #endif
BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0)); BUG_ON(pr->id >= nr_cpu_ids);
/* /*
* Buggy BIOS check * Buggy BIOS check
......
...@@ -599,7 +599,6 @@ static void acpi_sleep_suspend_setup(void) ...@@ -599,7 +599,6 @@ static void acpi_sleep_suspend_setup(void)
status = acpi_get_sleep_type_data(i, &type_a, &type_b); status = acpi_get_sleep_type_data(i, &type_a, &type_b);
if (ACPI_SUCCESS(status)) { if (ACPI_SUCCESS(status)) {
sleep_states[i] = 1; sleep_states[i] = 1;
pr_cont(" S%d", i);
} }
} }
...@@ -742,7 +741,6 @@ static void acpi_sleep_hibernate_setup(void) ...@@ -742,7 +741,6 @@ static void acpi_sleep_hibernate_setup(void)
hibernation_set_ops(old_suspend_ordering ? hibernation_set_ops(old_suspend_ordering ?
&acpi_hibernation_ops_old : &acpi_hibernation_ops); &acpi_hibernation_ops_old : &acpi_hibernation_ops);
sleep_states[ACPI_STATE_S4] = 1; sleep_states[ACPI_STATE_S4] = 1;
pr_cont(KERN_CONT " S4");
if (nosigcheck) if (nosigcheck)
return; return;
...@@ -788,6 +786,9 @@ int __init acpi_sleep_init(void) ...@@ -788,6 +786,9 @@ int __init acpi_sleep_init(void)
{ {
acpi_status status; acpi_status status;
u8 type_a, type_b; u8 type_a, type_b;
char supported[ACPI_S_STATE_COUNT * 3 + 1];
char *pos = supported;
int i;
if (acpi_disabled) if (acpi_disabled)
return 0; return 0;
...@@ -795,7 +796,6 @@ int __init acpi_sleep_init(void) ...@@ -795,7 +796,6 @@ int __init acpi_sleep_init(void)
acpi_sleep_dmi_check(); acpi_sleep_dmi_check();
sleep_states[ACPI_STATE_S0] = 1; sleep_states[ACPI_STATE_S0] = 1;
pr_info(PREFIX "(supports S0");
acpi_sleep_suspend_setup(); acpi_sleep_suspend_setup();
acpi_sleep_hibernate_setup(); acpi_sleep_hibernate_setup();
...@@ -803,11 +803,17 @@ int __init acpi_sleep_init(void) ...@@ -803,11 +803,17 @@ int __init acpi_sleep_init(void)
status = acpi_get_sleep_type_data(ACPI_STATE_S5, &type_a, &type_b); status = acpi_get_sleep_type_data(ACPI_STATE_S5, &type_a, &type_b);
if (ACPI_SUCCESS(status)) { if (ACPI_SUCCESS(status)) {
sleep_states[ACPI_STATE_S5] = 1; sleep_states[ACPI_STATE_S5] = 1;
pr_cont(" S5");
pm_power_off_prepare = acpi_power_off_prepare; pm_power_off_prepare = acpi_power_off_prepare;
pm_power_off = acpi_power_off; pm_power_off = acpi_power_off;
} }
pr_cont(")\n");
supported[0] = 0;
for (i = 0; i < ACPI_S_STATE_COUNT; i++) {
if (sleep_states[i])
pos += sprintf(pos, " S%d", i);
}
pr_info(PREFIX "(supports%s)\n", supported);
/* /*
* Register the tts_notifier to reboot notifier list so that the _TTS * Register the tts_notifier to reboot notifier list so that the _TTS
* object can also be evaluated when the system enters S5. * object can also be evaluated when the system enters S5.
......
...@@ -1144,13 +1144,8 @@ static int ata_acpi_find_device(struct device *dev, acpi_handle *handle) ...@@ -1144,13 +1144,8 @@ static int ata_acpi_find_device(struct device *dev, acpi_handle *handle)
return -ENODEV; return -ENODEV;
} }
static int ata_acpi_find_dummy(struct device *dev, acpi_handle *handle)
{
return -ENODEV;
}
static struct acpi_bus_type ata_acpi_bus = { static struct acpi_bus_type ata_acpi_bus = {
.find_bridge = ata_acpi_find_dummy, .name = "ATA",
.find_device = ata_acpi_find_device, .find_device = ata_acpi_find_device,
}; };
......
...@@ -99,7 +99,6 @@ void device_pm_add(struct device *dev) ...@@ -99,7 +99,6 @@ void device_pm_add(struct device *dev)
dev_warn(dev, "parent %s should not be sleeping\n", dev_warn(dev, "parent %s should not be sleeping\n",
dev_name(dev->parent)); dev_name(dev->parent));
list_add_tail(&dev->power.entry, &dpm_list); list_add_tail(&dev->power.entry, &dpm_list);
dev_pm_qos_constraints_init(dev);
mutex_unlock(&dpm_list_mtx); mutex_unlock(&dpm_list_mtx);
} }
...@@ -113,7 +112,6 @@ void device_pm_remove(struct device *dev) ...@@ -113,7 +112,6 @@ void device_pm_remove(struct device *dev)
dev->bus ? dev->bus->name : "No Bus", dev_name(dev)); dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
complete_all(&dev->power.completion); complete_all(&dev->power.completion);
mutex_lock(&dpm_list_mtx); mutex_lock(&dpm_list_mtx);
dev_pm_qos_constraints_destroy(dev);
list_del_init(&dev->power.entry); list_del_init(&dev->power.entry);
mutex_unlock(&dpm_list_mtx); mutex_unlock(&dpm_list_mtx);
device_wakeup_disable(dev); device_wakeup_disable(dev);
......
...@@ -4,7 +4,7 @@ static inline void device_pm_init_common(struct device *dev) ...@@ -4,7 +4,7 @@ static inline void device_pm_init_common(struct device *dev)
{ {
if (!dev->power.early_init) { if (!dev->power.early_init) {
spin_lock_init(&dev->power.lock); spin_lock_init(&dev->power.lock);
dev->power.power_state = PMSG_INVALID; dev->power.qos = NULL;
dev->power.early_init = true; dev->power.early_init = true;
} }
} }
...@@ -56,14 +56,10 @@ extern void device_pm_move_last(struct device *); ...@@ -56,14 +56,10 @@ extern void device_pm_move_last(struct device *);
static inline void device_pm_sleep_init(struct device *dev) {} static inline void device_pm_sleep_init(struct device *dev) {}
static inline void device_pm_add(struct device *dev) static inline void device_pm_add(struct device *dev) {}
{
dev_pm_qos_constraints_init(dev);
}
static inline void device_pm_remove(struct device *dev) static inline void device_pm_remove(struct device *dev)
{ {
dev_pm_qos_constraints_destroy(dev);
pm_runtime_remove(dev); pm_runtime_remove(dev);
} }
......
...@@ -41,6 +41,7 @@ ...@@ -41,6 +41,7 @@
#include <linux/mutex.h> #include <linux/mutex.h>
#include <linux/export.h> #include <linux/export.h>
#include <linux/pm_runtime.h> #include <linux/pm_runtime.h>
#include <linux/err.h>
#include "power.h" #include "power.h"
...@@ -61,7 +62,7 @@ enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev, s32 mask) ...@@ -61,7 +62,7 @@ enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev, s32 mask)
struct pm_qos_flags *pqf; struct pm_qos_flags *pqf;
s32 val; s32 val;
if (!qos) if (IS_ERR_OR_NULL(qos))
return PM_QOS_FLAGS_UNDEFINED; return PM_QOS_FLAGS_UNDEFINED;
pqf = &qos->flags; pqf = &qos->flags;
...@@ -101,7 +102,8 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_flags); ...@@ -101,7 +102,8 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_flags);
*/ */
s32 __dev_pm_qos_read_value(struct device *dev) s32 __dev_pm_qos_read_value(struct device *dev)
{ {
return dev->power.qos ? pm_qos_read_value(&dev->power.qos->latency) : 0; return IS_ERR_OR_NULL(dev->power.qos) ?
0 : pm_qos_read_value(&dev->power.qos->latency);
} }
/** /**
...@@ -198,20 +200,8 @@ static int dev_pm_qos_constraints_allocate(struct device *dev) ...@@ -198,20 +200,8 @@ static int dev_pm_qos_constraints_allocate(struct device *dev)
return 0; return 0;
} }
/** static void __dev_pm_qos_hide_latency_limit(struct device *dev);
* dev_pm_qos_constraints_init - Initalize device's PM QoS constraints pointer. static void __dev_pm_qos_hide_flags(struct device *dev);
* @dev: target device
*
* Called from the device PM subsystem during device insertion under
* device_pm_lock().
*/
void dev_pm_qos_constraints_init(struct device *dev)
{
mutex_lock(&dev_pm_qos_mtx);
dev->power.qos = NULL;
dev->power.power_state = PMSG_ON;
mutex_unlock(&dev_pm_qos_mtx);
}
/** /**
* dev_pm_qos_constraints_destroy * dev_pm_qos_constraints_destroy
...@@ -226,16 +216,15 @@ void dev_pm_qos_constraints_destroy(struct device *dev) ...@@ -226,16 +216,15 @@ void dev_pm_qos_constraints_destroy(struct device *dev)
struct pm_qos_constraints *c; struct pm_qos_constraints *c;
struct pm_qos_flags *f; struct pm_qos_flags *f;
mutex_lock(&dev_pm_qos_mtx);
/* /*
* If the device's PM QoS resume latency limit or PM QoS flags have been * If the device's PM QoS resume latency limit or PM QoS flags have been
* exposed to user space, they have to be hidden at this point. * exposed to user space, they have to be hidden at this point.
*/ */
dev_pm_qos_hide_latency_limit(dev); __dev_pm_qos_hide_latency_limit(dev);
dev_pm_qos_hide_flags(dev); __dev_pm_qos_hide_flags(dev);
mutex_lock(&dev_pm_qos_mtx);
dev->power.power_state = PMSG_INVALID;
qos = dev->power.qos; qos = dev->power.qos;
if (!qos) if (!qos)
goto out; goto out;
...@@ -257,7 +246,7 @@ void dev_pm_qos_constraints_destroy(struct device *dev) ...@@ -257,7 +246,7 @@ void dev_pm_qos_constraints_destroy(struct device *dev)
} }
spin_lock_irq(&dev->power.lock); spin_lock_irq(&dev->power.lock);
dev->power.qos = NULL; dev->power.qos = ERR_PTR(-ENODEV);
spin_unlock_irq(&dev->power.lock); spin_unlock_irq(&dev->power.lock);
kfree(c->notifiers); kfree(c->notifiers);
...@@ -301,32 +290,19 @@ int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req, ...@@ -301,32 +290,19 @@ int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req,
"%s() called for already added request\n", __func__)) "%s() called for already added request\n", __func__))
return -EINVAL; return -EINVAL;
req->dev = dev;
mutex_lock(&dev_pm_qos_mtx); mutex_lock(&dev_pm_qos_mtx);
if (!dev->power.qos) { if (IS_ERR(dev->power.qos))
if (dev->power.power_state.event == PM_EVENT_INVALID) { ret = -ENODEV;
/* The device has been removed from the system. */ else if (!dev->power.qos)
req->dev = NULL; ret = dev_pm_qos_constraints_allocate(dev);
ret = -ENODEV;
goto out;
} else {
/*
* Allocate the constraints data on the first call to
* add_request, i.e. only if the data is not already
* allocated and if the device has not been removed.
*/
ret = dev_pm_qos_constraints_allocate(dev);
}
}
if (!ret) { if (!ret) {
req->dev = dev;
req->type = type; req->type = type;
ret = apply_constraint(req, PM_QOS_ADD_REQ, value); ret = apply_constraint(req, PM_QOS_ADD_REQ, value);
} }
out:
mutex_unlock(&dev_pm_qos_mtx); mutex_unlock(&dev_pm_qos_mtx);
return ret; return ret;
...@@ -344,7 +320,14 @@ static int __dev_pm_qos_update_request(struct dev_pm_qos_request *req, ...@@ -344,7 +320,14 @@ static int __dev_pm_qos_update_request(struct dev_pm_qos_request *req,
s32 curr_value; s32 curr_value;
int ret = 0; int ret = 0;
if (!req->dev->power.qos) if (!req) /*guard against callers passing in null */
return -EINVAL;
if (WARN(!dev_pm_qos_request_active(req),
"%s() called for unknown object\n", __func__))
return -EINVAL;
if (IS_ERR_OR_NULL(req->dev->power.qos))
return -ENODEV; return -ENODEV;
switch(req->type) { switch(req->type) {
...@@ -386,6 +369,17 @@ int dev_pm_qos_update_request(struct dev_pm_qos_request *req, s32 new_value) ...@@ -386,6 +369,17 @@ int dev_pm_qos_update_request(struct dev_pm_qos_request *req, s32 new_value)
{ {
int ret; int ret;
mutex_lock(&dev_pm_qos_mtx);
ret = __dev_pm_qos_update_request(req, new_value);
mutex_unlock(&dev_pm_qos_mtx);
return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_update_request);
static int __dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
{
int ret;
if (!req) /*guard against callers passing in null */ if (!req) /*guard against callers passing in null */
return -EINVAL; return -EINVAL;
...@@ -393,13 +387,13 @@ int dev_pm_qos_update_request(struct dev_pm_qos_request *req, s32 new_value) ...@@ -393,13 +387,13 @@ int dev_pm_qos_update_request(struct dev_pm_qos_request *req, s32 new_value)
"%s() called for unknown object\n", __func__)) "%s() called for unknown object\n", __func__))
return -EINVAL; return -EINVAL;
mutex_lock(&dev_pm_qos_mtx); if (IS_ERR_OR_NULL(req->dev->power.qos))
ret = __dev_pm_qos_update_request(req, new_value); return -ENODEV;
mutex_unlock(&dev_pm_qos_mtx);
ret = apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
memset(req, 0, sizeof(*req));
return ret; return ret;
} }
EXPORT_SYMBOL_GPL(dev_pm_qos_update_request);
/** /**
* dev_pm_qos_remove_request - modifies an existing qos request * dev_pm_qos_remove_request - modifies an existing qos request
...@@ -418,26 +412,10 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_update_request); ...@@ -418,26 +412,10 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_update_request);
*/ */
int dev_pm_qos_remove_request(struct dev_pm_qos_request *req) int dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
{ {
int ret = 0; int ret;
if (!req) /*guard against callers passing in null */
return -EINVAL;
if (WARN(!dev_pm_qos_request_active(req),
"%s() called for unknown object\n", __func__))
return -EINVAL;
mutex_lock(&dev_pm_qos_mtx); mutex_lock(&dev_pm_qos_mtx);
ret = __dev_pm_qos_remove_request(req);
if (req->dev->power.qos) {
ret = apply_constraint(req, PM_QOS_REMOVE_REQ,
PM_QOS_DEFAULT_VALUE);
memset(req, 0, sizeof(*req));
} else {
/* Return if the device has been removed */
ret = -ENODEV;
}
mutex_unlock(&dev_pm_qos_mtx); mutex_unlock(&dev_pm_qos_mtx);
return ret; return ret;
} }
...@@ -462,9 +440,10 @@ int dev_pm_qos_add_notifier(struct device *dev, struct notifier_block *notifier) ...@@ -462,9 +440,10 @@ int dev_pm_qos_add_notifier(struct device *dev, struct notifier_block *notifier)
mutex_lock(&dev_pm_qos_mtx); mutex_lock(&dev_pm_qos_mtx);
if (!dev->power.qos) if (IS_ERR(dev->power.qos))
ret = dev->power.power_state.event != PM_EVENT_INVALID ? ret = -ENODEV;
dev_pm_qos_constraints_allocate(dev) : -ENODEV; else if (!dev->power.qos)
ret = dev_pm_qos_constraints_allocate(dev);
if (!ret) if (!ret)
ret = blocking_notifier_chain_register( ret = blocking_notifier_chain_register(
...@@ -493,7 +472,7 @@ int dev_pm_qos_remove_notifier(struct device *dev, ...@@ -493,7 +472,7 @@ int dev_pm_qos_remove_notifier(struct device *dev,
mutex_lock(&dev_pm_qos_mtx); mutex_lock(&dev_pm_qos_mtx);
/* Silently return if the constraints object is not present. */ /* Silently return if the constraints object is not present. */
if (dev->power.qos) if (!IS_ERR_OR_NULL(dev->power.qos))
retval = blocking_notifier_chain_unregister( retval = blocking_notifier_chain_unregister(
dev->power.qos->latency.notifiers, dev->power.qos->latency.notifiers,
notifier); notifier);
...@@ -563,16 +542,20 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_add_ancestor_request); ...@@ -563,16 +542,20 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_add_ancestor_request);
static void __dev_pm_qos_drop_user_request(struct device *dev, static void __dev_pm_qos_drop_user_request(struct device *dev,
enum dev_pm_qos_req_type type) enum dev_pm_qos_req_type type)
{ {
struct dev_pm_qos_request *req = NULL;
switch(type) { switch(type) {
case DEV_PM_QOS_LATENCY: case DEV_PM_QOS_LATENCY:
dev_pm_qos_remove_request(dev->power.qos->latency_req); req = dev->power.qos->latency_req;
dev->power.qos->latency_req = NULL; dev->power.qos->latency_req = NULL;
break; break;
case DEV_PM_QOS_FLAGS: case DEV_PM_QOS_FLAGS:
dev_pm_qos_remove_request(dev->power.qos->flags_req); req = dev->power.qos->flags_req;
dev->power.qos->flags_req = NULL; dev->power.qos->flags_req = NULL;
break; break;
} }
__dev_pm_qos_remove_request(req);
kfree(req);
} }
/** /**
...@@ -588,36 +571,57 @@ int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value) ...@@ -588,36 +571,57 @@ int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value)
if (!device_is_registered(dev) || value < 0) if (!device_is_registered(dev) || value < 0)
return -EINVAL; return -EINVAL;
if (dev->power.qos && dev->power.qos->latency_req)
return -EEXIST;
req = kzalloc(sizeof(*req), GFP_KERNEL); req = kzalloc(sizeof(*req), GFP_KERNEL);
if (!req) if (!req)
return -ENOMEM; return -ENOMEM;
ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_LATENCY, value); ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_LATENCY, value);
if (ret < 0) if (ret < 0) {
kfree(req);
return ret; return ret;
}
mutex_lock(&dev_pm_qos_mtx);
if (IS_ERR_OR_NULL(dev->power.qos))
ret = -ENODEV;
else if (dev->power.qos->latency_req)
ret = -EEXIST;
if (ret < 0) {
__dev_pm_qos_remove_request(req);
kfree(req);
goto out;
}
dev->power.qos->latency_req = req; dev->power.qos->latency_req = req;
ret = pm_qos_sysfs_add_latency(dev); ret = pm_qos_sysfs_add_latency(dev);
if (ret) if (ret)
__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY); __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY);
out:
mutex_unlock(&dev_pm_qos_mtx);
return ret; return ret;
} }
EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_limit); EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_limit);
static void __dev_pm_qos_hide_latency_limit(struct device *dev)
{
if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->latency_req) {
pm_qos_sysfs_remove_latency(dev);
__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY);
}
}
/** /**
* dev_pm_qos_hide_latency_limit - Hide PM QoS latency limit from user space. * dev_pm_qos_hide_latency_limit - Hide PM QoS latency limit from user space.
* @dev: Device whose PM QoS latency limit is to be hidden from user space. * @dev: Device whose PM QoS latency limit is to be hidden from user space.
*/ */
void dev_pm_qos_hide_latency_limit(struct device *dev) void dev_pm_qos_hide_latency_limit(struct device *dev)
{ {
if (dev->power.qos && dev->power.qos->latency_req) { mutex_lock(&dev_pm_qos_mtx);
pm_qos_sysfs_remove_latency(dev); __dev_pm_qos_hide_latency_limit(dev);
__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY); mutex_unlock(&dev_pm_qos_mtx);
}
} }
EXPORT_SYMBOL_GPL(dev_pm_qos_hide_latency_limit); EXPORT_SYMBOL_GPL(dev_pm_qos_hide_latency_limit);
...@@ -634,41 +638,61 @@ int dev_pm_qos_expose_flags(struct device *dev, s32 val) ...@@ -634,41 +638,61 @@ int dev_pm_qos_expose_flags(struct device *dev, s32 val)
if (!device_is_registered(dev)) if (!device_is_registered(dev))
return -EINVAL; return -EINVAL;
if (dev->power.qos && dev->power.qos->flags_req)
return -EEXIST;
req = kzalloc(sizeof(*req), GFP_KERNEL); req = kzalloc(sizeof(*req), GFP_KERNEL);
if (!req) if (!req)
return -ENOMEM; return -ENOMEM;
pm_runtime_get_sync(dev);
ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_FLAGS, val); ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_FLAGS, val);
if (ret < 0) if (ret < 0) {
goto fail; kfree(req);
return ret;
}
pm_runtime_get_sync(dev);
mutex_lock(&dev_pm_qos_mtx);
if (IS_ERR_OR_NULL(dev->power.qos))
ret = -ENODEV;
else if (dev->power.qos->flags_req)
ret = -EEXIST;
if (ret < 0) {
__dev_pm_qos_remove_request(req);
kfree(req);
goto out;
}
dev->power.qos->flags_req = req; dev->power.qos->flags_req = req;
ret = pm_qos_sysfs_add_flags(dev); ret = pm_qos_sysfs_add_flags(dev);
if (ret) if (ret)
__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS); __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);
fail: out:
mutex_unlock(&dev_pm_qos_mtx);
pm_runtime_put(dev); pm_runtime_put(dev);
return ret; return ret;
} }
EXPORT_SYMBOL_GPL(dev_pm_qos_expose_flags); EXPORT_SYMBOL_GPL(dev_pm_qos_expose_flags);
static void __dev_pm_qos_hide_flags(struct device *dev)
{
if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->flags_req) {
pm_qos_sysfs_remove_flags(dev);
__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);
}
}
/** /**
* dev_pm_qos_hide_flags - Hide PM QoS flags of a device from user space. * dev_pm_qos_hide_flags - Hide PM QoS flags of a device from user space.
* @dev: Device whose PM QoS flags are to be hidden from user space. * @dev: Device whose PM QoS flags are to be hidden from user space.
*/ */
void dev_pm_qos_hide_flags(struct device *dev) void dev_pm_qos_hide_flags(struct device *dev)
{ {
if (dev->power.qos && dev->power.qos->flags_req) { pm_runtime_get_sync(dev);
pm_qos_sysfs_remove_flags(dev); mutex_lock(&dev_pm_qos_mtx);
pm_runtime_get_sync(dev); __dev_pm_qos_hide_flags(dev);
__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS); mutex_unlock(&dev_pm_qos_mtx);
pm_runtime_put(dev); pm_runtime_put(dev);
}
} }
EXPORT_SYMBOL_GPL(dev_pm_qos_hide_flags); EXPORT_SYMBOL_GPL(dev_pm_qos_hide_flags);
...@@ -683,12 +707,14 @@ int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set) ...@@ -683,12 +707,14 @@ int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set)
s32 value; s32 value;
int ret; int ret;
if (!dev->power.qos || !dev->power.qos->flags_req)
return -EINVAL;
pm_runtime_get_sync(dev); pm_runtime_get_sync(dev);
mutex_lock(&dev_pm_qos_mtx); mutex_lock(&dev_pm_qos_mtx);
if (IS_ERR_OR_NULL(dev->power.qos) || !dev->power.qos->flags_req) {
ret = -EINVAL;
goto out;
}
value = dev_pm_qos_requested_flags(dev); value = dev_pm_qos_requested_flags(dev);
if (set) if (set)
value |= mask; value |= mask;
...@@ -697,9 +723,12 @@ int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set) ...@@ -697,9 +723,12 @@ int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set)
ret = __dev_pm_qos_update_request(dev->power.qos->flags_req, value); ret = __dev_pm_qos_update_request(dev->power.qos->flags_req, value);
out:
mutex_unlock(&dev_pm_qos_mtx); mutex_unlock(&dev_pm_qos_mtx);
pm_runtime_put(dev); pm_runtime_put(dev);
return ret; return ret;
} }
#else /* !CONFIG_PM_RUNTIME */
static void __dev_pm_qos_hide_latency_limit(struct device *dev) {}
static void __dev_pm_qos_hide_flags(struct device *dev) {}
#endif /* CONFIG_PM_RUNTIME */ #endif /* CONFIG_PM_RUNTIME */
...@@ -708,6 +708,7 @@ void rpm_sysfs_remove(struct device *dev) ...@@ -708,6 +708,7 @@ void rpm_sysfs_remove(struct device *dev)
void dpm_sysfs_remove(struct device *dev) void dpm_sysfs_remove(struct device *dev)
{ {
dev_pm_qos_constraints_destroy(dev);
rpm_sysfs_remove(dev); rpm_sysfs_remove(dev);
sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group); sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group);
sysfs_remove_group(&dev->kobj, &pm_attr_group); sysfs_remove_group(&dev->kobj, &pm_attr_group);
......
...@@ -64,7 +64,7 @@ static void *get_cpu_dbs_info_s(int cpu) \ ...@@ -64,7 +64,7 @@ static void *get_cpu_dbs_info_s(int cpu) \
* dbs: used as a shortform for demand based switching It helps to keep variable * dbs: used as a shortform for demand based switching It helps to keep variable
* names smaller, simpler * names smaller, simpler
* cdbs: common dbs * cdbs: common dbs
* on_*: On-demand governor * od_*: On-demand governor
* cs_*: Conservative governor * cs_*: Conservative governor
*/ */
......
...@@ -28,13 +28,7 @@ ...@@ -28,13 +28,7 @@
static int hb_voltage_change(unsigned int freq) static int hb_voltage_change(unsigned int freq)
{ {
int i; u32 msg[HB_CPUFREQ_IPC_LEN] = {HB_CPUFREQ_CHANGE_NOTE, freq / 1000000};
u32 msg[HB_CPUFREQ_IPC_LEN];
msg[0] = HB_CPUFREQ_CHANGE_NOTE;
msg[1] = freq / 1000000;
for (i = 2; i < HB_CPUFREQ_IPC_LEN; i++)
msg[i] = 0;
return pl320_ipc_transmit(msg); return pl320_ipc_transmit(msg);
} }
......
...@@ -662,6 +662,9 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy) ...@@ -662,6 +662,9 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
cpu = all_cpu_data[policy->cpu]; cpu = all_cpu_data[policy->cpu];
if (!policy->cpuinfo.max_freq)
return -ENODEV;
intel_pstate_get_min_max(cpu, &min, &max); intel_pstate_get_min_max(cpu, &min, &max);
limits.min_perf_pct = (policy->min * 100) / policy->cpuinfo.max_freq; limits.min_perf_pct = (policy->min * 100) / policy->cpuinfo.max_freq;
...@@ -747,37 +750,11 @@ static struct cpufreq_driver intel_pstate_driver = { ...@@ -747,37 +750,11 @@ static struct cpufreq_driver intel_pstate_driver = {
.owner = THIS_MODULE, .owner = THIS_MODULE,
}; };
static void intel_pstate_exit(void)
{
int cpu;
sysfs_remove_group(intel_pstate_kobject,
&intel_pstate_attr_group);
debugfs_remove_recursive(debugfs_parent);
cpufreq_unregister_driver(&intel_pstate_driver);
if (!all_cpu_data)
return;
get_online_cpus();
for_each_online_cpu(cpu) {
if (all_cpu_data[cpu]) {
del_timer_sync(&all_cpu_data[cpu]->timer);
kfree(all_cpu_data[cpu]);
}
}
put_online_cpus();
vfree(all_cpu_data);
}
module_exit(intel_pstate_exit);
static int __initdata no_load; static int __initdata no_load;
static int __init intel_pstate_init(void) static int __init intel_pstate_init(void)
{ {
int rc = 0; int cpu, rc = 0;
const struct x86_cpu_id *id; const struct x86_cpu_id *id;
if (no_load) if (no_load)
...@@ -802,7 +779,16 @@ static int __init intel_pstate_init(void) ...@@ -802,7 +779,16 @@ static int __init intel_pstate_init(void)
intel_pstate_sysfs_expose_params(); intel_pstate_sysfs_expose_params();
return rc; return rc;
out: out:
intel_pstate_exit(); get_online_cpus();
for_each_online_cpu(cpu) {
if (all_cpu_data[cpu]) {
del_timer_sync(&all_cpu_data[cpu]->timer);
kfree(all_cpu_data[cpu]);
}
}
put_online_cpus();
vfree(all_cpu_data);
return -ENODEV; return -ENODEV;
} }
device_initcall(intel_pstate_init); device_initcall(intel_pstate_init);
......
...@@ -138,8 +138,7 @@ int pl320_ipc_unregister_notifier(struct notifier_block *nb) ...@@ -138,8 +138,7 @@ int pl320_ipc_unregister_notifier(struct notifier_block *nb)
} }
EXPORT_SYMBOL_GPL(pl320_ipc_unregister_notifier); EXPORT_SYMBOL_GPL(pl320_ipc_unregister_notifier);
static int __init pl320_probe(struct amba_device *adev, static int pl320_probe(struct amba_device *adev, const struct amba_id *id)
const struct amba_id *id)
{ {
int ret; int ret;
......
...@@ -331,8 +331,14 @@ static void pci_acpi_cleanup(struct device *dev) ...@@ -331,8 +331,14 @@ static void pci_acpi_cleanup(struct device *dev)
} }
} }
static bool pci_acpi_bus_match(struct device *dev)
{
return dev->bus == &pci_bus_type;
}
static struct acpi_bus_type acpi_pci_bus = { static struct acpi_bus_type acpi_pci_bus = {
.bus = &pci_bus_type, .name = "PCI",
.match = pci_acpi_bus_match,
.find_device = acpi_pci_find_device, .find_device = acpi_pci_find_device,
.setup = pci_acpi_setup, .setup = pci_acpi_setup,
.cleanup = pci_acpi_cleanup, .cleanup = pci_acpi_cleanup,
......
...@@ -353,8 +353,14 @@ static int __init acpi_pnp_find_device(struct device *dev, acpi_handle * handle) ...@@ -353,8 +353,14 @@ static int __init acpi_pnp_find_device(struct device *dev, acpi_handle * handle)
/* complete initialization of a PNPACPI device includes having /* complete initialization of a PNPACPI device includes having
* pnpdev->dev.archdata.acpi_handle point to its ACPI sibling. * pnpdev->dev.archdata.acpi_handle point to its ACPI sibling.
*/ */
static bool acpi_pnp_bus_match(struct device *dev)
{
return dev->bus == &pnp_bus_type;
}
static struct acpi_bus_type __initdata acpi_pnp_bus = { static struct acpi_bus_type __initdata acpi_pnp_bus = {
.bus = &pnp_bus_type, .name = "PNP",
.match = acpi_pnp_bus_match,
.find_device = acpi_pnp_find_device, .find_device = acpi_pnp_find_device,
}; };
......
...@@ -71,9 +71,14 @@ struct kmem_cache *scsi_sdb_cache; ...@@ -71,9 +71,14 @@ struct kmem_cache *scsi_sdb_cache;
#ifdef CONFIG_ACPI #ifdef CONFIG_ACPI
#include <acpi/acpi_bus.h> #include <acpi/acpi_bus.h>
static bool acpi_scsi_bus_match(struct device *dev)
{
return dev->bus == &scsi_bus_type;
}
int scsi_register_acpi_bus_type(struct acpi_bus_type *bus) int scsi_register_acpi_bus_type(struct acpi_bus_type *bus)
{ {
bus->bus = &scsi_bus_type; bus->match = acpi_scsi_bus_match;
return register_acpi_bus_type(bus); return register_acpi_bus_type(bus);
} }
EXPORT_SYMBOL_GPL(scsi_register_acpi_bus_type); EXPORT_SYMBOL_GPL(scsi_register_acpi_bus_type);
......
...@@ -210,9 +210,14 @@ static int usb_acpi_find_device(struct device *dev, acpi_handle *handle) ...@@ -210,9 +210,14 @@ static int usb_acpi_find_device(struct device *dev, acpi_handle *handle)
return 0; return 0;
} }
static bool usb_acpi_bus_match(struct device *dev)
{
return is_usb_device(dev) || is_usb_port(dev);
}
static struct acpi_bus_type usb_acpi_bus = { static struct acpi_bus_type usb_acpi_bus = {
.bus = &usb_bus_type, .name = "USB",
.find_bridge = usb_acpi_find_device, .match = usb_acpi_bus_match,
.find_device = usb_acpi_find_device, .find_device = usb_acpi_find_device,
}; };
......
...@@ -437,11 +437,9 @@ void acpi_remove_dir(struct acpi_device *); ...@@ -437,11 +437,9 @@ void acpi_remove_dir(struct acpi_device *);
*/ */
struct acpi_bus_type { struct acpi_bus_type {
struct list_head list; struct list_head list;
struct bus_type *bus; const char *name;
/* For general devices under the bus */ bool (*match)(struct device *dev);
int (*find_device) (struct device *, acpi_handle *); int (*find_device) (struct device *, acpi_handle *);
/* For bridges, such as PCI root bridge, IDE controller */
int (*find_bridge) (struct device *, acpi_handle *);
void (*setup)(struct device *); void (*setup)(struct device *);
void (*cleanup)(struct device *); void (*cleanup)(struct device *);
}; };
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment