Commit c4ec2071 authored by Linus Torvalds

Merge branch 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/lenb/linux-acpi-2.6

* 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/lenb/linux-acpi-2.6: (41 commits)
  ACPICA: hw: Don't carry spinlock over suspend
  ACPICA: hw: remove use_lock flag from acpi_hw_register_{read, write}
  ACPI: cpuidle: port idle timer suspend/resume workaround to cpuidle
  ACPI: clean up acpi_enter_sleep_state_prep
  Hibernation: Make sure that ACPI is enabled in acpi_hibernation_finish
  ACPI: suppress uninitialized var warning
  cpuidle: consolidate 2.6.22 cpuidle branch into one patch
  ACPI: thinkpad-acpi: skip blanks before the data when parsing sysfs
  ACPI: AC: Add sysfs interface
  ACPI: SBS: Add sysfs alarm
  ACPI: SBS: Add ACPI_PROCFS around procfs handling code.
  ACPI: SBS: Add support for power_supply class (and sysfs)
  ACPI: SBS: Make SBS reads table-driven.
  ACPI: SBS: Simplify data structures in SBS
  ACPI: SBS: Split host controller (ACPI0001) from SBS driver (ACPI0002)
  ACPI: EC: Add new query handler to list head.
  ACPI: Add acpi_bus_generate_event4() function
  ACPI: Battery: add sysfs alarm
  ACPI: Battery: Add sysfs support
  ACPI: Battery: Misc clean-ups, no functional changes
  ...

Fix up conflicts in drivers/misc/thinkpad_acpi.[ch] manually
parents ec262681 00a2b433
......@@ -105,10 +105,15 @@ The version of thinkpad-acpi's sysfs interface is exported by the driver
as a driver attribute (see below).
Sysfs driver attributes are on the driver's sysfs attribute space,
for 2.6.20 this is /sys/bus/platform/drivers/thinkpad_acpi/.
for 2.6.23 this is /sys/bus/platform/drivers/thinkpad_acpi/ and
/sys/bus/platform/drivers/thinkpad_hwmon/
Sysfs device attributes are on the driver's sysfs attribute space,
for 2.6.20 this is /sys/devices/platform/thinkpad_acpi/.
Sysfs device attributes are on the thinkpad_acpi device sysfs attribute
space, for 2.6.23 this is /sys/devices/platform/thinkpad_acpi/.
Sysfs device attributes for the sensors and fan are on the
thinkpad_hwmon device's sysfs attribute space, but you should locate it
looking for a hwmon device with the name attribute of "thinkpad".
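That lookup can be done with a few lines of userspace C. The sketch below is not part of the driver or of this patch; it scans /sys/class/hwmon for a device whose name attribute reads "thinkpad". Whether the name attribute sits in hwmonN/ or in hwmonN/device/ varies with kernel version, so both locations are probed; treat the exact paths as assumptions.

/* Hedged userspace sketch: find the hwmon device named "thinkpad". */
#include <stdio.h>
#include <string.h>
#include <dirent.h>

static int read_name(const char *path, char *buf, size_t len)
{
	FILE *f = fopen(path, "r");
	if (!f)
		return -1;
	if (!fgets(buf, len, f)) {
		fclose(f);
		return -1;
	}
	fclose(f);
	buf[strcspn(buf, "\n")] = '\0';
	return 0;
}

int main(void)
{
	DIR *d = opendir("/sys/class/hwmon");
	struct dirent *e;
	char path[256], name[64];

	if (!d)
		return 1;
	while ((e = readdir(d)) != NULL) {
		if (strncmp(e->d_name, "hwmon", 5))
			continue;
		/* name attribute location is an assumption; try both spots */
		snprintf(path, sizeof(path),
			 "/sys/class/hwmon/%s/device/name", e->d_name);
		if (read_name(path, name, sizeof(name))) {
			snprintf(path, sizeof(path),
				 "/sys/class/hwmon/%s/name", e->d_name);
			if (read_name(path, name, sizeof(name)))
				continue;
		}
		if (!strcmp(name, "thinkpad"))
			printf("found: /sys/class/hwmon/%s\n", e->d_name);
	}
	closedir(d);
	return 0;
}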
Driver version
--------------
......@@ -766,7 +771,7 @@ Temperature sensors
-------------------
procfs: /proc/acpi/ibm/thermal
sysfs device attributes: (hwmon) temp*_input
sysfs device attributes: (hwmon "thinkpad") temp*_input
Most ThinkPads include six or more separate temperature sensors but only
expose the CPU temperature through the standard ACPI methods. This
......@@ -989,7 +994,9 @@ Fan control and monitoring: fan speed, fan enable/disable
---------------------------------------------------------
procfs: /proc/acpi/ibm/fan
sysfs device attributes: (hwmon) fan_input, pwm1, pwm1_enable
sysfs device attributes: (hwmon "thinkpad") fan1_input, pwm1,
pwm1_enable
sysfs hwmon driver attributes: fan_watchdog
NOTE NOTE NOTE: fan control operations are disabled by default for
safety reasons. To enable them, the module parameter "fan_control=1"
......@@ -1131,7 +1138,7 @@ hwmon device attribute fan1_input:
which can take up to two minutes. May return rubbish on older
ThinkPads.
driver attribute fan_watchdog:
hwmon driver attribute fan_watchdog:
Fan safety watchdog timer interval, in seconds. Minimum is
1 second, maximum is 120 seconds. 0 disables the watchdog.
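A hedged userspace sketch of setting this attribute follows; the path assumes fan_watchdog lives under the thinkpad_hwmon driver directory quoted earlier in this document, which may differ on other kernel versions.

/* Hedged sketch: set the fan watchdog to its 120 s maximum. */
#include <stdio.h>

int main(void)
{
	const char *path =
		"/sys/bus/platform/drivers/thinkpad_hwmon/fan_watchdog";
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		return 1;
	}
	fprintf(f, "120\n");	/* 1..120 seconds; 0 disables the watchdog */
	fclose(f);
	return 0;
}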
......@@ -1233,3 +1240,9 @@ Sysfs interface changelog:
layer, the radio switch generates input event EV_RADIO,
and the driver enables hot key handling by default in
the firmware.
0x020000: ABI fix: added a separate hwmon platform device and
driver, which must be located by name (thinkpad)
and the hwmon class for libsensors4 (lm-sensors 3)
compatibility. Moved all hwmon attributes to this
new platform device.
......@@ -1082,6 +1082,8 @@ endif # APM
source "arch/x86/kernel/cpu/cpufreq/Kconfig"
source "drivers/cpuidle/Kconfig"
endmenu
menu "Bus options (PCI, PCMCIA, EISA, MCA, ISA)"
......
......@@ -725,6 +725,8 @@ source "drivers/acpi/Kconfig"
source "arch/x86/kernel/cpufreq/Kconfig"
source "drivers/cpuidle/Kconfig"
endmenu
menu "Bus options (PCI etc.)"
......
......@@ -76,6 +76,7 @@ obj-$(CONFIG_MCA) += mca/
obj-$(CONFIG_EISA) += eisa/
obj-$(CONFIG_LGUEST_GUEST) += lguest/
obj-$(CONFIG_CPU_FREQ) += cpufreq/
obj-$(CONFIG_CPU_IDLE) += cpuidle/
obj-$(CONFIG_MMC) += mmc/
obj-$(CONFIG_NEW_LEDS) += leds/
obj-$(CONFIG_INFINIBAND) += infiniband/
......
......@@ -88,7 +88,7 @@ config ACPI_PROC_EVENT
config ACPI_AC
tristate "AC Adapter"
depends on X86
depends on X86 && POWER_SUPPLY
default y
help
This driver adds support for the AC Adapter object, which indicates
......@@ -97,7 +97,7 @@ config ACPI_AC
config ACPI_BATTERY
tristate "Battery"
depends on X86
depends on X86 && POWER_SUPPLY
default y
help
This driver adds support for battery information through
......@@ -117,6 +117,7 @@ config ACPI_BUTTON
config ACPI_VIDEO
tristate "Video"
depends on X86 && BACKLIGHT_CLASS_DEVICE && VIDEO_OUTPUT_CONTROL
depends on INPUT
help
This driver implement the ACPI Extensions For Display Adapters
for integrated graphics devices on motherboard, as specified in
......@@ -349,12 +350,11 @@ config ACPI_HOTPLUG_MEMORY
$>modprobe acpi_memhotplug
config ACPI_SBS
tristate "Smart Battery System (EXPERIMENTAL)"
tristate "Smart Battery System"
depends on X86
depends on EXPERIMENTAL
depends on POWER_SUPPLY
help
This driver adds support for the Smart Battery System.
A "Smart Battery" is quite old and quite rare compared
to today's ACPI "Control Method" battery.
This driver adds support for the Smart Battery System, another
type of access to battery information, found on some laptops.
endif # ACPI
......@@ -60,3 +60,4 @@ obj-$(CONFIG_ACPI_TOSHIBA) += toshiba_acpi.o
obj-$(CONFIG_ACPI_HOTPLUG_MEMORY) += acpi_memhotplug.o
obj-y += cm_sbs.o
obj-$(CONFIG_ACPI_SBS) += sbs.o
obj-$(CONFIG_ACPI_SBS) += sbshc.o
......@@ -29,6 +29,7 @@
#include <linux/types.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/power_supply.h>
#include <acpi/acpi_bus.h>
#include <acpi/acpi_drivers.h>
......@@ -72,16 +73,37 @@ static struct acpi_driver acpi_ac_driver = {
};
struct acpi_ac {
struct power_supply charger;
struct acpi_device * device;
unsigned long state;
};
#define to_acpi_ac(x) container_of(x, struct acpi_ac, charger);
static const struct file_operations acpi_ac_fops = {
.open = acpi_ac_open_fs,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static int get_ac_property(struct power_supply *psy,
enum power_supply_property psp,
union power_supply_propval *val)
{
struct acpi_ac *ac = to_acpi_ac(psy);
switch (psp) {
case POWER_SUPPLY_PROP_ONLINE:
val->intval = ac->state;
break;
default:
return -EINVAL;
}
return 0;
}
static enum power_supply_property ac_props[] = {
POWER_SUPPLY_PROP_ONLINE,
};
/* --------------------------------------------------------------------------
AC Adapter Management
......@@ -208,6 +230,7 @@ static void acpi_ac_notify(acpi_handle handle, u32 event, void *data)
acpi_bus_generate_netlink_event(device->pnp.device_class,
device->dev.bus_id, event,
(u32) ac->state);
kobject_uevent(&ac->charger.dev->kobj, KOBJ_CHANGE);
break;
default:
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
......@@ -244,7 +267,12 @@ static int acpi_ac_add(struct acpi_device *device)
result = acpi_ac_add_fs(device);
if (result)
goto end;
ac->charger.name = acpi_device_bid(device);
ac->charger.type = POWER_SUPPLY_TYPE_MAINS;
ac->charger.properties = ac_props;
ac->charger.num_properties = ARRAY_SIZE(ac_props);
ac->charger.get_property = get_ac_property;
power_supply_register(&ac->device->dev, &ac->charger);
status = acpi_install_notify_handler(device->handle,
ACPI_ALL_NOTIFY, acpi_ac_notify,
ac);
......@@ -279,7 +307,8 @@ static int acpi_ac_remove(struct acpi_device *device, int type)
status = acpi_remove_notify_handler(device->handle,
ACPI_ALL_NOTIFY, acpi_ac_notify);
if (ac->charger.dev)
power_supply_unregister(&ac->charger);
acpi_ac_remove_fs(device);
kfree(ac);
......
......@@ -286,15 +286,11 @@ DECLARE_WAIT_QUEUE_HEAD(acpi_bus_event_queue);
extern int event_is_open;
int acpi_bus_generate_proc_event(struct acpi_device *device, u8 type, int data)
int acpi_bus_generate_proc_event4(const char *device_class, const char *bus_id, u8 type, int data)
{
struct acpi_bus_event *event = NULL;
struct acpi_bus_event *event;
unsigned long flags = 0;
if (!device)
return -EINVAL;
/* drop event on the floor if no one's listening */
if (!event_is_open)
return 0;
......@@ -303,8 +299,8 @@ int acpi_bus_generate_proc_event(struct acpi_device *device, u8 type, int data)
if (!event)
return -ENOMEM;
strcpy(event->device_class, device->pnp.device_class);
strcpy(event->bus_id, device->pnp.bus_id);
strcpy(event->device_class, device_class);
strcpy(event->bus_id, bus_id);
event->type = type;
event->data = data;
......@@ -315,6 +311,17 @@ int acpi_bus_generate_proc_event(struct acpi_device *device, u8 type, int data)
wake_up_interruptible(&acpi_bus_event_queue);
return 0;
}
EXPORT_SYMBOL_GPL(acpi_bus_generate_proc_event4);
int acpi_bus_generate_proc_event(struct acpi_device *device, u8 type, int data)
{
if (!device)
return -EINVAL;
return acpi_bus_generate_proc_event4(device->pnp.device_class,
device->pnp.bus_id, type, data);
}
EXPORT_SYMBOL(acpi_bus_generate_proc_event);
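A hedged, hypothetical call site for the new 4-argument variant follows. The class and bus_id strings are invented for illustration, the header is assumed to carry the declaration, and 0x80 is used only because it is the conventional "status change" notify value; the point is simply that no struct acpi_device is needed.

/* Hypothetical caller (names invented): report an event for a
 * sub-device that has no struct acpi_device of its own. */
#include <acpi/acpi_bus.h>

static void example_report_change(void)
{
	acpi_bus_generate_proc_event4("example_class", "EXMP0001", 0x80, 0);
}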
......
......@@ -121,6 +121,7 @@ static struct acpi_ec {
atomic_t event_count;
wait_queue_head_t wait;
struct list_head list;
u8 handlers_installed;
} *boot_ec, *first_ec;
/* --------------------------------------------------------------------------
......@@ -425,7 +426,7 @@ int acpi_ec_add_query_handler(struct acpi_ec *ec, u8 query_bit,
handler->func = func;
handler->data = data;
mutex_lock(&ec->lock);
list_add_tail(&handler->node, &ec->list);
list_add(&handler->node, &ec->list);
mutex_unlock(&ec->lock);
return 0;
}
......@@ -440,7 +441,6 @@ void acpi_ec_remove_query_handler(struct acpi_ec *ec, u8 query_bit)
if (query_bit == handler->query_bit) {
list_del(&handler->node);
kfree(handler);
break;
}
}
mutex_unlock(&ec->lock);
......@@ -680,32 +680,50 @@ ec_parse_device(acpi_handle handle, u32 Level, void *context, void **retval)
status = acpi_evaluate_integer(handle, "_GPE", NULL, &ec->gpe);
if (ACPI_FAILURE(status))
return status;
/* Find and register all query methods */
acpi_walk_namespace(ACPI_TYPE_METHOD, handle, 1,
acpi_ec_register_query_methods, ec, NULL);
/* Use the global lock for all EC transactions? */
acpi_evaluate_integer(handle, "_GLK", NULL, &ec->global_lock);
ec->handle = handle;
printk(KERN_INFO PREFIX "GPE = 0x%lx, I/O: command/status = 0x%lx, data = 0x%lx\n",
ec->gpe, ec->command_addr, ec->data_addr);
return AE_CTRL_TERMINATE;
}
static void ec_remove_handlers(struct acpi_ec *ec)
{
if (ACPI_FAILURE(acpi_remove_address_space_handler(ec->handle,
ACPI_ADR_SPACE_EC, &acpi_ec_space_handler)))
printk(KERN_ERR PREFIX "failed to remove space handler\n");
if (ACPI_FAILURE(acpi_remove_gpe_handler(NULL, ec->gpe,
&acpi_ec_gpe_handler)))
printk(KERN_ERR PREFIX "failed to remove gpe handler\n");
ec->handlers_installed = 0;
}
static int acpi_ec_add(struct acpi_device *device)
{
struct acpi_ec *ec = NULL;
if (!device)
return -EINVAL;
strcpy(acpi_device_name(device), ACPI_EC_DEVICE_NAME);
strcpy(acpi_device_class(device), ACPI_EC_CLASS);
/* Check for boot EC */
if (boot_ec) {
if (boot_ec->handle == device->handle) {
/* Pre-loaded EC from DSDT, just move pointer */
ec = boot_ec;
boot_ec = NULL;
goto end;
} else if (boot_ec->handle == ACPI_ROOT_OBJECT) {
/* ECDT-based EC, time to shut it down */
ec_remove_handlers(boot_ec);
kfree(boot_ec);
first_ec = boot_ec = NULL;
}
}
ec = make_acpi_ec();
if (!ec)
return -ENOMEM;
......@@ -715,25 +733,14 @@ static int acpi_ec_add(struct acpi_device *device)
kfree(ec);
return -EINVAL;
}
/* Check if we found the boot EC */
if (boot_ec) {
if (boot_ec->gpe == ec->gpe) {
/* We might have incorrect info for GL at boot time */
mutex_lock(&boot_ec->lock);
boot_ec->global_lock = ec->global_lock;
/* Copy handlers from new ec into boot ec */
list_splice(&ec->list, &boot_ec->list);
mutex_unlock(&boot_ec->lock);
kfree(ec);
ec = boot_ec;
}
} else
first_ec = ec;
ec->handle = device->handle;
end:
if (!first_ec)
first_ec = ec;
acpi_driver_data(device) = ec;
acpi_ec_add_fs(device);
printk(KERN_INFO PREFIX "GPE = 0x%lx, I/O: command/status = 0x%lx, data = 0x%lx\n",
ec->gpe, ec->command_addr, ec->data_addr);
return 0;
}
......@@ -756,10 +763,7 @@ static int acpi_ec_remove(struct acpi_device *device, int type)
acpi_driver_data(device) = NULL;
if (ec == first_ec)
first_ec = NULL;
/* Don't touch boot EC */
if (boot_ec != ec)
kfree(ec);
kfree(ec);
return 0;
}
......@@ -789,6 +793,8 @@ ec_parse_io_ports(struct acpi_resource *resource, void *context)
static int ec_install_handlers(struct acpi_ec *ec)
{
acpi_status status;
if (ec->handlers_installed)
return 0;
status = acpi_install_gpe_handler(NULL, ec->gpe,
ACPI_GPE_EDGE_TRIGGERED,
&acpi_ec_gpe_handler, ec);
......@@ -807,6 +813,7 @@ static int ec_install_handlers(struct acpi_ec *ec)
return -ENODEV;
}
ec->handlers_installed = 1;
return 0;
}
......@@ -823,41 +830,22 @@ static int acpi_ec_start(struct acpi_device *device)
if (!ec)
return -EINVAL;
/* Boot EC is already working */
if (ec != boot_ec)
ret = ec_install_handlers(ec);
ret = ec_install_handlers(ec);
/* EC is fully operational, allow queries */
atomic_set(&ec->query_pending, 0);
return ret;
}
static int acpi_ec_stop(struct acpi_device *device, int type)
{
acpi_status status;
struct acpi_ec *ec;
if (!device)
return -EINVAL;
ec = acpi_driver_data(device);
if (!ec)
return -EINVAL;
/* Don't touch boot EC */
if (ec == boot_ec)
return 0;
status = acpi_remove_address_space_handler(ec->handle,
ACPI_ADR_SPACE_EC,
&acpi_ec_space_handler);
if (ACPI_FAILURE(status))
return -ENODEV;
status = acpi_remove_gpe_handler(NULL, ec->gpe, &acpi_ec_gpe_handler);
if (ACPI_FAILURE(status))
return -ENODEV;
ec_remove_handlers(ec);
return 0;
}
......@@ -877,7 +865,7 @@ int __init acpi_ec_ecdt_probe(void)
status = acpi_get_table(ACPI_SIG_ECDT, 1,
(struct acpi_table_header **)&ecdt_ptr);
if (ACPI_SUCCESS(status)) {
printk(KERN_INFO PREFIX "EC description table is found, configuring boot EC\n\n");
printk(KERN_INFO PREFIX "EC description table is found, configuring boot EC\n");
boot_ec->command_addr = ecdt_ptr->control.address;
boot_ec->data_addr = ecdt_ptr->data.address;
boot_ec->gpe = ecdt_ptr->gpe;
......@@ -899,7 +887,6 @@ int __init acpi_ec_ecdt_probe(void)
error:
kfree(boot_ec);
boot_ec = NULL;
return -ENODEV;
}
......
......@@ -239,10 +239,8 @@ u32 acpi_ev_fixed_event_detect(void)
* Read the fixed feature status and enable registers, as all the cases
* depend on their values. Ignore errors here.
*/
(void)acpi_hw_register_read(ACPI_MTX_DO_NOT_LOCK,
ACPI_REGISTER_PM1_STATUS, &fixed_status);
(void)acpi_hw_register_read(ACPI_MTX_DO_NOT_LOCK,
ACPI_REGISTER_PM1_ENABLE, &fixed_enable);
(void)acpi_hw_register_read(ACPI_REGISTER_PM1_STATUS, &fixed_status);
(void)acpi_hw_register_read(ACPI_REGISTER_PM1_ENABLE, &fixed_enable);
ACPI_DEBUG_PRINT((ACPI_DB_INTERRUPTS,
"Fixed Event Block: Enable %08X Status %08X\n",
......
......@@ -75,8 +75,7 @@ acpi_status acpi_hw_clear_acpi_status(void)
lock_flags = acpi_os_acquire_lock(acpi_gbl_hardware_lock);
status = acpi_hw_register_write(ACPI_MTX_DO_NOT_LOCK,
ACPI_REGISTER_PM1_STATUS,
status = acpi_hw_register_write(ACPI_REGISTER_PM1_STATUS,
ACPI_BITMASK_ALL_FIXED_STATUS);
if (ACPI_FAILURE(status)) {
goto unlock_and_exit;
......@@ -259,7 +258,7 @@ struct acpi_bit_register_info *acpi_hw_get_bit_register_info(u32 register_id)
*
******************************************************************************/
acpi_status acpi_get_register(u32 register_id, u32 * return_value)
acpi_status acpi_get_register_unlocked(u32 register_id, u32 * return_value)
{
u32 register_value = 0;
struct acpi_bit_register_info *bit_reg_info;
......@@ -276,8 +275,7 @@ acpi_status acpi_get_register(u32 register_id, u32 * return_value)
/* Read from the register */
status = acpi_hw_register_read(ACPI_MTX_LOCK,
bit_reg_info->parent_register,
status = acpi_hw_register_read(bit_reg_info->parent_register,
&register_value);
if (ACPI_SUCCESS(status)) {
......@@ -298,6 +296,16 @@ acpi_status acpi_get_register(u32 register_id, u32 * return_value)
return_ACPI_STATUS(status);
}
acpi_status acpi_get_register(u32 register_id, u32 * return_value)
{
acpi_status status;
acpi_cpu_flags flags;
flags = acpi_os_acquire_lock(acpi_gbl_hardware_lock);
status = acpi_get_register_unlocked(register_id, return_value);
acpi_os_release_lock(acpi_gbl_hardware_lock, flags);
return status;
}
ACPI_EXPORT_SYMBOL(acpi_get_register)
/*******************************************************************************
......@@ -335,8 +343,7 @@ acpi_status acpi_set_register(u32 register_id, u32 value)
/* Always do a register read first so we can insert the new bits */
status = acpi_hw_register_read(ACPI_MTX_DO_NOT_LOCK,
bit_reg_info->parent_register,
status = acpi_hw_register_read(bit_reg_info->parent_register,
&register_value);
if (ACPI_FAILURE(status)) {
goto unlock_and_exit;
......@@ -363,8 +370,7 @@ acpi_status acpi_set_register(u32 register_id, u32 value)
bit_reg_info->
access_bit_mask);
if (value) {
status = acpi_hw_register_write(ACPI_MTX_DO_NOT_LOCK,
ACPI_REGISTER_PM1_STATUS,
status = acpi_hw_register_write(ACPI_REGISTER_PM1_STATUS,
(u16) value);
register_value = 0;
}
......@@ -377,8 +383,7 @@ acpi_status acpi_set_register(u32 register_id, u32 value)
bit_reg_info->access_bit_mask,
value);
status = acpi_hw_register_write(ACPI_MTX_DO_NOT_LOCK,
ACPI_REGISTER_PM1_ENABLE,
status = acpi_hw_register_write(ACPI_REGISTER_PM1_ENABLE,
(u16) register_value);
break;
......@@ -397,15 +402,13 @@ acpi_status acpi_set_register(u32 register_id, u32 value)
bit_reg_info->access_bit_mask,
value);
status = acpi_hw_register_write(ACPI_MTX_DO_NOT_LOCK,
ACPI_REGISTER_PM1_CONTROL,
status = acpi_hw_register_write(ACPI_REGISTER_PM1_CONTROL,
(u16) register_value);
break;
case ACPI_REGISTER_PM2_CONTROL:
status = acpi_hw_register_read(ACPI_MTX_DO_NOT_LOCK,
ACPI_REGISTER_PM2_CONTROL,
status = acpi_hw_register_read(ACPI_REGISTER_PM2_CONTROL,
&register_value);
if (ACPI_FAILURE(status)) {
goto unlock_and_exit;
......@@ -430,8 +433,7 @@ acpi_status acpi_set_register(u32 register_id, u32 value)
xpm2_control_block.
address)));
status = acpi_hw_register_write(ACPI_MTX_DO_NOT_LOCK,
ACPI_REGISTER_PM2_CONTROL,
status = acpi_hw_register_write(ACPI_REGISTER_PM2_CONTROL,
(u8) (register_value));
break;
......@@ -461,8 +463,7 @@ ACPI_EXPORT_SYMBOL(acpi_set_register)
*
* FUNCTION: acpi_hw_register_read
*
* PARAMETERS: use_lock - Lock hardware? True/False
* register_id - ACPI Register ID
* PARAMETERS: register_id - ACPI Register ID
* return_value - Where the register value is returned
*
* RETURN: Status and the value read.
......@@ -471,19 +472,14 @@ ACPI_EXPORT_SYMBOL(acpi_set_register)
*
******************************************************************************/
acpi_status
acpi_hw_register_read(u8 use_lock, u32 register_id, u32 * return_value)
acpi_hw_register_read(u32 register_id, u32 * return_value)
{
u32 value1 = 0;
u32 value2 = 0;
acpi_status status;
acpi_cpu_flags lock_flags = 0;
ACPI_FUNCTION_TRACE(hw_register_read);
if (ACPI_MTX_LOCK == use_lock) {
lock_flags = acpi_os_acquire_lock(acpi_gbl_hardware_lock);
}
switch (register_id) {
case ACPI_REGISTER_PM1_STATUS: /* 16-bit access */
......@@ -491,7 +487,7 @@ acpi_hw_register_read(u8 use_lock, u32 register_id, u32 * return_value)
acpi_hw_low_level_read(16, &value1,
&acpi_gbl_FADT.xpm1a_event_block);
if (ACPI_FAILURE(status)) {
goto unlock_and_exit;
goto exit;
}
/* PM1B is optional */
......@@ -507,7 +503,7 @@ acpi_hw_register_read(u8 use_lock, u32 register_id, u32 * return_value)
status =
acpi_hw_low_level_read(16, &value1, &acpi_gbl_xpm1a_enable);
if (ACPI_FAILURE(status)) {
goto unlock_and_exit;
goto exit;
}
/* PM1B is optional */
......@@ -523,7 +519,7 @@ acpi_hw_register_read(u8 use_lock, u32 register_id, u32 * return_value)
acpi_hw_low_level_read(16, &value1,
&acpi_gbl_FADT.xpm1a_control_block);
if (ACPI_FAILURE(status)) {
goto unlock_and_exit;
goto exit;
}
status =
......@@ -558,10 +554,7 @@ acpi_hw_register_read(u8 use_lock, u32 register_id, u32 * return_value)
break;
}
unlock_and_exit:
if (ACPI_MTX_LOCK == use_lock) {
acpi_os_release_lock(acpi_gbl_hardware_lock, lock_flags);
}
exit:
if (ACPI_SUCCESS(status)) {
*return_value = value1;
......@@ -574,8 +567,7 @@ acpi_hw_register_read(u8 use_lock, u32 register_id, u32 * return_value)
*
* FUNCTION: acpi_hw_register_write
*
* PARAMETERS: use_lock - Lock hardware? True/False
* register_id - ACPI Register ID
* PARAMETERS: register_id - ACPI Register ID
* Value - The value to write
*
* RETURN: Status
......@@ -597,28 +589,22 @@ acpi_hw_register_read(u8 use_lock, u32 register_id, u32 * return_value)
*
******************************************************************************/
acpi_status acpi_hw_register_write(u8 use_lock, u32 register_id, u32 value)
acpi_status acpi_hw_register_write(u32 register_id, u32 value)
{
acpi_status status;
acpi_cpu_flags lock_flags = 0;
u32 read_value;
ACPI_FUNCTION_TRACE(hw_register_write);
if (ACPI_MTX_LOCK == use_lock) {
lock_flags = acpi_os_acquire_lock(acpi_gbl_hardware_lock);
}
switch (register_id) {
case ACPI_REGISTER_PM1_STATUS: /* 16-bit access */
/* Perform a read first to preserve certain bits (per ACPI spec) */
status = acpi_hw_register_read(ACPI_MTX_DO_NOT_LOCK,
ACPI_REGISTER_PM1_STATUS,
status = acpi_hw_register_read(ACPI_REGISTER_PM1_STATUS,
&read_value);
if (ACPI_FAILURE(status)) {
goto unlock_and_exit;
goto exit;
}
/* Insert the bits to be preserved */
......@@ -632,7 +618,7 @@ acpi_status acpi_hw_register_write(u8 use_lock, u32 register_id, u32 value)
acpi_hw_low_level_write(16, value,
&acpi_gbl_FADT.xpm1a_event_block);
if (ACPI_FAILURE(status)) {
goto unlock_and_exit;
goto exit;
}
/* PM1B is optional */
......@@ -647,7 +633,7 @@ acpi_status acpi_hw_register_write(u8 use_lock, u32 register_id, u32 value)
status =
acpi_hw_low_level_write(16, value, &acpi_gbl_xpm1a_enable);
if (ACPI_FAILURE(status)) {
goto unlock_and_exit;
goto exit;
}
/* PM1B is optional */
......@@ -661,11 +647,10 @@ acpi_status acpi_hw_register_write(u8 use_lock, u32 register_id, u32 value)
/*
* Perform a read first to preserve certain bits (per ACPI spec)
*/
status = acpi_hw_register_read(ACPI_MTX_DO_NOT_LOCK,
ACPI_REGISTER_PM1_CONTROL,
status = acpi_hw_register_read(ACPI_REGISTER_PM1_CONTROL,
&read_value);
if (ACPI_FAILURE(status)) {
goto unlock_and_exit;
goto exit;
}
/* Insert the bits to be preserved */
......@@ -679,7 +664,7 @@ acpi_status acpi_hw_register_write(u8 use_lock, u32 register_id, u32 value)
acpi_hw_low_level_write(16, value,
&acpi_gbl_FADT.xpm1a_control_block);
if (ACPI_FAILURE(status)) {
goto unlock_and_exit;
goto exit;
}
status =
......@@ -728,11 +713,7 @@ acpi_status acpi_hw_register_write(u8 use_lock, u32 register_id, u32 value)
break;
}
unlock_and_exit:
if (ACPI_MTX_LOCK == use_lock) {
acpi_os_release_lock(acpi_gbl_hardware_lock, lock_flags);
}
exit:
return_ACPI_STATUS(status);
}
......
......@@ -234,15 +234,11 @@ acpi_status acpi_enter_sleep_state_prep(u8 sleep_state)
"While executing method _SST"));
}
/*
* 1) Disable/Clear all GPEs
*/
/* Disable/Clear all GPEs */
status = acpi_hw_disable_all_gpes();
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
return_ACPI_STATUS(AE_OK);
return_ACPI_STATUS(status);
}
ACPI_EXPORT_SYMBOL(acpi_enter_sleep_state_prep)
......@@ -313,8 +309,7 @@ acpi_status asmlinkage acpi_enter_sleep_state(u8 sleep_state)
/* Get current value of PM1A control */
status = acpi_hw_register_read(ACPI_MTX_DO_NOT_LOCK,
ACPI_REGISTER_PM1_CONTROL, &PM1Acontrol);
status = acpi_hw_register_read(ACPI_REGISTER_PM1_CONTROL, &PM1Acontrol);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
......@@ -341,15 +336,13 @@ acpi_status asmlinkage acpi_enter_sleep_state(u8 sleep_state)
/* Write #1: fill in SLP_TYP data */
status = acpi_hw_register_write(ACPI_MTX_DO_NOT_LOCK,
ACPI_REGISTER_PM1A_CONTROL,
status = acpi_hw_register_write(ACPI_REGISTER_PM1A_CONTROL,
PM1Acontrol);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
status = acpi_hw_register_write(ACPI_MTX_DO_NOT_LOCK,
ACPI_REGISTER_PM1B_CONTROL,
status = acpi_hw_register_write(ACPI_REGISTER_PM1B_CONTROL,
PM1Bcontrol);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
......@@ -364,15 +357,13 @@ acpi_status asmlinkage acpi_enter_sleep_state(u8 sleep_state)
ACPI_FLUSH_CPU_CACHE();
status = acpi_hw_register_write(ACPI_MTX_DO_NOT_LOCK,
ACPI_REGISTER_PM1A_CONTROL,
status = acpi_hw_register_write(ACPI_REGISTER_PM1A_CONTROL,
PM1Acontrol);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
status = acpi_hw_register_write(ACPI_MTX_DO_NOT_LOCK,
ACPI_REGISTER_PM1B_CONTROL,
status = acpi_hw_register_write(ACPI_REGISTER_PM1B_CONTROL,
PM1Bcontrol);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
......@@ -392,8 +383,7 @@ acpi_status asmlinkage acpi_enter_sleep_state(u8 sleep_state)
*/
acpi_os_stall(10000000);
status = acpi_hw_register_write(ACPI_MTX_DO_NOT_LOCK,
ACPI_REGISTER_PM1_CONTROL,
status = acpi_hw_register_write(ACPI_REGISTER_PM1_CONTROL,
sleep_enable_reg_info->
access_bit_mask);
if (ACPI_FAILURE(status)) {
......@@ -404,7 +394,8 @@ acpi_status asmlinkage acpi_enter_sleep_state(u8 sleep_state)
/* Wait until we enter sleep state */
do {
status = acpi_get_register(ACPI_BITREG_WAKE_STATUS, &in_value);
status = acpi_get_register_unlocked(ACPI_BITREG_WAKE_STATUS,
&in_value);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
......@@ -520,8 +511,7 @@ acpi_status acpi_leave_sleep_state(u8 sleep_state)
/* Get current value of PM1A control */
status = acpi_hw_register_read(ACPI_MTX_DO_NOT_LOCK,
ACPI_REGISTER_PM1_CONTROL,
status = acpi_hw_register_read(ACPI_REGISTER_PM1_CONTROL,
&PM1Acontrol);
if (ACPI_SUCCESS(status)) {
......@@ -543,11 +533,9 @@ acpi_status acpi_leave_sleep_state(u8 sleep_state)
/* Just ignore any errors */
(void)acpi_hw_register_write(ACPI_MTX_DO_NOT_LOCK,
ACPI_REGISTER_PM1A_CONTROL,
(void)acpi_hw_register_write(ACPI_REGISTER_PM1A_CONTROL,
PM1Acontrol);
(void)acpi_hw_register_write(ACPI_MTX_DO_NOT_LOCK,
ACPI_REGISTER_PM1B_CONTROL,
(void)acpi_hw_register_write(ACPI_REGISTER_PM1B_CONTROL,
PM1Bcontrol);
}
}
......
......@@ -1042,14 +1042,6 @@ static int __init acpi_wake_gpes_always_on_setup(char *str)
__setup("acpi_wake_gpes_always_on", acpi_wake_gpes_always_on_setup);
/*
* max_cstate is defined in the base kernel so modules can
* change it w/o depending on the state of the processor module.
*/
unsigned int max_cstate = ACPI_PROCESSOR_MAX_POWER;
EXPORT_SYMBOL(max_cstate);
/*
* Acquire a spinlock.
*
......
......@@ -44,6 +44,7 @@
#include <linux/seq_file.h>
#include <linux/dmi.h>
#include <linux/moduleparam.h>
#include <linux/cpuidle.h>
#include <asm/io.h>
#include <asm/system.h>
......@@ -1049,11 +1050,13 @@ static int __init acpi_processor_init(void)
return -ENOMEM;
acpi_processor_dir->owner = THIS_MODULE;
result = cpuidle_register_driver(&acpi_idle_driver);
if (result < 0)
goto out_proc;
result = acpi_bus_register_driver(&acpi_processor_driver);
if (result < 0) {
remove_proc_entry(ACPI_PROCESSOR_CLASS, acpi_root_dir);
return result;
}
if (result < 0)
goto out_cpuidle;
acpi_processor_install_hotplug_notify();
......@@ -1062,11 +1065,18 @@ static int __init acpi_processor_init(void)
acpi_processor_ppc_init();
return 0;
out_cpuidle:
cpuidle_unregister_driver(&acpi_idle_driver);
out_proc:
remove_proc_entry(ACPI_PROCESSOR_CLASS, acpi_root_dir);
return result;
}
static void __exit acpi_processor_exit(void)
{
acpi_processor_ppc_exit();
acpi_thermal_cpufreq_exit();
......@@ -1075,6 +1085,8 @@ static void __exit acpi_processor_exit(void)
acpi_bus_unregister_driver(&acpi_processor_driver);
cpuidle_unregister_driver(&acpi_idle_driver);
remove_proc_entry(ACPI_PROCESSOR_CLASS, acpi_root_dir);
return;
......
/*
* SMBus driver for ACPI Embedded Controller (v0.1)
*
* Copyright (c) 2007 Alexey Starikovskiy
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation version 2.
*/
#include <acpi/acpi_bus.h>
#include <acpi/acpi_drivers.h>
#include <acpi/actypes.h>
#include <linux/wait.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include "sbshc.h"
#define ACPI_SMB_HC_CLASS "smbus_host_controller"
#define ACPI_SMB_HC_DEVICE_NAME "ACPI SMBus HC"
struct acpi_smb_hc {
struct acpi_ec *ec;
struct mutex lock;
wait_queue_head_t wait;
u8 offset;
u8 query_bit;
smbus_alarm_callback callback;
void *context;
};
static int acpi_smbus_hc_add(struct acpi_device *device);
static int acpi_smbus_hc_remove(struct acpi_device *device, int type);
static const struct acpi_device_id sbs_device_ids[] = {
{"ACPI0001", 0},
{"ACPI0005", 0},
{"", 0},
};
MODULE_DEVICE_TABLE(acpi, sbs_device_ids);
static struct acpi_driver acpi_smb_hc_driver = {
.name = "smbus_hc",
.class = ACPI_SMB_HC_CLASS,
.ids = sbs_device_ids,
.ops = {
.add = acpi_smbus_hc_add,
.remove = acpi_smbus_hc_remove,
},
};
union acpi_smb_status {
u8 raw;
struct {
u8 status:5;
u8 reserved:1;
u8 alarm:1;
u8 done:1;
} fields;
};
enum acpi_smb_status_codes {
SMBUS_OK = 0,
SMBUS_UNKNOWN_FAILURE = 0x07,
SMBUS_DEVICE_ADDRESS_NACK = 0x10,
SMBUS_DEVICE_ERROR = 0x11,
SMBUS_DEVICE_COMMAND_ACCESS_DENIED = 0x12,
SMBUS_UNKNOWN_ERROR = 0x13,
SMBUS_DEVICE_ACCESS_DENIED = 0x17,
SMBUS_TIMEOUT = 0x18,
SMBUS_HOST_UNSUPPORTED_PROTOCOL = 0x19,
SMBUS_BUSY = 0x1a,
SMBUS_PEC_ERROR = 0x1f,
};
enum acpi_smb_offset {
ACPI_SMB_PROTOCOL = 0, /* protocol, PEC */
ACPI_SMB_STATUS = 1, /* status */
ACPI_SMB_ADDRESS = 2, /* address */
ACPI_SMB_COMMAND = 3, /* command */
ACPI_SMB_DATA = 4, /* 32 data registers */
ACPI_SMB_BLOCK_COUNT = 0x24, /* number of data bytes */
ACPI_SMB_ALARM_ADDRESS = 0x25, /* alarm address */
ACPI_SMB_ALARM_DATA = 0x26, /* 2 bytes alarm data */
};
static inline int smb_hc_read(struct acpi_smb_hc *hc, u8 address, u8 *data)
{
return ec_read(hc->offset + address, data);
}
static inline int smb_hc_write(struct acpi_smb_hc *hc, u8 address, u8 data)
{
return ec_write(hc->offset + address, data);
}
static inline int smb_check_done(struct acpi_smb_hc *hc)
{
union acpi_smb_status status = {.raw = 0};
smb_hc_read(hc, ACPI_SMB_STATUS, &status.raw);
return status.fields.done && (status.fields.status == SMBUS_OK);
}
static int wait_transaction_complete(struct acpi_smb_hc *hc, int timeout)
{
if (wait_event_timeout(hc->wait, smb_check_done(hc),
msecs_to_jiffies(timeout)))
return 0;
else
return -ETIME;
}
int acpi_smbus_transaction(struct acpi_smb_hc *hc, u8 protocol, u8 address,
u8 command, u8 *data, u8 length)
{
int ret = -EFAULT, i;
u8 temp, sz = 0;
mutex_lock(&hc->lock);
if (smb_hc_read(hc, ACPI_SMB_PROTOCOL, &temp))
goto end;
if (temp) {
ret = -EBUSY;
goto end;
}
smb_hc_write(hc, ACPI_SMB_COMMAND, command);
smb_hc_write(hc, ACPI_SMB_COMMAND, command);
if (!(protocol & 0x01)) {
smb_hc_write(hc, ACPI_SMB_BLOCK_COUNT, length);
for (i = 0; i < length; ++i)
smb_hc_write(hc, ACPI_SMB_DATA + i, data[i]);
}
smb_hc_write(hc, ACPI_SMB_ADDRESS, address << 1);
smb_hc_write(hc, ACPI_SMB_PROTOCOL, protocol);
/*
* Wait for completion. Save the status code, data size,
* and data into the return package (if required by the protocol).
*/
ret = wait_transaction_complete(hc, 1000);
if (ret || !(protocol & 0x01))
goto end;
switch (protocol) {
case SMBUS_RECEIVE_BYTE:
case SMBUS_READ_BYTE:
sz = 1;
break;
case SMBUS_READ_WORD:
sz = 2;
break;
case SMBUS_READ_BLOCK:
if (smb_hc_read(hc, ACPI_SMB_BLOCK_COUNT, &sz)) {
ret = -EFAULT;
goto end;
}
sz &= 0x1f;
break;
}
for (i = 0; i < sz; ++i)
smb_hc_read(hc, ACPI_SMB_DATA + i, &data[i]);
end:
mutex_unlock(&hc->lock);
return ret;
}
int acpi_smbus_read(struct acpi_smb_hc *hc, u8 protocol, u8 address,
u8 command, u8 *data)
{
return acpi_smbus_transaction(hc, protocol, address, command, data, 0);
}
EXPORT_SYMBOL_GPL(acpi_smbus_read);
int acpi_smbus_write(struct acpi_smb_hc *hc, u8 protocol, u8 address,
u8 command, u8 *data, u8 length)
{
return acpi_smbus_transaction(hc, protocol, address, command, data, length);
}
EXPORT_SYMBOL_GPL(acpi_smbus_write);
int acpi_smbus_register_callback(struct acpi_smb_hc *hc,
smbus_alarm_callback callback, void *context)
{
mutex_lock(&hc->lock);
hc->callback = callback;
hc->context = context;
mutex_unlock(&hc->lock);
return 0;
}
EXPORT_SYMBOL_GPL(acpi_smbus_register_callback);
int acpi_smbus_unregister_callback(struct acpi_smb_hc *hc)
{
mutex_lock(&hc->lock);
hc->callback = NULL;
hc->context = NULL;
mutex_unlock(&hc->lock);
return 0;
}
EXPORT_SYMBOL_GPL(acpi_smbus_unregister_callback);
static void acpi_smbus_callback(void *context)
{
struct acpi_smb_hc *hc = context;
if (hc->callback)
hc->callback(hc->context);
}
static int smbus_alarm(void *context)
{
struct acpi_smb_hc *hc = context;
union acpi_smb_status status;
if (smb_hc_read(hc, ACPI_SMB_STATUS, &status.raw))
return 0;
/* Check if it is only a completion notify */
if (status.fields.done)
wake_up(&hc->wait);
if (!status.fields.alarm)
return 0;
mutex_lock(&hc->lock);
smb_hc_write(hc, ACPI_SMB_STATUS, status.raw);
if (hc->callback)
acpi_os_execute(OSL_GPE_HANDLER, acpi_smbus_callback, hc);
mutex_unlock(&hc->lock);
return 0;
}
typedef int (*acpi_ec_query_func) (void *data);
extern int acpi_ec_add_query_handler(struct acpi_ec *ec, u8 query_bit,
acpi_handle handle, acpi_ec_query_func func,
void *data);
static int acpi_smbus_hc_add(struct acpi_device *device)
{
int status;
unsigned long val;
struct acpi_smb_hc *hc;
if (!device)
return -EINVAL;
status = acpi_evaluate_integer(device->handle, "_EC", NULL, &val);
if (ACPI_FAILURE(status)) {
printk(KERN_ERR PREFIX "error obtaining _EC.\n");
return -EIO;
}
strcpy(acpi_device_name(device), ACPI_SMB_HC_DEVICE_NAME);
strcpy(acpi_device_class(device), ACPI_SMB_HC_CLASS);
hc = kzalloc(sizeof(struct acpi_smb_hc), GFP_KERNEL);
if (!hc)
return -ENOMEM;
mutex_init(&hc->lock);
init_waitqueue_head(&hc->wait);
hc->ec = acpi_driver_data(device->parent);
hc->offset = (val >> 8) & 0xff;
hc->query_bit = val & 0xff;
acpi_driver_data(device) = hc;
acpi_ec_add_query_handler(hc->ec, hc->query_bit, NULL, smbus_alarm, hc);
printk(KERN_INFO PREFIX "SBS HC: EC = 0x%p, offset = 0x%0x, query_bit = 0x%0x\n",
hc->ec, hc->offset, hc->query_bit);
return 0;
}
extern void acpi_ec_remove_query_handler(struct acpi_ec *ec, u8 query_bit);
static int acpi_smbus_hc_remove(struct acpi_device *device, int type)
{
struct acpi_smb_hc *hc;
if (!device)
return -EINVAL;
hc = acpi_driver_data(device);
acpi_ec_remove_query_handler(hc->ec, hc->query_bit);
kfree(hc);
return 0;
}
static int __init acpi_smb_hc_init(void)
{
int result;
result = acpi_bus_register_driver(&acpi_smb_hc_driver);
if (result < 0)
return -ENODEV;
return 0;
}
static void __exit acpi_smb_hc_exit(void)
{
acpi_bus_unregister_driver(&acpi_smb_hc_driver);
}
module_init(acpi_smb_hc_init);
module_exit(acpi_smb_hc_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Alexey Starikovskiy");
MODULE_DESCRIPTION("ACPI SMBus HC driver");
struct acpi_smb_hc;
enum acpi_smb_protocol {
SMBUS_WRITE_QUICK = 2,
SMBUS_READ_QUICK = 3,
SMBUS_SEND_BYTE = 4,
SMBUS_RECEIVE_BYTE = 5,
SMBUS_WRITE_BYTE = 6,
SMBUS_READ_BYTE = 7,
SMBUS_WRITE_WORD = 8,
SMBUS_READ_WORD = 9,
SMBUS_WRITE_BLOCK = 0xa,
SMBUS_READ_BLOCK = 0xb,
SMBUS_PROCESS_CALL = 0xc,
SMBUS_BLOCK_PROCESS_CALL = 0xd,
};
static const u8 SMBUS_PEC = 0x80;
typedef void (*smbus_alarm_callback)(void *context);
extern int acpi_smbus_read(struct acpi_smb_hc *hc, u8 protocol, u8 address,
u8 command, u8 * data);
extern int acpi_smbus_write(struct acpi_smb_hc *hc, u8 protocol, u8 slave_address,
u8 command, u8 * data, u8 length);
extern int acpi_smbus_register_callback(struct acpi_smb_hc *hc,
smbus_alarm_callback callback, void *context);
extern int acpi_smbus_unregister_callback(struct acpi_smb_hc *hc);
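To make the collapsed SBS client code above easier to picture, here is a hedged sketch of a caller reading one 16-bit register through this host controller, using only the interfaces declared in sbshc.h. The slave address and command come from the Smart Battery spec and are left to the caller; the helper name is invented.

/* Hedged sketch of an SMBus HC client: read one 16-bit word. */
#include <linux/types.h>
#include "sbshc.h"

static int example_read_word(struct acpi_smb_hc *hc, u8 address,
			     u8 command, u16 *val)
{
	u8 data[2];
	int ret;

	ret = acpi_smbus_read(hc, SMBUS_READ_WORD, address, command, data);
	if (ret)
		return ret;

	/* SMBus transfers word data low byte first */
	*val = (u16)data[0] | ((u16)data[1] << 8);
	return 0;
}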
......@@ -44,7 +44,6 @@ int acpi_sleep_prepare(u32 acpi_state)
ACPI_FLUSH_CPU_CACHE();
acpi_enable_wakeup_device_prep(acpi_state);
#endif
acpi_gpe_sleep_prepare(acpi_state);
acpi_enter_sleep_state_prep(acpi_state);
return 0;
}
......@@ -268,6 +267,11 @@ static void acpi_hibernation_leave(void)
static void acpi_hibernation_finish(void)
{
/*
* If ACPI is not enabled by the BIOS and the boot kernel, we need to
* enable it here.
*/
acpi_enable();
acpi_leave_sleep_state(ACPI_STATE_S4);
acpi_disable_wakeup_device(ACPI_STATE_S4);
......
......@@ -5,6 +5,5 @@ extern int acpi_suspend (u32 state);
extern void acpi_enable_wakeup_device_prep(u8 sleep_state);
extern void acpi_enable_wakeup_device(u8 sleep_state);
extern void acpi_disable_wakeup_device(u8 sleep_state);
extern void acpi_gpe_sleep_prepare(u32 sleep_state);
extern int acpi_sleep_prepare(u32 acpi_state);
......@@ -64,36 +64,29 @@ void acpi_enable_wakeup_device(u8 sleep_state)
ACPI_FUNCTION_TRACE("acpi_enable_wakeup_device");
spin_lock(&acpi_device_lock);
list_for_each_safe(node, next, &acpi_wakeup_device_list) {
struct acpi_device *dev = container_of(node,
struct acpi_device,
wakeup_list);
struct acpi_device *dev =
container_of(node, struct acpi_device, wakeup_list);
if (!dev->wakeup.flags.valid)
continue;
/* If users want to disable run-wake GPE,
* we only disable it for wake and leave it for runtime
*/
if (dev->wakeup.flags.run_wake && !dev->wakeup.state.enabled) {
spin_unlock(&acpi_device_lock);
acpi_set_gpe_type(dev->wakeup.gpe_device,
dev->wakeup.gpe_number,
ACPI_GPE_TYPE_RUNTIME);
/* Re-enable it, since set_gpe_type will disable it */
acpi_enable_gpe(dev->wakeup.gpe_device,
dev->wakeup.gpe_number, ACPI_ISR);
spin_lock(&acpi_device_lock);
if (!dev->wakeup.state.enabled ||
sleep_state > (u32) dev->wakeup.sleep_state) {
if (dev->wakeup.flags.run_wake) {
spin_unlock(&acpi_device_lock);
/* set_gpe_type will disable GPE, leave it like that */
acpi_set_gpe_type(dev->wakeup.gpe_device,
dev->wakeup.gpe_number,
ACPI_GPE_TYPE_RUNTIME);
spin_lock(&acpi_device_lock);
}
continue;
}
if (!dev->wakeup.flags.valid ||
!dev->wakeup.state.enabled ||
(sleep_state > (u32) dev->wakeup.sleep_state))
continue;
spin_unlock(&acpi_device_lock);
/* run-wake GPE has been enabled */
if (!dev->wakeup.flags.run_wake)
acpi_enable_gpe(dev->wakeup.gpe_device,
dev->wakeup.gpe_number, ACPI_ISR);
dev->wakeup.state.active = 1;
spin_lock(&acpi_device_lock);
}
spin_unlock(&acpi_device_lock);
......@@ -112,26 +105,25 @@ void acpi_disable_wakeup_device(u8 sleep_state)
spin_lock(&acpi_device_lock);
list_for_each_safe(node, next, &acpi_wakeup_device_list) {
struct acpi_device *dev = container_of(node,
struct acpi_device,
wakeup_list);
struct acpi_device *dev =
container_of(node, struct acpi_device, wakeup_list);
if (dev->wakeup.flags.run_wake && !dev->wakeup.state.enabled) {
spin_unlock(&acpi_device_lock);
acpi_set_gpe_type(dev->wakeup.gpe_device,
dev->wakeup.gpe_number,
ACPI_GPE_TYPE_WAKE_RUN);
/* Re-enable it, since set_gpe_type will disable it */
acpi_enable_gpe(dev->wakeup.gpe_device,
dev->wakeup.gpe_number, ACPI_NOT_ISR);
spin_lock(&acpi_device_lock);
if (!dev->wakeup.flags.valid)
continue;
}
if (!dev->wakeup.flags.valid ||
!dev->wakeup.state.active ||
(sleep_state > (u32) dev->wakeup.sleep_state))
if (!dev->wakeup.state.enabled ||
sleep_state > (u32) dev->wakeup.sleep_state) {
if (dev->wakeup.flags.run_wake) {
spin_unlock(&acpi_device_lock);
acpi_set_gpe_type(dev->wakeup.gpe_device,
dev->wakeup.gpe_number,
ACPI_GPE_TYPE_WAKE_RUN);
/* Re-enable it, since set_gpe_type will disable it */
acpi_enable_gpe(dev->wakeup.gpe_device,
dev->wakeup.gpe_number, ACPI_NOT_ISR);
spin_lock(&acpi_device_lock);
}
continue;
}
spin_unlock(&acpi_device_lock);
acpi_disable_wakeup_device_power(dev);
......@@ -142,7 +134,6 @@ void acpi_disable_wakeup_device(u8 sleep_state)
acpi_clear_gpe(dev->wakeup.gpe_device,
dev->wakeup.gpe_number, ACPI_NOT_ISR);
}
dev->wakeup.state.active = 0;
spin_lock(&acpi_device_lock);
}
spin_unlock(&acpi_device_lock);
......@@ -160,48 +151,20 @@ static int __init acpi_wakeup_device_init(void)
struct acpi_device *dev = container_of(node,
struct acpi_device,
wakeup_list);
/* In case user doesn't load button driver */
if (dev->wakeup.flags.run_wake && !dev->wakeup.state.enabled) {
spin_unlock(&acpi_device_lock);
acpi_set_gpe_type(dev->wakeup.gpe_device,
dev->wakeup.gpe_number,
ACPI_GPE_TYPE_WAKE_RUN);
acpi_enable_gpe(dev->wakeup.gpe_device,
dev->wakeup.gpe_number, ACPI_NOT_ISR);
dev->wakeup.state.enabled = 1;
spin_lock(&acpi_device_lock);
}
if (!dev->wakeup.flags.run_wake || dev->wakeup.state.enabled)
continue;
spin_unlock(&acpi_device_lock);
acpi_set_gpe_type(dev->wakeup.gpe_device,
dev->wakeup.gpe_number,
ACPI_GPE_TYPE_WAKE_RUN);
acpi_enable_gpe(dev->wakeup.gpe_device,
dev->wakeup.gpe_number, ACPI_NOT_ISR);
dev->wakeup.state.enabled = 1;
spin_lock(&acpi_device_lock);
}
spin_unlock(&acpi_device_lock);
return 0;
}
late_initcall(acpi_wakeup_device_init);
/*
* Disable all wakeup GPEs before entering requested sleep state.
* @sleep_state: ACPI state
* Since acpi_enter_sleep_state() will disable all
* RUNTIME GPEs, we simply mark all GPES that
* are not enabled for wakeup from requested state as RUNTIME.
*/
void acpi_gpe_sleep_prepare(u32 sleep_state)
{
struct list_head *node, *next;
list_for_each_safe(node, next, &acpi_wakeup_device_list) {
struct acpi_device *dev = container_of(node,
struct acpi_device,
wakeup_list);
/* The GPE can wakeup system from this state, don't touch it */
if ((u32) dev->wakeup.sleep_state >= sleep_state)
continue;
/* acpi_set_gpe_type will automatically disable GPE */
acpi_set_gpe_type(dev->wakeup.gpe_device,
dev->wakeup.gpe_number,
ACPI_GPE_TYPE_RUNTIME);
}
}
......@@ -400,7 +400,7 @@ acpi_tb_parse_root_table(acpi_physical_address rsdp_address, u8 flags)
u32 table_count;
struct acpi_table_header *table;
acpi_physical_address address;
acpi_physical_address rsdt_address;
acpi_physical_address uninitialized_var(rsdt_address);
u32 length;
u8 *table_entry;
acpi_status status;
......
......@@ -195,6 +195,7 @@ struct acpi_thermal {
struct acpi_thermal_trips trips;
struct acpi_handle_list devices;
struct timer_list timer;
struct mutex lock;
};
static const struct file_operations acpi_thermal_state_fops = {
......@@ -711,6 +712,7 @@ static void acpi_thermal_check(void *data)
int result = 0;
struct acpi_thermal *tz = data;
unsigned long sleep_time = 0;
unsigned long timeout_jiffies = 0;
int i = 0;
struct acpi_thermal_state state;
......@@ -720,11 +722,15 @@ static void acpi_thermal_check(void *data)
return;
}
/* Check if someone else is already running */
if (!mutex_trylock(&tz->lock))
return;
state = tz->state;
result = acpi_thermal_get_temperature(tz);
if (result)
return;
goto unlock;
memset(&tz->state, 0, sizeof(tz->state));
......@@ -787,10 +793,13 @@ static void acpi_thermal_check(void *data)
* a thermal event occurs). Note that _TSP and _TZD values are
* given in 1/10th seconds (we must covert to milliseconds).
*/
if (tz->state.passive)
if (tz->state.passive) {
sleep_time = tz->trips.passive.tsp * 100;
else if (tz->polling_frequency > 0)
timeout_jiffies = jiffies + (HZ * sleep_time) / 1000;
} else if (tz->polling_frequency > 0) {
sleep_time = tz->polling_frequency * 100;
timeout_jiffies = round_jiffies(jiffies + (HZ * sleep_time) / 1000);
}
ACPI_DEBUG_PRINT((ACPI_DB_INFO, "%s: temperature[%lu] sleep[%lu]\n",
tz->name, tz->temperature, sleep_time));
......@@ -804,17 +813,16 @@ static void acpi_thermal_check(void *data)
del_timer(&(tz->timer));
} else {
if (timer_pending(&(tz->timer)))
mod_timer(&(tz->timer),
jiffies + (HZ * sleep_time) / 1000);
mod_timer(&(tz->timer), timeout_jiffies);
else {
tz->timer.data = (unsigned long)tz;
tz->timer.function = acpi_thermal_run;
tz->timer.expires = jiffies + (HZ * sleep_time) / 1000;
tz->timer.expires = timeout_jiffies;
add_timer(&(tz->timer));
}
}
return;
unlock:
mutex_unlock(&tz->lock);
}
/* --------------------------------------------------------------------------
......@@ -1251,7 +1259,7 @@ static int acpi_thermal_add(struct acpi_device *device)
strcpy(acpi_device_name(device), ACPI_THERMAL_DEVICE_NAME);
strcpy(acpi_device_class(device), ACPI_THERMAL_CLASS);
acpi_driver_data(device) = tz;
mutex_init(&tz->lock);
result = acpi_thermal_get_info(tz);
if (result)
goto end;
......@@ -1321,7 +1329,7 @@ static int acpi_thermal_remove(struct acpi_device *device, int type)
}
acpi_thermal_remove_fs(device);
mutex_destroy(&tz->lock);
kfree(tz);
return 0;
}
......
......@@ -409,14 +409,17 @@ acpi_video_device_lcd_query_levels(struct acpi_video_device *device,
static int
acpi_video_device_lcd_set_level(struct acpi_video_device *device, int level)
{
int status;
int status = AE_OK;
union acpi_object arg0 = { ACPI_TYPE_INTEGER };
struct acpi_object_list args = { 1, &arg0 };
arg0.integer.value = level;
status = acpi_evaluate_object(device->dev->handle, "_BCM", &args, NULL);
if (device->cap._BCM)
status = acpi_evaluate_object(device->dev->handle, "_BCM",
&args, NULL);
device->brightness->curr = level;
return status;
}
......@@ -424,11 +427,11 @@ static int
acpi_video_device_lcd_get_level_current(struct acpi_video_device *device,
unsigned long *level)
{
int status;
status = acpi_evaluate_integer(device->dev->handle, "_BQC", NULL, level);
return status;
if (device->cap._BQC)
return acpi_evaluate_integer(device->dev->handle, "_BQC", NULL,
level);
*level = device->brightness->curr;
return AE_OK;
}
static int
......@@ -1633,9 +1636,20 @@ static int
acpi_video_get_next_level(struct acpi_video_device *device,
u32 level_current, u32 event)
{
int min, max, min_above, max_below, i, l;
int min, max, min_above, max_below, i, l, delta = 255;
max = max_below = 0;
min = min_above = 255;
/* Find closest level to level_current */
for (i = 0; i < device->brightness->count; i++) {
l = device->brightness->levels[i];
if (abs(l - level_current) < abs(delta)) {
delta = l - level_current;
if (!delta)
break;
}
}
/* Ajust level_current to closest available level */
level_current += delta;
for (i = 0; i < device->brightness->count; i++) {
l = device->brightness->levels[i];
if (l < min)
......
config CPU_IDLE
bool "CPU idle PM support"
help
CPU idle is a generic framework for supporting software-controlled
idle processor power management. It includes modular cross-platform
governors that can be swapped during runtime.
If you're using a mobile platform that supports CPU idle PM (e.g.
an ACPI-capable notebook), you should say Y here.
config CPU_IDLE_GOV_LADDER
bool
depends on CPU_IDLE
default y
config CPU_IDLE_GOV_MENU
bool
depends on CPU_IDLE && NO_HZ
default y
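The governors mentioned in the help text above plug in through cpuidle_register_governor(), shown in governor.c further down. Below is a minimal, hedged sketch of such a governor; it uses only the name, rating, select and owner fields that the framework code in this merge actually dereferences, and any other detail of the structure layout is an assumption.

/* Hedged sketch: the smallest possible cpuidle governor.  It always
 * picks state 0 and exists only to illustrate the registration API. */
#include <linux/module.h>
#include <linux/cpuidle.h>

static int noop_select(struct cpuidle_device *dev)
{
	return 0;	/* always the shallowest state */
}

static struct cpuidle_governor noop_governor = {
	.name	= "noop",
	.rating	= 1,	/* low rating: never preferred over ladder/menu */
	.select	= noop_select,
	.owner	= THIS_MODULE,
};

static int __init noop_governor_init(void)
{
	return cpuidle_register_governor(&noop_governor);
}

static void __exit noop_governor_exit(void)
{
	cpuidle_unregister_governor(&noop_governor);
}

module_init(noop_governor_init);
module_exit(noop_governor_exit);
MODULE_LICENSE("GPL");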
#
# Makefile for cpuidle.
#
obj-y += cpuidle.o driver.o governor.o sysfs.o governors/
/*
* cpuidle.c - core cpuidle infrastructure
*
* (C) 2006-2007 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
* Shaohua Li <shaohua.li@intel.com>
* Adam Belay <abelay@novell.com>
*
* This code is licenced under the GPL.
*/
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/notifier.h>
#include <linux/latency.h>
#include <linux/cpu.h>
#include <linux/cpuidle.h>
#include "cpuidle.h"
DEFINE_PER_CPU(struct cpuidle_device *, cpuidle_devices);
EXPORT_PER_CPU_SYMBOL_GPL(cpuidle_devices);
DEFINE_MUTEX(cpuidle_lock);
LIST_HEAD(cpuidle_detected_devices);
static void (*pm_idle_old)(void);
static int enabled_devices;
/**
* cpuidle_idle_call - the main idle loop
*
* NOTE: no locks or semaphores should be used here
*/
static void cpuidle_idle_call(void)
{
struct cpuidle_device *dev = __get_cpu_var(cpuidle_devices);
struct cpuidle_state *target_state;
int next_state;
/* check if the device is ready */
if (!dev || !dev->enabled) {
if (pm_idle_old)
pm_idle_old();
else
local_irq_enable();
return;
}
/* ask the governor for the next state */
next_state = cpuidle_curr_governor->select(dev);
if (need_resched())
return;
target_state = &dev->states[next_state];
/* enter the state and update stats */
dev->last_residency = target_state->enter(dev, target_state);
dev->last_state = target_state;
target_state->time += dev->last_residency;
target_state->usage++;
/* give the governor an opportunity to reflect on the outcome */
if (cpuidle_curr_governor->reflect)
cpuidle_curr_governor->reflect(dev);
}
/**
* cpuidle_install_idle_handler - installs the cpuidle idle loop handler
*/
void cpuidle_install_idle_handler(void)
{
if (enabled_devices && (pm_idle != cpuidle_idle_call)) {
/* Make sure all changes finished before we switch to new idle */
smp_wmb();
pm_idle = cpuidle_idle_call;
}
}
/**
* cpuidle_uninstall_idle_handler - uninstalls the cpuidle idle loop handler
*/
void cpuidle_uninstall_idle_handler(void)
{
if (enabled_devices && (pm_idle != pm_idle_old)) {
pm_idle = pm_idle_old;
cpu_idle_wait();
}
}
/**
* cpuidle_pause_and_lock - temporarily disables CPUIDLE
*/
void cpuidle_pause_and_lock(void)
{
mutex_lock(&cpuidle_lock);
cpuidle_uninstall_idle_handler();
}
EXPORT_SYMBOL_GPL(cpuidle_pause_and_lock);
/**
* cpuidle_resume_and_unlock - resumes CPUIDLE operation
*/
void cpuidle_resume_and_unlock(void)
{
cpuidle_install_idle_handler();
mutex_unlock(&cpuidle_lock);
}
EXPORT_SYMBOL_GPL(cpuidle_resume_and_unlock);
/**
* cpuidle_enable_device - enables idle PM for a CPU
* @dev: the CPU
*
* This function must be called between cpuidle_pause_and_lock and
* cpuidle_resume_and_unlock when used externally.
*/
int cpuidle_enable_device(struct cpuidle_device *dev)
{
int ret, i;
if (dev->enabled)
return 0;
if (!cpuidle_curr_driver || !cpuidle_curr_governor)
return -EIO;
if (!dev->state_count)
return -EINVAL;
if ((ret = cpuidle_add_state_sysfs(dev)))
return ret;
if (cpuidle_curr_governor->enable &&
(ret = cpuidle_curr_governor->enable(dev)))
goto fail_sysfs;
for (i = 0; i < dev->state_count; i++) {
dev->states[i].usage = 0;
dev->states[i].time = 0;
}
dev->last_residency = 0;
dev->last_state = NULL;
smp_wmb();
dev->enabled = 1;
enabled_devices++;
return 0;
fail_sysfs:
cpuidle_remove_state_sysfs(dev);
return ret;
}
EXPORT_SYMBOL_GPL(cpuidle_enable_device);
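A hedged sketch of the calling pattern the comment above requires from external callers, assuming the declarations are available via <linux/cpuidle.h> and that dev is an already-registered cpuidle_device:

/* Hedged sketch: re-enable idle PM for one device under the required
 * pause/resume bracket. */
#include <linux/cpuidle.h>

static int example_reenable(struct cpuidle_device *dev)
{
	int ret;

	cpuidle_pause_and_lock();	/* uninstall idle handler, take lock */
	ret = cpuidle_enable_device(dev);
	cpuidle_resume_and_unlock();	/* reinstall handler, drop lock */

	return ret;
}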
/**
* cpuidle_disable_device - disables idle PM for a CPU
* @dev: the CPU
*
* This function must be called between cpuidle_pause_and_lock and
* cpuidle_resume_and_unlock when used externally.
*/
void cpuidle_disable_device(struct cpuidle_device *dev)
{
if (!dev->enabled)
return;
if (!cpuidle_curr_driver || !cpuidle_curr_governor)
return;
dev->enabled = 0;
if (cpuidle_curr_governor->disable)
cpuidle_curr_governor->disable(dev);
cpuidle_remove_state_sysfs(dev);
enabled_devices--;
}
EXPORT_SYMBOL_GPL(cpuidle_disable_device);
/**
* cpuidle_register_device - registers a CPU's idle PM feature
* @dev: the cpu
*/
int cpuidle_register_device(struct cpuidle_device *dev)
{
int ret;
struct sys_device *sys_dev = get_cpu_sysdev((unsigned long)dev->cpu);
if (!sys_dev)
return -EINVAL;
if (!try_module_get(cpuidle_curr_driver->owner))
return -EINVAL;
init_completion(&dev->kobj_unregister);
mutex_lock(&cpuidle_lock);
per_cpu(cpuidle_devices, dev->cpu) = dev;
list_add(&dev->device_list, &cpuidle_detected_devices);
if ((ret = cpuidle_add_sysfs(sys_dev))) {
mutex_unlock(&cpuidle_lock);
module_put(cpuidle_curr_driver->owner);
return ret;
}
cpuidle_enable_device(dev);
cpuidle_install_idle_handler();
mutex_unlock(&cpuidle_lock);
return 0;
}
EXPORT_SYMBOL_GPL(cpuidle_register_device);
/**
* cpuidle_unregister_device - unregisters a CPU's idle PM feature
* @dev: the cpu
*/
void cpuidle_unregister_device(struct cpuidle_device *dev)
{
struct sys_device *sys_dev = get_cpu_sysdev((unsigned long)dev->cpu);
cpuidle_pause_and_lock();
cpuidle_disable_device(dev);
cpuidle_remove_sysfs(sys_dev);
list_del(&dev->device_list);
wait_for_completion(&dev->kobj_unregister);
per_cpu(cpuidle_devices, dev->cpu) = NULL;
cpuidle_resume_and_unlock();
module_put(cpuidle_curr_driver->owner);
}
EXPORT_SYMBOL_GPL(cpuidle_unregister_device);
#ifdef CONFIG_SMP
static void smp_callback(void *v)
{
/* we already woke the CPU up, nothing more to do */
}
/*
* This function gets called when a part of the kernel has a new latency
* requirement. This means we need to get all processors out of their C-state,
* and then recalculate a new suitable C-state. Just do a cross-cpu IPI; that
* wakes them all right up.
*/
static int cpuidle_latency_notify(struct notifier_block *b,
unsigned long l, void *v)
{
smp_call_function(smp_callback, NULL, 0, 1);
return NOTIFY_OK;
}
static struct notifier_block cpuidle_latency_notifier = {
.notifier_call = cpuidle_latency_notify,
};
#define latency_notifier_init(x) do { register_latency_notifier(x); } while (0)
#else /* CONFIG_SMP */
#define latency_notifier_init(x) do { } while (0)
#endif /* CONFIG_SMP */
/**
* cpuidle_init - core initializer
*/
static int __init cpuidle_init(void)
{
int ret;
pm_idle_old = pm_idle;
ret = cpuidle_add_class_sysfs(&cpu_sysdev_class);
if (ret)
return ret;
latency_notifier_init(&cpuidle_latency_notifier);
return 0;
}
core_initcall(cpuidle_init);
/*
* cpuidle.h - The internal header file
*/
#ifndef __DRIVER_CPUIDLE_H
#define __DRIVER_CPUIDLE_H
#include <linux/sysdev.h>
/* For internal use only */
extern struct cpuidle_governor *cpuidle_curr_governor;
extern struct cpuidle_driver *cpuidle_curr_driver;
extern struct list_head cpuidle_governors;
extern struct list_head cpuidle_detected_devices;
extern struct mutex cpuidle_lock;
extern spinlock_t cpuidle_driver_lock;
/* idle loop */
extern void cpuidle_install_idle_handler(void);
extern void cpuidle_uninstall_idle_handler(void);
/* governors */
extern int cpuidle_switch_governor(struct cpuidle_governor *gov);
/* sysfs */
extern int cpuidle_add_class_sysfs(struct sysdev_class *cls);
extern void cpuidle_remove_class_sysfs(struct sysdev_class *cls);
extern int cpuidle_add_state_sysfs(struct cpuidle_device *device);
extern void cpuidle_remove_state_sysfs(struct cpuidle_device *device);
extern int cpuidle_add_sysfs(struct sys_device *sysdev);
extern void cpuidle_remove_sysfs(struct sys_device *sysdev);
#endif /* __DRIVER_CPUIDLE_H */
/*
* driver.c - driver support
*
* (C) 2006-2007 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
* Shaohua Li <shaohua.li@intel.com>
* Adam Belay <abelay@novell.com>
*
* This code is licenced under the GPL.
*/
#include <linux/mutex.h>
#include <linux/module.h>
#include <linux/cpuidle.h>
#include "cpuidle.h"
struct cpuidle_driver *cpuidle_curr_driver;
DEFINE_SPINLOCK(cpuidle_driver_lock);
/**
* cpuidle_register_driver - registers a driver
* @drv: the driver
*/
int cpuidle_register_driver(struct cpuidle_driver *drv)
{
if (!drv)
return -EINVAL;
spin_lock(&cpuidle_driver_lock);
if (cpuidle_curr_driver) {
spin_unlock(&cpuidle_driver_lock);
return -EBUSY;
}
cpuidle_curr_driver = drv;
spin_unlock(&cpuidle_driver_lock);
return 0;
}
EXPORT_SYMBOL_GPL(cpuidle_register_driver);
/**
* cpuidle_unregister_driver - unregisters a driver
* @drv: the driver
*/
void cpuidle_unregister_driver(struct cpuidle_driver *drv)
{
if (!drv)
return;
spin_lock(&cpuidle_driver_lock);
cpuidle_curr_driver = NULL;
spin_unlock(&cpuidle_driver_lock);
}
EXPORT_SYMBOL_GPL(cpuidle_unregister_driver);
/*
* governor.c - governor support
*
* (C) 2006-2007 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
* Shaohua Li <shaohua.li@intel.com>
* Adam Belay <abelay@novell.com>
*
* This code is licenced under the GPL.
*/
#include <linux/mutex.h>
#include <linux/module.h>
#include <linux/cpuidle.h>
#include "cpuidle.h"
LIST_HEAD(cpuidle_governors);
struct cpuidle_governor *cpuidle_curr_governor;
/**
* __cpuidle_find_governor - finds a governor of the specified name
* @str: the name
*
* Must be called with cpuidle_lock aquired.
*/
static struct cpuidle_governor * __cpuidle_find_governor(const char *str)
{
struct cpuidle_governor *gov;
list_for_each_entry(gov, &cpuidle_governors, governor_list)
if (!strnicmp(str, gov->name, CPUIDLE_NAME_LEN))
return gov;
return NULL;
}
/**
* cpuidle_switch_governor - changes the governor
* @gov: the new target governor
*
* NOTE: "gov" can be NULL to specify disabled
* Must be called with cpuidle_lock aquired.
*/
int cpuidle_switch_governor(struct cpuidle_governor *gov)
{
struct cpuidle_device *dev;
if (gov == cpuidle_curr_governor)
return 0;
cpuidle_uninstall_idle_handler();
if (cpuidle_curr_governor) {
list_for_each_entry(dev, &cpuidle_detected_devices, device_list)
cpuidle_disable_device(dev);
module_put(cpuidle_curr_governor->owner);
}
cpuidle_curr_governor = gov;
if (gov) {
if (!try_module_get(cpuidle_curr_governor->owner))
return -EINVAL;
list_for_each_entry(dev, &cpuidle_detected_devices, device_list)
cpuidle_enable_device(dev);
cpuidle_install_idle_handler();
printk(KERN_INFO "cpuidle: using governor %s\n", gov->name);
}
return 0;
}
/**
* cpuidle_register_governor - registers a governor
* @gov: the governor
*/
int cpuidle_register_governor(struct cpuidle_governor *gov)
{
int ret = -EEXIST;
if (!gov || !gov->select)
return -EINVAL;
mutex_lock(&cpuidle_lock);
if (__cpuidle_find_governor(gov->name) == NULL) {
ret = 0;
list_add_tail(&gov->governor_list, &cpuidle_governors);
if (!cpuidle_curr_governor ||
cpuidle_curr_governor->rating < gov->rating)
cpuidle_switch_governor(gov);
}
mutex_unlock(&cpuidle_lock);
return ret;
}
EXPORT_SYMBOL_GPL(cpuidle_register_governor);
/**
* cpuidle_replace_governor - find a replacement governor
* @exclude_rating: the rating that will be skipped while looking for
* new governor.
*/
static struct cpuidle_governor *cpuidle_replace_governor(int exclude_rating)
{
struct cpuidle_governor *gov;
struct cpuidle_governor *ret_gov = NULL;
unsigned int max_rating = 0;
list_for_each_entry(gov, &cpuidle_governors, governor_list) {
if (gov->rating == exclude_rating)
continue;
if (gov->rating > max_rating) {
max_rating = gov->rating;
ret_gov = gov;
}
}
return ret_gov;
}
/**
* cpuidle_unregister_governor - unregisters a governor
* @gov: the governor
*/
void cpuidle_unregister_governor(struct cpuidle_governor *gov)
{
if (!gov)
return;
mutex_lock(&cpuidle_lock);
if (gov == cpuidle_curr_governor) {
struct cpuidle_governor *new_gov;
new_gov = cpuidle_replace_governor(gov->rating);
cpuidle_switch_governor(new_gov);
}
list_del(&gov->governor_list);
mutex_unlock(&cpuidle_lock);
}
EXPORT_SYMBOL_GPL(cpuidle_unregister_governor);
#
# Makefile for cpuidle governors.
#
obj-$(CONFIG_CPU_IDLE_GOV_LADDER) += ladder.o
obj-$(CONFIG_CPU_IDLE_GOV_MENU) += menu.o
/*
* ladder.c - the residency ladder algorithm
*
* Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
* Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
* Copyright (C) 2004, 2005 Dominik Brodowski <linux@brodo.de>
*
* (C) 2006-2007 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
* Shaohua Li <shaohua.li@intel.com>
* Adam Belay <abelay@novell.com>
*
* This code is licenced under the GPL.
*/
#include <linux/kernel.h>
#include <linux/cpuidle.h>
#include <linux/latency.h>
#include <linux/moduleparam.h>
#include <linux/jiffies.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#define PROMOTION_COUNT 4
#define DEMOTION_COUNT 1
struct ladder_device_state {
struct {
u32 promotion_count;
u32 demotion_count;
u32 promotion_time;
u32 demotion_time;
} threshold;
struct {
int promotion_count;
int demotion_count;
} stats;
};
struct ladder_device {
struct ladder_device_state states[CPUIDLE_STATE_MAX];
int last_state_idx;
};
static DEFINE_PER_CPU(struct ladder_device, ladder_devices);
/**
* ladder_do_selection - prepares private data for a state change
* @ldev: the ladder device
* @old_idx: the current state index
* @new_idx: the new target state index
*/
static inline void ladder_do_selection(struct ladder_device *ldev,
int old_idx, int new_idx)
{
ldev->states[old_idx].stats.promotion_count = 0;
ldev->states[old_idx].stats.demotion_count = 0;
ldev->last_state_idx = new_idx;
}
/**
* ladder_select_state - selects the next state to enter
* @dev: the CPU
*/
static int ladder_select_state(struct cpuidle_device *dev)
{
struct ladder_device *ldev = &__get_cpu_var(ladder_devices);
struct ladder_device_state *last_state;
int last_residency, last_idx = ldev->last_state_idx;
if (unlikely(!ldev))
return 0;
last_state = &ldev->states[last_idx];
if (dev->states[last_idx].flags & CPUIDLE_FLAG_TIME_VALID)
last_residency = cpuidle_get_last_residency(dev) - dev->states[last_idx].exit_latency;
else
last_residency = last_state->threshold.promotion_time + 1;
/* consider promotion */
if (last_idx < dev->state_count - 1 &&
last_residency > last_state->threshold.promotion_time &&
dev->states[last_idx + 1].exit_latency <= system_latency_constraint()) {
last_state->stats.promotion_count++;
last_state->stats.demotion_count = 0;
if (last_state->stats.promotion_count >= last_state->threshold.promotion_count) {
ladder_do_selection(ldev, last_idx, last_idx + 1);
return last_idx + 1;
}
}
/* consider demotion */
if (last_idx > 0 &&
last_residency < last_state->threshold.demotion_time) {
last_state->stats.demotion_count++;
last_state->stats.promotion_count = 0;
if (last_state->stats.demotion_count >= last_state->threshold.demotion_count) {
ladder_do_selection(ldev, last_idx, last_idx - 1);
return last_idx - 1;
}
}
/* otherwise remain at the current state */
return last_idx;
}
/**
* ladder_enable_device - setup for the governor
* @dev: the CPU
*/
static int ladder_enable_device(struct cpuidle_device *dev)
{
int i;
struct ladder_device *ldev = &per_cpu(ladder_devices, dev->cpu);
struct ladder_device_state *lstate;
struct cpuidle_state *state;
ldev->last_state_idx = 0;
for (i = 0; i < dev->state_count; i++) {
state = &dev->states[i];
lstate = &ldev->states[i];
lstate->stats.promotion_count = 0;
lstate->stats.demotion_count = 0;
lstate->threshold.promotion_count = PROMOTION_COUNT;
lstate->threshold.demotion_count = DEMOTION_COUNT;
if (i < dev->state_count - 1)
lstate->threshold.promotion_time = state->exit_latency;
if (i > 0)
lstate->threshold.demotion_time = state->exit_latency;
}
return 0;
}
static struct cpuidle_governor ladder_governor = {
.name = "ladder",
.rating = 10,
.enable = ladder_enable_device,
.select = ladder_select_state,
.owner = THIS_MODULE,
};
/**
* init_ladder - initializes the governor
*/
static int __init init_ladder(void)
{
return cpuidle_register_governor(&ladder_governor);
}
/**
* exit_ladder - exits the governor
*/
static void __exit exit_ladder(void)
{
cpuidle_unregister_governor(&ladder_governor);
}
MODULE_LICENSE("GPL");
module_init(init_ladder);
module_exit(exit_ladder);
/*
* menu.c - the menu idle governor
*
* Copyright (C) 2006-2007 Adam Belay <abelay@novell.com>
*
* This code is licenced under the GPL.
*/
#include <linux/kernel.h>
#include <linux/cpuidle.h>
#include <linux/latency.h>
#include <linux/time.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#define BREAK_FUZZ 4 /* 4 us */
struct menu_device {
int last_state_idx;
unsigned int expected_us;
unsigned int predicted_us;
unsigned int last_measured_us;
unsigned int elapsed_us;
};
static DEFINE_PER_CPU(struct menu_device, menu_devices);
/**
* menu_select - selects the next idle state to enter
* @dev: the CPU
*/
static int menu_select(struct cpuidle_device *dev)
{
struct menu_device *data = &__get_cpu_var(menu_devices);
int i;
/* determine the expected residency time */
data->expected_us =
(u32) ktime_to_ns(tick_nohz_get_sleep_length()) / 1000;
/* find the deepest idle state that satisfies our constraints */
for (i = 1; i < dev->state_count; i++) {
struct cpuidle_state *s = &dev->states[i];
if (s->target_residency > data->expected_us)
break;
if (s->target_residency > data->predicted_us)
break;
if (s->exit_latency > system_latency_constraint())
break;
}
data->last_state_idx = i - 1;
return i - 1;
}
/**
* menu_reflect - attempts to guess what happened after entry
* @dev: the CPU
*
* NOTE: it's important to be fast here because this operation will add to
* the overall exit latency.
*/
static void menu_reflect(struct cpuidle_device *dev)
{
struct menu_device *data = &__get_cpu_var(menu_devices);
int last_idx = data->last_state_idx;
unsigned int measured_us =
cpuidle_get_last_residency(dev) + data->elapsed_us;
struct cpuidle_state *target = &dev->states[last_idx];
/*
* Ugh, this idle state doesn't support residency measurements, so we
* are basically lost in the dark. As a compromise, assume we slept
* for one full standard timer tick. However, be aware that this
* could potentially result in a suboptimal state transition.
*/
if (!(target->flags & CPUIDLE_FLAG_TIME_VALID))
measured_us = USEC_PER_SEC / HZ;
/* Predict time remaining until next break event */
if (measured_us + BREAK_FUZZ < data->expected_us - target->exit_latency) {
data->predicted_us = max(measured_us, data->last_measured_us);
data->last_measured_us = measured_us;
data->elapsed_us = 0;
} else {
if (data->elapsed_us < data->elapsed_us + measured_us)
data->elapsed_us = measured_us;
else
data->elapsed_us = -1;
data->predicted_us = max(measured_us, data->last_measured_us);
}
}
/**
* menu_enable_device - scans a CPU's states and does setup
* @dev: the CPU
*/
static int menu_enable_device(struct cpuidle_device *dev)
{
struct menu_device *data = &per_cpu(menu_devices, dev->cpu);
memset(data, 0, sizeof(struct menu_device));
return 0;
}
static struct cpuidle_governor menu_governor = {
.name = "menu",
.rating = 20,
.enable = menu_enable_device,
.select = menu_select,
.reflect = menu_reflect,
.owner = THIS_MODULE,
};
/**
* init_menu - initializes the governor
*/
static int __init init_menu(void)
{
return cpuidle_register_governor(&menu_governor);
}
/**
* exit_menu - exits the governor
*/
static void __exit exit_menu(void)
{
cpuidle_unregister_governor(&menu_governor);
}
MODULE_LICENSE("GPL");
module_init(init_menu);
module_exit(exit_menu);
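To make the selection loop in menu_select above concrete, here is a small, self-contained userspace sketch that replays the same three checks against a hypothetical C-state table. Every number in it is made up for illustration, and latency_limit stands in for system_latency_constraint().

#include <stdio.h>

struct toy_state {
	const char *name;
	unsigned int target_residency;	/* us */
	unsigned int exit_latency;	/* us */
};

int main(void)
{
	/* Hypothetical C-state table, shallowest state first */
	struct toy_state states[] = {
		{ "C1",   1,   1 },
		{ "C2",  80,  40 },
		{ "C3", 800, 200 },
	};
	unsigned int expected_us  = 500;	/* tick_nohz_get_sleep_length() stand-in */
	unsigned int predicted_us = 300;	/* governor's running estimate */
	unsigned int latency_limit = 100;	/* system_latency_constraint() stand-in */
	int i, count = sizeof(states) / sizeof(states[0]);

	/* Walk deeper until a state violates one of the constraints */
	for (i = 1; i < count; i++) {
		if (states[i].target_residency > expected_us)
			break;
		if (states[i].target_residency > predicted_us)
			break;
		if (states[i].exit_latency > latency_limit)
			break;
	}

	/* C3's 800us target residency exceeds the expected 500us, so C2 is chosen */
	printf("selected %s\n", states[i - 1].name);
	return 0;
}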
/*
* sysfs.c - sysfs support
*
* (C) 2006-2007 Shaohua Li <shaohua.li@intel.com>
*
* This code is licenced under the GPL.
*/
#include <linux/kernel.h>
#include <linux/cpuidle.h>
#include <linux/sysfs.h>
#include <linux/cpu.h>
#include "cpuidle.h"
static unsigned int sysfs_switch;
static int __init cpuidle_sysfs_setup(char *unused)
{
sysfs_switch = 1;
return 1;
}
__setup("cpuidle_sysfs_switch", cpuidle_sysfs_setup);
static ssize_t show_available_governors(struct sys_device *dev, char *buf)
{
ssize_t i = 0;
struct cpuidle_governor *tmp;
mutex_lock(&cpuidle_lock);
list_for_each_entry(tmp, &cpuidle_governors, governor_list) {
if (i >= (ssize_t) ((PAGE_SIZE/sizeof(char)) - CPUIDLE_NAME_LEN - 2))
goto out;
i += scnprintf(&buf[i], CPUIDLE_NAME_LEN, "%s ", tmp->name);
}
out:
i+= sprintf(&buf[i], "\n");
mutex_unlock(&cpuidle_lock);
return i;
}
static ssize_t show_current_driver(struct sys_device *dev, char *buf)
{
ssize_t ret;
spin_lock(&cpuidle_driver_lock);
if (cpuidle_curr_driver)
ret = sprintf(buf, "%s\n", cpuidle_curr_driver->name);
else
ret = sprintf(buf, "none\n");
spin_unlock(&cpuidle_driver_lock);
return ret;
}
static ssize_t show_current_governor(struct sys_device *dev, char *buf)
{
ssize_t ret;
mutex_lock(&cpuidle_lock);
if (cpuidle_curr_governor)
ret = sprintf(buf, "%s\n", cpuidle_curr_governor->name);
else
ret = sprintf(buf, "none\n");
mutex_unlock(&cpuidle_lock);
return ret;
}
static ssize_t store_current_governor(struct sys_device *dev,
const char *buf, size_t count)
{
char gov_name[CPUIDLE_NAME_LEN];
int ret = -EINVAL;
size_t len = count;
struct cpuidle_governor *gov;
if (!len || len >= sizeof(gov_name))
return -EINVAL;
memcpy(gov_name, buf, len);
gov_name[len] = '\0';
if (gov_name[len - 1] == '\n')
gov_name[--len] = '\0';
mutex_lock(&cpuidle_lock);
list_for_each_entry(gov, &cpuidle_governors, governor_list) {
if (strlen(gov->name) == len && !strcmp(gov->name, gov_name)) {
ret = cpuidle_switch_governor(gov);
break;
}
}
mutex_unlock(&cpuidle_lock);
if (ret)
return ret;
else
return count;
}
static SYSDEV_ATTR(current_driver, 0444, show_current_driver, NULL);
static SYSDEV_ATTR(current_governor_ro, 0444, show_current_governor, NULL);
static struct attribute *cpuclass_default_attrs[] = {
&attr_current_driver.attr,
&attr_current_governor_ro.attr,
NULL
};
static SYSDEV_ATTR(available_governors, 0444, show_available_governors, NULL);
static SYSDEV_ATTR(current_governor, 0644, show_current_governor,
store_current_governor);
static struct attribute *cpuclass_switch_attrs[] = {
&attr_available_governors.attr,
&attr_current_driver.attr,
&attr_current_governor.attr,
NULL
};
static struct attribute_group cpuclass_attr_group = {
.attrs = cpuclass_default_attrs,
.name = "cpuidle",
};
/**
* cpuidle_add_class_sysfs - add CPU global sysfs attributes
*/
int cpuidle_add_class_sysfs(struct sysdev_class *cls)
{
if (sysfs_switch)
cpuclass_attr_group.attrs = cpuclass_switch_attrs;
return sysfs_create_group(&cls->kset.kobj, &cpuclass_attr_group);
}
/**
* cpuidle_remove_class_sysfs - remove CPU global sysfs attributes
*/
void cpuidle_remove_class_sysfs(struct sysdev_class *cls)
{
sysfs_remove_group(&cls->kset.kobj, &cpuclass_attr_group);
}
struct cpuidle_attr {
struct attribute attr;
ssize_t (*show)(struct cpuidle_device *, char *);
ssize_t (*store)(struct cpuidle_device *, const char *, size_t count);
};
#define define_one_ro(_name, show) \
static struct cpuidle_attr attr_##_name = __ATTR(_name, 0444, show, NULL)
#define define_one_rw(_name, show, store) \
static struct cpuidle_attr attr_##_name = __ATTR(_name, 0644, show, store)
#define kobj_to_cpuidledev(k) container_of(k, struct cpuidle_device, kobj)
#define attr_to_cpuidleattr(a) container_of(a, struct cpuidle_attr, attr)
static ssize_t cpuidle_show(struct kobject * kobj, struct attribute * attr ,char * buf)
{
int ret = -EIO;
struct cpuidle_device *dev = kobj_to_cpuidledev(kobj);
struct cpuidle_attr * cattr = attr_to_cpuidleattr(attr);
if (cattr->show) {
mutex_lock(&cpuidle_lock);
ret = cattr->show(dev, buf);
mutex_unlock(&cpuidle_lock);
}
return ret;
}
static ssize_t cpuidle_store(struct kobject * kobj, struct attribute * attr,
const char * buf, size_t count)
{
int ret = -EIO;
struct cpuidle_device *dev = kobj_to_cpuidledev(kobj);
struct cpuidle_attr * cattr = attr_to_cpuidleattr(attr);
if (cattr->store) {
mutex_lock(&cpuidle_lock);
ret = cattr->store(dev, buf, count);
mutex_unlock(&cpuidle_lock);
}
return ret;
}
static struct sysfs_ops cpuidle_sysfs_ops = {
.show = cpuidle_show,
.store = cpuidle_store,
};
static void cpuidle_sysfs_release(struct kobject *kobj)
{
struct cpuidle_device *dev = kobj_to_cpuidledev(kobj);
complete(&dev->kobj_unregister);
}
static struct kobj_type ktype_cpuidle = {
.sysfs_ops = &cpuidle_sysfs_ops,
.release = cpuidle_sysfs_release,
};
struct cpuidle_state_attr {
struct attribute attr;
ssize_t (*show)(struct cpuidle_state *, char *);
ssize_t (*store)(struct cpuidle_state *, const char *, size_t);
};
#define define_one_state_ro(_name, show) \
static struct cpuidle_state_attr attr_##_name = __ATTR(_name, 0444, show, NULL)
#define define_show_state_function(_name) \
static ssize_t show_state_##_name(struct cpuidle_state *state, char *buf) \
{ \
return sprintf(buf, "%u\n", state->_name);\
}
static ssize_t show_state_name(struct cpuidle_state *state, char *buf)
{
return sprintf(buf, "%s\n", state->name);
}
define_show_state_function(exit_latency)
define_show_state_function(power_usage)
define_show_state_function(usage)
define_show_state_function(time)
define_one_state_ro(name, show_state_name);
define_one_state_ro(latency, show_state_exit_latency);
define_one_state_ro(power, show_state_power_usage);
define_one_state_ro(usage, show_state_usage);
define_one_state_ro(time, show_state_time);
static struct attribute *cpuidle_state_default_attrs[] = {
&attr_name.attr,
&attr_latency.attr,
&attr_power.attr,
&attr_usage.attr,
&attr_time.attr,
NULL
};
#define kobj_to_state_obj(k) container_of(k, struct cpuidle_state_kobj, kobj)
#define kobj_to_state(k) (kobj_to_state_obj(k)->state)
#define attr_to_stateattr(a) container_of(a, struct cpuidle_state_attr, attr)
static ssize_t cpuidle_state_show(struct kobject * kobj,
struct attribute * attr ,char * buf)
{
int ret = -EIO;
struct cpuidle_state *state = kobj_to_state(kobj);
struct cpuidle_state_attr * cattr = attr_to_stateattr(attr);
if (cattr->show)
ret = cattr->show(state, buf);
return ret;
}
static struct sysfs_ops cpuidle_state_sysfs_ops = {
.show = cpuidle_state_show,
};
static void cpuidle_state_sysfs_release(struct kobject *kobj)
{
struct cpuidle_state_kobj *state_obj = kobj_to_state_obj(kobj);
complete(&state_obj->kobj_unregister);
}
static struct kobj_type ktype_state_cpuidle = {
.sysfs_ops = &cpuidle_state_sysfs_ops,
.default_attrs = cpuidle_state_default_attrs,
.release = cpuidle_state_sysfs_release,
};
static void inline cpuidle_free_state_kobj(struct cpuidle_device *device, int i)
{
kobject_unregister(&device->kobjs[i]->kobj);
wait_for_completion(&device->kobjs[i]->kobj_unregister);
kfree(device->kobjs[i]);
device->kobjs[i] = NULL;
}
/**
* cpuidle_add_state_sysfs - adds per-state sysfs attributes for a device
* @device: the target device
*/
int cpuidle_add_state_sysfs(struct cpuidle_device *device)
{
int i, ret = -ENOMEM;
struct cpuidle_state_kobj *kobj;
/* state statistics */
for (i = 0; i < device->state_count; i++) {
kobj = kzalloc(sizeof(struct cpuidle_state_kobj), GFP_KERNEL);
if (!kobj)
goto error_state;
kobj->state = &device->states[i];
init_completion(&kobj->kobj_unregister);
kobj->kobj.parent = &device->kobj;
kobj->kobj.ktype = &ktype_state_cpuidle;
kobject_set_name(&kobj->kobj, "state%d", i);
ret = kobject_register(&kobj->kobj);
if (ret) {
kfree(kobj);
goto error_state;
}
device->kobjs[i] = kobj;
}
return 0;
error_state:
for (i = i - 1; i >= 0; i--)
cpuidle_free_state_kobj(device, i);
return ret;
}
/**
* cpuidle_remove_state_sysfs - removes per-state sysfs attributes for a device
* @device: the target device
*/
void cpuidle_remove_state_sysfs(struct cpuidle_device *device)
{
int i;
for (i = 0; i < device->state_count; i++)
cpuidle_free_state_kobj(device, i);
}
/**
* cpuidle_add_sysfs - creates a sysfs instance for the target device
* @sysdev: the target device
*/
int cpuidle_add_sysfs(struct sys_device *sysdev)
{
int cpu = sysdev->id;
struct cpuidle_device *dev;
dev = per_cpu(cpuidle_devices, cpu);
dev->kobj.parent = &sysdev->kobj;
dev->kobj.ktype = &ktype_cpuidle;
kobject_set_name(&dev->kobj, "%s", "cpuidle");
return kobject_register(&dev->kobj);
}
/**
* cpuidle_remove_sysfs - deletes a sysfs instance on the target device
* @sysdev: the target device
*/
void cpuidle_remove_sysfs(struct sys_device *sysdev)
{
int cpu = sysdev->id;
struct cpuidle_device *dev;
dev = per_cpu(cpuidle_devices, cpu);
kobject_unregister(&dev->kobj);
}
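For orientation, the class and per-device code above should produce roughly the following hierarchy, assuming the CPU sysdev class lives at /sys/devices/system/cpu; this listing is illustrative and not part of the commit. By default the class group exports current_driver and current_governor_ro; booting with cpuidle_sysfs_switch swaps that set for available_governors, current_driver and a writable current_governor.

/sys/devices/system/cpu/cpuidle/current_driver
/sys/devices/system/cpu/cpuidle/current_governor_ro
/sys/devices/system/cpu/cpu0/cpuidle/state0/name
/sys/devices/system/cpu/cpu0/cpuidle/state0/latency
/sys/devices/system/cpu/cpu0/cpuidle/state0/power
/sys/devices/system/cpu/cpu0/cpuidle/state0/usage
/sys/devices/system/cpu/cpu0/cpuidle/state0/time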
......@@ -111,6 +111,21 @@ config ASUS_LAPTOP
If you have an ACPI-compatible ASUS laptop, say Y or M here.
config FUJITSU_LAPTOP
tristate "Fujitsu Laptop Extras"
depends on X86
depends on ACPI
depends on BACKLIGHT_CLASS_DEVICE
---help---
This is a driver for laptops built by Fujitsu:
* P2xxx/P5xxx/S6xxx/S7xxx series Lifebooks
* Possibly other Fujitsu laptop models
It adds support for LCD brightness control.
If you have a Fujitsu laptop, say Y or M here.
config MSI_LAPTOP
tristate "MSI Laptop Extras"
depends on X86
......@@ -134,6 +149,7 @@ config SONY_LAPTOP
tristate "Sony Laptop Extras"
depends on X86 && ACPI
select BACKLIGHT_CLASS_DEVICE
depends on INPUT
---help---
This mini-driver drives the SNC and SPIC devices present in the ACPI
BIOS of the Sony Vaio laptops.
......@@ -156,6 +172,7 @@ config THINKPAD_ACPI
select BACKLIGHT_CLASS_DEVICE
select HWMON
select NVRAM
depends on INPUT
---help---
This is a driver for the IBM and Lenovo ThinkPad laptops. It adds
support for Fn-Fx key combinations, Bluetooth control, video
......
......@@ -15,4 +15,5 @@ obj-$(CONFIG_PHANTOM) += phantom.o
obj-$(CONFIG_SGI_IOC4) += ioc4.o
obj-$(CONFIG_SONY_LAPTOP) += sony-laptop.o
obj-$(CONFIG_THINKPAD_ACPI) += thinkpad_acpi.o
obj-$(CONFIG_FUJITSU_LAPTOP) += fujitsu-laptop.o
obj-$(CONFIG_EEPROM_93CX6) += eeprom_93cx6.o
......@@ -58,13 +58,14 @@
#define IBM_NAME "thinkpad"
#define IBM_DESC "ThinkPad ACPI Extras"
#define IBM_FILE "thinkpad_acpi"
#define IBM_FILE IBM_NAME "_acpi"
#define IBM_URL "http://ibm-acpi.sf.net/"
#define IBM_MAIL "ibm-acpi-devel@lists.sourceforge.net"
#define IBM_PROC_DIR "ibm"
#define IBM_ACPI_EVENT_PREFIX "ibm"
#define IBM_DRVR_NAME IBM_FILE
#define IBM_HWMON_DRVR_NAME IBM_NAME "_hwmon"
#define IBM_LOG IBM_FILE ": "
#define IBM_ERR KERN_ERR IBM_LOG
......@@ -171,6 +172,7 @@ static int parse_strtoul(const char *buf, unsigned long max,
/* Device model */
static struct platform_device *tpacpi_pdev;
static struct platform_device *tpacpi_sensors_pdev;
static struct device *tpacpi_hwmon;
static struct platform_driver tpacpi_pdriver;
static struct input_dev *tpacpi_inputdev;
......@@ -233,22 +235,25 @@ struct ibm_init_struct {
static struct {
#ifdef CONFIG_THINKPAD_ACPI_BAY
u16 bay_status:1;
u16 bay_eject:1;
u16 bay_status2:1;
u16 bay_eject2:1;
u32 bay_status:1;
u32 bay_eject:1;
u32 bay_status2:1;
u32 bay_eject2:1;
#endif
u16 bluetooth:1;
u16 hotkey:1;
u16 hotkey_mask:1;
u16 hotkey_wlsw:1;
u16 light:1;
u16 light_status:1;
u16 wan:1;
u16 fan_ctrl_status_undef:1;
u16 input_device_registered:1;
u16 platform_drv_registered:1;
u16 platform_drv_attrs_registered:1;
u32 bluetooth:1;
u32 hotkey:1;
u32 hotkey_mask:1;
u32 hotkey_wlsw:1;
u32 light:1;
u32 light_status:1;
u32 wan:1;
u32 fan_ctrl_status_undef:1;
u32 input_device_registered:1;
u32 platform_drv_registered:1;
u32 platform_drv_attrs_registered:1;
u32 sensors_pdrv_registered:1;
u32 sensors_pdrv_attrs_registered:1;
u32 sensors_pdev_attrs_registered:1;
} tp_features;
struct thinkpad_id_data {
......
......@@ -1858,14 +1858,6 @@ static void ipw2100_down(struct ipw2100_priv *priv)
modify_acceptable_latency("ipw2100", INFINITE_LATENCY);
#ifdef ACPI_CSTATE_LIMIT_DEFINED
if (priv->config & CFG_C3_DISABLED) {
IPW_DEBUG_INFO(": Resetting C3 transitions.\n");
acpi_set_cstate_limit(priv->cstate_limit);
priv->config &= ~CFG_C3_DISABLED;
}
#endif
/* We have to signal any supplicant if we are disassociating */
if (associated)
wireless_send_event(priv->net_dev, SIOCGIWAP, &wrqu, NULL);
......@@ -2091,14 +2083,6 @@ static void isr_indicate_rf_kill(struct ipw2100_priv *priv, u32 status)
/* RF_KILL is now enabled (else we wouldn't be here) */
priv->status |= STATUS_RF_KILL_HW;
#ifdef ACPI_CSTATE_LIMIT_DEFINED
if (priv->config & CFG_C3_DISABLED) {
IPW_DEBUG_INFO(": Resetting C3 transitions.\n");
acpi_set_cstate_limit(priv->cstate_limit);
priv->config &= ~CFG_C3_DISABLED;
}
#endif
/* Make sure the RF Kill check timer is running */
priv->stop_rf_kill = 0;
cancel_delayed_work(&priv->rf_kill);
......@@ -2329,23 +2313,10 @@ static void ipw2100_corruption_detected(struct ipw2100_priv *priv, int i)
u32 match, reg;
int j;
#endif
#ifdef ACPI_CSTATE_LIMIT_DEFINED
int limit;
#endif
IPW_DEBUG_INFO(": PCI latency error detected at 0x%04zX.\n",
i * sizeof(struct ipw2100_status));
#ifdef ACPI_CSTATE_LIMIT_DEFINED
IPW_DEBUG_INFO(": Disabling C3 transitions.\n");
limit = acpi_get_cstate_limit();
if (limit > 2) {
priv->cstate_limit = limit;
acpi_set_cstate_limit(2);
priv->config |= CFG_C3_DISABLED;
}
#endif
#ifdef IPW2100_DEBUG_C3
/* Halt the firmware so we can get a good image */
write_register(priv->net_dev, IPW_REG_RESET_REG,
......
......@@ -479,7 +479,6 @@ enum {
#define CFG_ASSOCIATE (1<<6)
#define CFG_FIXED_RATE (1<<7)
#define CFG_ADHOC_CREATE (1<<8)
#define CFG_C3_DISABLED (1<<9)
#define CFG_PASSIVE_SCAN (1<<10)
#ifdef CONFIG_IPW2100_MONITOR
#define CFG_CRC_CHECK (1<<11)
......@@ -508,7 +507,6 @@ struct ipw2100_priv {
u8 bssid[ETH_ALEN];
u8 channel;
int last_mode;
int cstate_limit;
unsigned long connect_start;
unsigned long last_reset;
......
......@@ -71,9 +71,9 @@ u32 acpi_hw_get_mode(void);
struct acpi_bit_register_info *acpi_hw_get_bit_register_info(u32 register_id);
acpi_status
acpi_hw_register_read(u8 use_lock, u32 register_id, u32 * return_value);
acpi_hw_register_read(u32 register_id, u32 * return_value);
acpi_status acpi_hw_register_write(u8 use_lock, u32 register_id, u32 value);
acpi_status acpi_hw_register_write(u32 register_id, u32 value);
acpi_status
acpi_hw_low_level_read(u32 width,
......
......@@ -264,7 +264,6 @@ struct acpi_device_wakeup_flags {
struct acpi_device_wakeup_state {
u8 enabled:1;
u8 active:1;
};
struct acpi_device_wakeup {
......@@ -333,6 +332,7 @@ int acpi_bus_get_power(acpi_handle handle, int *state);
int acpi_bus_set_power(acpi_handle handle, int state);
#ifdef CONFIG_ACPI_PROC_EVENT
int acpi_bus_generate_proc_event(struct acpi_device *device, u8 type, int data);
int acpi_bus_generate_proc_event4(const char *class, const char *bid, u8 type, int data);
int acpi_bus_receive_event(struct acpi_bus_event *event);
#else
static inline int acpi_bus_generate_proc_event(struct acpi_device *device, u8 type, int data)
......
......@@ -314,6 +314,8 @@ acpi_resource_to_address64(struct acpi_resource *resource,
*/
acpi_status acpi_get_register(u32 register_id, u32 * return_value);
acpi_status acpi_get_register_unlocked(u32 register_id, u32 *return_value);
acpi_status acpi_set_register(u32 register_id, u32 value);
acpi_status
......