Commit 56963d99 authored by Rafael J. Wysocki

Merge branches 'pm-sleep', 'pm-domains' and 'pm-avs'

* pm-sleep:
  PM / suspend: make sync() on suspend-to-RAM build-time optional
  PM / sleep: Allow devices without runtime PM to do direct-complete
  PM / autosleep: Use workqueue for user space wakeup sources garbage collector

* pm-domains:
  PM / Domains: Fix typo in description of genpd_dev_pm_detach()
  PM / Domains: Remove unusable governor dummies
  PM / Domains: Make pm_genpd_init() available to modules
  PM / domains: Align column headers and data in pm_genpd_summary output
  PM / Domains: Return -EPROBE_DEFER if we fail to init or turn-on domain
  PM / Domains: Correct unit address in power-controller example
  PM / Domains: Remove intermediate states from the power off sequence

* pm-avs:
  PM / AVS: rockchip-io: add io selectors and supplies for rk3368
  PM / AVS: rockchip-io: depend on CONFIG_POWER_AVS
@@ -48,7 +48,7 @@ Example 2:
#power-domain-cells = <1>;
};
-child: power-controller@12340000 {
+child: power-controller@12341000 {
compatible = "foo,power-controller";
reg = <0x12341000 0x1000>;
power-domains = <&parent 0>;
......
@@ -33,6 +33,8 @@ Required properties:
- compatible: should be one of:
- "rockchip,rk3188-io-voltage-domain" for rk3188
- "rockchip,rk3288-io-voltage-domain" for rk3288
- "rockchip,rk3368-io-voltage-domain" for rk3368
- "rockchip,rk3368-pmu-io-voltage-domain" for rk3368 pmu-domains
- rockchip,grf: phandle to the syscon managing the "general register files"
@@ -64,6 +66,18 @@ Possible supplies for rk3288:
- sdcard-supply: The supply connected to SDMMC0_VDD.
- wifi-supply: The supply connected to APIO3_VDD. Also known as SDIO0.
Possible supplies for rk3368:
- audio-supply: The supply connected to APIO3_VDD.
- dvp-supply: The supply connected to DVPIO_VDD.
- flash0-supply: The supply connected to FLASH0_VDD. Typically for eMMC
- gpio30-supply: The supply connected to APIO1_VDD.
- gpio1830-supply: The supply connected to APIO4_VDD.
- sdcard-supply: The supply connected to SDMMC0_VDD.
- wifi-supply: The supply connected to APIO2_VDD. Also known as SDIO0.
Possible supplies for rk3368 pmu-domains:
- pmu-supply: The supply connected to PMUIO_VDD.
- vop-supply: The supply connected to LCDC_VDD.
Example:
......
@@ -341,6 +341,13 @@ the phases are:
and is entirely responsible for bringing the device back to the
functional state as appropriate.
Note that this direct-complete procedure applies even if the device is
disabled for runtime PM; only the runtime-PM status matters. It follows
that if a device has system-sleep callbacks but does not support runtime
PM, then its prepare callback must never return a positive value. This
is because all devices are initially set to runtime-suspended with
runtime PM disabled.
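As a minimal illustration of that rule (a sketch with hypothetical foo_* callbacks): a driver that provides system-sleep callbacks but does not support runtime PM keeps its prepare callback returning 0 or a negative error, never a positive value.

#include <linux/pm.h>

/*
 * Hypothetical driver with system-sleep callbacks but no runtime PM support.
 * Its prepare() never returns a positive value, because the device starts
 * out runtime-suspended with runtime PM disabled.
 */
static int foo_suspend(struct device *dev)
{
	/* Quiesce the device and save whatever state is needed. */
	return 0;
}

static int foo_resume(struct device *dev)
{
	/* Bring the device back to the fully functional state. */
	return 0;
}

static int foo_prepare(struct device *dev)
{
	/* 0 (not a positive value) requests an ordinary suspend/resume cycle. */
	return 0;
}

static const struct dev_pm_ops foo_pm_ops = {
	.prepare = foo_prepare,
	.suspend = foo_suspend,
	.resume  = foo_resume,
};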
2. The suspend methods should quiesce the device to stop it from performing
I/O. They also may save the device registers and put it into the
appropriate low-power state, depending on the bus type the device is on,
......
@@ -445,10 +445,6 @@ drivers/base/power/runtime.c and include/linux/pm_runtime.h:
bool pm_runtime_status_suspended(struct device *dev);
- return true if the device's runtime PM status is 'suspended'
bool pm_runtime_suspended_if_enabled(struct device *dev);
- return true if the device's runtime PM status is 'suspended' and its
'power.disable_depth' field is equal to 1
void pm_runtime_allow(struct device *dev);
- set the power.runtime_auto flag for the device and decrease its usage
counter (used by the /sys/devices/.../power/control interface to
......
@@ -114,8 +114,12 @@ static int genpd_stop_dev(struct generic_pm_domain *genpd, struct device *dev)
stop_latency_ns, "stop");
}
-static int genpd_start_dev(struct generic_pm_domain *genpd, struct device *dev)
+static int genpd_start_dev(struct generic_pm_domain *genpd, struct device *dev,
bool timed)
{
if (!timed)
return GENPD_DEV_CALLBACK(genpd, int, start, dev);
return GENPD_DEV_TIMED_CALLBACK(genpd, int, start, dev,
start_latency_ns, "start");
}
@@ -136,41 +140,6 @@ static void genpd_sd_counter_inc(struct generic_pm_domain *genpd)
smp_mb__after_atomic();
}
static void genpd_acquire_lock(struct generic_pm_domain *genpd)
{
DEFINE_WAIT(wait);
mutex_lock(&genpd->lock);
/*
* Wait for the domain to transition into either the active,
* or the power off state.
*/
for (;;) {
prepare_to_wait(&genpd->status_wait_queue, &wait,
TASK_UNINTERRUPTIBLE);
if (genpd->status == GPD_STATE_ACTIVE
|| genpd->status == GPD_STATE_POWER_OFF)
break;
mutex_unlock(&genpd->lock);
schedule();
mutex_lock(&genpd->lock);
}
finish_wait(&genpd->status_wait_queue, &wait);
}
static void genpd_release_lock(struct generic_pm_domain *genpd)
{
mutex_unlock(&genpd->lock);
}
static void genpd_set_active(struct generic_pm_domain *genpd)
{
if (genpd->resume_count == 0)
genpd->status = GPD_STATE_ACTIVE;
}
static void genpd_recalc_cpu_exit_latency(struct generic_pm_domain *genpd)
{
s64 usecs64;
@@ -251,35 +220,14 @@ static int genpd_power_off(struct generic_pm_domain *genpd, bool timed)
* resume a device belonging to it.
*/
static int __pm_genpd_poweron(struct generic_pm_domain *genpd)
__releases(&genpd->lock) __acquires(&genpd->lock)
{
struct gpd_link *link;
DEFINE_WAIT(wait);
int ret = 0;
/* If the domain's master is being waited for, we have to wait too. */
for (;;) {
prepare_to_wait(&genpd->status_wait_queue, &wait,
TASK_UNINTERRUPTIBLE);
if (genpd->status != GPD_STATE_WAIT_MASTER)
break;
mutex_unlock(&genpd->lock);
schedule();
mutex_lock(&genpd->lock);
}
finish_wait(&genpd->status_wait_queue, &wait);
if (genpd->status == GPD_STATE_ACTIVE
|| (genpd->prepared_count > 0 && genpd->suspend_power_off))
return 0;
if (genpd->status != GPD_STATE_POWER_OFF) {
genpd_set_active(genpd);
return 0;
}
if (genpd->cpuidle_data) {
cpuidle_pause_and_lock();
genpd->cpuidle_data->idle_state->disabled = true;
@@ -294,20 +242,8 @@ static int __pm_genpd_poweron(struct generic_pm_domain *genpd)
*/
list_for_each_entry(link, &genpd->slave_links, slave_node) {
genpd_sd_counter_inc(link->master);
genpd->status = GPD_STATE_WAIT_MASTER;
mutex_unlock(&genpd->lock);
ret = pm_genpd_poweron(link->master);
mutex_lock(&genpd->lock);
/*
* The "wait for parent" status is guaranteed not to change
* while the master is powering on.
*/
genpd->status = GPD_STATE_POWER_OFF;
wake_up_all(&genpd->status_wait_queue);
if (ret) {
genpd_sd_counter_dec(link->master);
goto err;
@@ -319,8 +255,7 @@ static int __pm_genpd_poweron(struct generic_pm_domain *genpd)
goto err;
out:
-genpd_set_active(genpd);
+genpd->status = GPD_STATE_ACTIVE;
return 0;
err:
@@ -356,20 +291,18 @@ int pm_genpd_name_poweron(const char *domain_name)
return genpd ? pm_genpd_poweron(genpd) : -EINVAL;
}
static int genpd_start_dev_no_timing(struct generic_pm_domain *genpd,
struct device *dev)
{
return GENPD_DEV_CALLBACK(genpd, int, start, dev);
}
static int genpd_save_dev(struct generic_pm_domain *genpd, struct device *dev)
{
return GENPD_DEV_TIMED_CALLBACK(genpd, int, save_state, dev,
save_state_latency_ns, "state save");
}
-static int genpd_restore_dev(struct generic_pm_domain *genpd, struct device *dev)
+static int genpd_restore_dev(struct generic_pm_domain *genpd,
struct device *dev, bool timed)
{
if (!timed)
return GENPD_DEV_CALLBACK(genpd, int, restore_state, dev);
return GENPD_DEV_TIMED_CALLBACK(genpd, int, restore_state, dev,
restore_state_latency_ns,
"state restore");
@@ -415,89 +348,6 @@ static int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
return NOTIFY_DONE;
}
/**
* __pm_genpd_save_device - Save the pre-suspend state of a device.
* @pdd: Domain data of the device to save the state of.
* @genpd: PM domain the device belongs to.
*/
static int __pm_genpd_save_device(struct pm_domain_data *pdd,
struct generic_pm_domain *genpd)
__releases(&genpd->lock) __acquires(&genpd->lock)
{
struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
struct device *dev = pdd->dev;
int ret = 0;
if (gpd_data->need_restore > 0)
return 0;
/*
* If the value of the need_restore flag is still unknown at this point,
* we trust that pm_genpd_poweroff() has verified that the device is
* already runtime PM suspended.
*/
if (gpd_data->need_restore < 0) {
gpd_data->need_restore = 1;
return 0;
}
mutex_unlock(&genpd->lock);
genpd_start_dev(genpd, dev);
ret = genpd_save_dev(genpd, dev);
genpd_stop_dev(genpd, dev);
mutex_lock(&genpd->lock);
if (!ret)
gpd_data->need_restore = 1;
return ret;
}
/**
* __pm_genpd_restore_device - Restore the pre-suspend state of a device.
* @pdd: Domain data of the device to restore the state of.
* @genpd: PM domain the device belongs to.
*/
static void __pm_genpd_restore_device(struct pm_domain_data *pdd,
struct generic_pm_domain *genpd)
__releases(&genpd->lock) __acquires(&genpd->lock)
{
struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
struct device *dev = pdd->dev;
int need_restore = gpd_data->need_restore;
gpd_data->need_restore = 0;
mutex_unlock(&genpd->lock);
genpd_start_dev(genpd, dev);
/*
* Call genpd_restore_dev() for recently added devices too (need_restore
* is negative then).
*/
if (need_restore)
genpd_restore_dev(genpd, dev);
mutex_lock(&genpd->lock);
}
/**
* genpd_abort_poweroff - Check if a PM domain power off should be aborted.
* @genpd: PM domain to check.
*
* Return true if a PM domain's status changed to GPD_STATE_ACTIVE during
* a "power off" operation, which means that a "power on" has occured in the
* meantime, or if its resume_count field is different from zero, which means
* that one of its devices has been resumed in the meantime.
*/
static bool genpd_abort_poweroff(struct generic_pm_domain *genpd)
{
return genpd->status == GPD_STATE_WAIT_MASTER
|| genpd->status == GPD_STATE_ACTIVE || genpd->resume_count > 0;
}
/**
* genpd_queue_power_off_work - Queue up the execution of pm_genpd_poweroff().
* @genpd: PM domain to power off.
@@ -515,34 +365,26 @@ static void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
* @genpd: PM domain to power down.
*
* If all of the @genpd's devices have been suspended and all of its subdomains
-* have been powered down, run the runtime suspend callbacks provided by all of
+* have been powered down, remove power from @genpd.
* the @genpd's devices' drivers and remove power from @genpd.
*/
static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
__releases(&genpd->lock) __acquires(&genpd->lock)
{
struct pm_domain_data *pdd;
struct gpd_link *link;
-unsigned int not_suspended;
+unsigned int not_suspended = 0;
int ret = 0;
start:
/*
* Do not try to power off the domain in the following situations:
* (1) The domain is already in the "power off" state.
-* (2) The domain is waiting for its master to power up.
+* (2) System suspend is in progress.
* (3) One of the domain's devices is being resumed right now.
* (4) System suspend is in progress.
*/
if (genpd->status == GPD_STATE_POWER_OFF
-|| genpd->status == GPD_STATE_WAIT_MASTER
+|| genpd->prepared_count > 0)
|| genpd->resume_count > 0 || genpd->prepared_count > 0)
return 0;
if (atomic_read(&genpd->sd_count) > 0)
return -EBUSY;
not_suspended = 0;
list_for_each_entry(pdd, &genpd->dev_list, list_node) {
enum pm_qos_flags_status stat;
@@ -560,41 +402,11 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
if (not_suspended > genpd->in_progress)
return -EBUSY;
if (genpd->poweroff_task) {
/*
* Another instance of pm_genpd_poweroff() is executing
* callbacks, so tell it to start over and return.
*/
genpd->status = GPD_STATE_REPEAT;
return 0;
}
if (genpd->gov && genpd->gov->power_down_ok) {
if (!genpd->gov->power_down_ok(&genpd->domain))
return -EAGAIN;
}
genpd->status = GPD_STATE_BUSY;
genpd->poweroff_task = current;
list_for_each_entry_reverse(pdd, &genpd->dev_list, list_node) {
ret = atomic_read(&genpd->sd_count) == 0 ?
__pm_genpd_save_device(pdd, genpd) : -EBUSY;
if (genpd_abort_poweroff(genpd))
goto out;
if (ret) {
genpd_set_active(genpd);
goto out;
}
if (genpd->status == GPD_STATE_REPEAT) {
genpd->poweroff_task = NULL;
goto start;
}
}
if (genpd->cpuidle_data) {
/*
* If cpuidle_data is set, cpuidle should turn the domain off
@@ -607,14 +419,14 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
cpuidle_pause_and_lock();
genpd->cpuidle_data->idle_state->disabled = false;
cpuidle_resume_and_unlock();
-goto out;
+return 0;
}
if (genpd->power_off) {
-if (atomic_read(&genpd->sd_count) > 0) {
+int ret;
ret = -EBUSY;
-goto out;
+if (atomic_read(&genpd->sd_count) > 0)
-}
+return -EBUSY;
/*
* If sd_count > 0 at this point, one of the subdomains hasn't * If sd_count > 0 at this point, one of the subdomains hasn't
@@ -625,10 +437,8 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
* happen very often).
*/
ret = genpd_power_off(genpd, true);
-if (ret == -EBUSY) {
+if (ret)
-genpd_set_active(genpd);
+return ret;
goto out;
}
}
genpd->status = GPD_STATE_POWER_OFF;
@@ -638,10 +448,7 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
genpd_queue_power_off_work(link->master);
}
-out:
+return 0;
genpd->poweroff_task = NULL;
wake_up_all(&genpd->status_wait_queue);
return ret;
}
/**
@@ -654,9 +461,9 @@ static void genpd_power_off_work_fn(struct work_struct *work)
genpd = container_of(work, struct generic_pm_domain, power_off_work);
-genpd_acquire_lock(genpd);
+mutex_lock(&genpd->lock);
pm_genpd_poweroff(genpd);
-genpd_release_lock(genpd);
+mutex_unlock(&genpd->lock);
}
/**
@@ -670,7 +477,6 @@ static void genpd_power_off_work_fn(struct work_struct *work)
static int pm_genpd_runtime_suspend(struct device *dev)
{
struct generic_pm_domain *genpd;
struct generic_pm_domain_data *gpd_data;
bool (*stop_ok)(struct device *__dev);
int ret;
@@ -684,10 +490,16 @@ static int pm_genpd_runtime_suspend(struct device *dev)
if (stop_ok && !stop_ok(dev))
return -EBUSY;
-ret = genpd_stop_dev(genpd, dev);
+ret = genpd_save_dev(genpd, dev);
if (ret)
return ret;
ret = genpd_stop_dev(genpd, dev);
if (ret) {
genpd_restore_dev(genpd, dev, true);
return ret;
}
/*
* If power.irq_safe is set, this routine will be run with interrupts
* off, so it can't use mutexes.
@@ -696,16 +508,6 @@ static int pm_genpd_runtime_suspend(struct device *dev)
return 0;
mutex_lock(&genpd->lock);
/*
* If we have an unknown state of the need_restore flag, it means none
* of the runtime PM callbacks has been invoked yet. Let's update the
* flag to reflect that the current state is active.
*/
gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
if (gpd_data->need_restore < 0)
gpd_data->need_restore = 0;
genpd->in_progress++;
pm_genpd_poweroff(genpd);
genpd->in_progress--;
@@ -725,8 +527,8 @@ static int pm_genpd_runtime_suspend(struct device *dev)
static int pm_genpd_runtime_resume(struct device *dev)
{
struct generic_pm_domain *genpd;
DEFINE_WAIT(wait);
int ret;
bool timed = true;
dev_dbg(dev, "%s()\n", __func__);
@@ -735,39 +537,21 @@ static int pm_genpd_runtime_resume(struct device *dev)
return -EINVAL;
/* If power.irq_safe, the PM domain is never powered off. */
-if (dev->power.irq_safe)
+if (dev->power.irq_safe) {
-return genpd_start_dev_no_timing(genpd, dev);
+timed = false;
goto out;
}
mutex_lock(&genpd->lock);
ret = __pm_genpd_poweron(genpd);
if (ret) {
mutex_unlock(&genpd->lock);
return ret;
}
genpd->status = GPD_STATE_BUSY;
genpd->resume_count++;
for (;;) {
prepare_to_wait(&genpd->status_wait_queue, &wait,
TASK_UNINTERRUPTIBLE);
/*
* If current is the powering off task, we have been called
* reentrantly from one of the device callbacks, so we should
* not wait.
*/
if (!genpd->poweroff_task || genpd->poweroff_task == current)
break;
mutex_unlock(&genpd->lock);
-schedule();
+if (ret)
return ret;
-mutex_lock(&genpd->lock);
+out:
-}
+genpd_start_dev(genpd, dev, timed);
-finish_wait(&genpd->status_wait_queue, &wait);
+genpd_restore_dev(genpd, dev, timed);
__pm_genpd_restore_device(dev->power.subsys_data->domain_data, genpd);
genpd->resume_count--;
genpd_set_active(genpd);
wake_up_all(&genpd->status_wait_queue);
mutex_unlock(&genpd->lock);
return 0;
}
@@ -883,7 +667,7 @@ static void pm_genpd_sync_poweron(struct generic_pm_domain *genpd,
{
struct gpd_link *link;
-if (genpd->status != GPD_STATE_POWER_OFF)
+if (genpd->status == GPD_STATE_ACTIVE)
return;
list_for_each_entry(link, &genpd->slave_links, slave_node) {
@@ -960,14 +744,14 @@ static int pm_genpd_prepare(struct device *dev)
if (resume_needed(dev, genpd))
pm_runtime_resume(dev);
-genpd_acquire_lock(genpd);
+mutex_lock(&genpd->lock);
if (genpd->prepared_count++ == 0) {
genpd->suspended_count = 0;
genpd->suspend_power_off = genpd->status == GPD_STATE_POWER_OFF;
}
-genpd_release_lock(genpd);
+mutex_unlock(&genpd->lock);
if (genpd->suspend_power_off) {
pm_runtime_put_noidle(dev);
@@ -1102,7 +886,7 @@ static int pm_genpd_resume_noirq(struct device *dev)
pm_genpd_sync_poweron(genpd, true);
genpd->suspended_count--;
-return genpd_start_dev(genpd, dev);
+return genpd_start_dev(genpd, dev, true);
}
/**
@@ -1230,7 +1014,7 @@ static int pm_genpd_thaw_noirq(struct device *dev)
if (IS_ERR(genpd))
return -EINVAL;
-return genpd->suspend_power_off ? 0 : genpd_start_dev(genpd, dev);
+return genpd->suspend_power_off ? 0 : genpd_start_dev(genpd, dev, true);
}
/**
@@ -1324,7 +1108,7 @@ static int pm_genpd_restore_noirq(struct device *dev)
pm_genpd_sync_poweron(genpd, true);
-return genpd_start_dev(genpd, dev);
+return genpd_start_dev(genpd, dev, true);
}
/**
@@ -1440,7 +1224,6 @@ static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev,
gpd_data->td = *td;
gpd_data->base.dev = dev;
gpd_data->need_restore = -1;
gpd_data->td.constraint_changed = true;
gpd_data->td.effective_constraint_ns = -1;
gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier;
@@ -1502,7 +1285,7 @@ int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
if (IS_ERR(gpd_data))
return PTR_ERR(gpd_data);
-genpd_acquire_lock(genpd);
+mutex_lock(&genpd->lock);
if (genpd->prepared_count > 0) {
ret = -EAGAIN;
@@ -1519,7 +1302,7 @@ int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);
out:
-genpd_release_lock(genpd);
+mutex_unlock(&genpd->lock);
if (ret)
genpd_free_dev_data(dev, gpd_data);
@@ -1563,7 +1346,7 @@ int pm_genpd_remove_device(struct generic_pm_domain *genpd,
gpd_data = to_gpd_data(pdd);
dev_pm_qos_remove_notifier(dev, &gpd_data->nb);
-genpd_acquire_lock(genpd);
+mutex_lock(&genpd->lock);
if (genpd->prepared_count > 0) {
ret = -EAGAIN;
@@ -1578,14 +1361,14 @@ int pm_genpd_remove_device(struct generic_pm_domain *genpd,
list_del_init(&pdd->list_node);
-genpd_release_lock(genpd);
+mutex_unlock(&genpd->lock);
genpd_free_dev_data(dev, gpd_data);
return 0;
out:
-genpd_release_lock(genpd);
+mutex_unlock(&genpd->lock);
dev_pm_qos_add_notifier(dev, &gpd_data->nb);
return ret;
@@ -1606,17 +1389,9 @@ int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
|| genpd == subdomain)
return -EINVAL;
-start:
+mutex_lock(&genpd->lock);
genpd_acquire_lock(genpd);
mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING);
if (subdomain->status != GPD_STATE_POWER_OFF
&& subdomain->status != GPD_STATE_ACTIVE) {
mutex_unlock(&subdomain->lock);
genpd_release_lock(genpd);
goto start;
}
if (genpd->status == GPD_STATE_POWER_OFF
&& subdomain->status != GPD_STATE_POWER_OFF) {
ret = -EINVAL;
@@ -1644,7 +1419,7 @@ int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
out:
mutex_unlock(&subdomain->lock);
-genpd_release_lock(genpd);
+mutex_unlock(&genpd->lock);
return ret;
}
@@ -1692,8 +1467,7 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
return -EINVAL;
-start:
+mutex_lock(&genpd->lock);
genpd_acquire_lock(genpd);
list_for_each_entry(link, &genpd->master_links, master_node) {
if (link->slave != subdomain)
@@ -1701,13 +1475,6 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING);
if (subdomain->status != GPD_STATE_POWER_OFF
&& subdomain->status != GPD_STATE_ACTIVE) {
mutex_unlock(&subdomain->lock);
genpd_release_lock(genpd);
goto start;
}
list_del(&link->master_node);
list_del(&link->slave_node);
kfree(link);
@@ -1720,7 +1487,7 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
break;
}
-genpd_release_lock(genpd);
+mutex_unlock(&genpd->lock);
return ret;
}
@@ -1744,7 +1511,7 @@ int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state)
if (IS_ERR_OR_NULL(genpd) || state < 0)
return -EINVAL;
-genpd_acquire_lock(genpd);
+mutex_lock(&genpd->lock);
if (genpd->cpuidle_data) {
ret = -EEXIST;
@@ -1775,7 +1542,7 @@ int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state)
genpd_recalc_cpu_exit_latency(genpd);
out:
-genpd_release_lock(genpd);
+mutex_unlock(&genpd->lock);
return ret;
err:
@@ -1812,7 +1579,7 @@ int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd)
if (IS_ERR_OR_NULL(genpd))
return -EINVAL;
-genpd_acquire_lock(genpd);
+mutex_lock(&genpd->lock);
cpuidle_data = genpd->cpuidle_data;
if (!cpuidle_data) {
@@ -1830,7 +1597,7 @@ int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd)
kfree(cpuidle_data);
out:
-genpd_release_lock(genpd);
+mutex_unlock(&genpd->lock);
return ret;
}
@@ -1912,9 +1679,6 @@ void pm_genpd_init(struct generic_pm_domain *genpd,
genpd->in_progress = 0;
atomic_set(&genpd->sd_count, 0);
genpd->status = is_off ? GPD_STATE_POWER_OFF : GPD_STATE_ACTIVE;
init_waitqueue_head(&genpd->status_wait_queue);
genpd->poweroff_task = NULL;
genpd->resume_count = 0;
genpd->device_count = 0;
genpd->max_off_time_ns = -1;
genpd->max_off_time_changed = true;
@@ -1952,6 +1716,7 @@ void pm_genpd_init(struct generic_pm_domain *genpd,
list_add(&genpd->gpd_list_node, &gpd_list);
mutex_unlock(&gpd_list_lock);
}
EXPORT_SYMBOL_GPL(pm_genpd_init);
#ifdef CONFIG_PM_GENERIC_DOMAINS_OF
/*
@@ -2125,7 +1890,7 @@ EXPORT_SYMBOL_GPL(of_genpd_get_from_provider);
/**
* genpd_dev_pm_detach - Detach a device from its PM domain.
-* @dev: Device to attach.
+* @dev: Device to detach.
* @power_off: Currently not used
*
* Try to locate a corresponding generic PM domain, which the device was
@@ -2183,7 +1948,10 @@ static void genpd_dev_pm_sync(struct device *dev)
* Both generic and legacy Samsung-specific DT bindings are supported to keep
* backwards compatibility with existing DTBs.
*
-* Returns 0 on successfully attached PM domain or negative error code.
+* Returns 0 on successfully attached PM domain or negative error code. Note
* that if a power-domain exists for the device, but it cannot be found or
* turned on, then return -EPROBE_DEFER to ensure that the device is not
* probed and to re-try again later.
*/
int genpd_dev_pm_attach(struct device *dev)
{
@@ -2220,7 +1988,7 @@ int genpd_dev_pm_attach(struct device *dev)
dev_dbg(dev, "%s() failed to find PM domain: %ld\n",
__func__, PTR_ERR(pd));
of_node_put(dev->of_node);
-return PTR_ERR(pd);
+return -EPROBE_DEFER;
}
dev_dbg(dev, "adding to PM domain %s\n", pd->name);
@@ -2238,14 +2006,15 @@ int genpd_dev_pm_attach(struct device *dev)
dev_err(dev, "failed to add to PM domain %s: %d",
pd->name, ret);
of_node_put(dev->of_node);
-return ret;
+goto out;
}
dev->pm_domain->detach = genpd_dev_pm_detach;
dev->pm_domain->sync = genpd_dev_pm_sync;
-pm_genpd_poweron(pd);
+ret = pm_genpd_poweron(pd);
-return 0;
+out:
return ret ? -EPROBE_DEFER : 0;
}
EXPORT_SYMBOL_GPL(genpd_dev_pm_attach);
#endif /* CONFIG_PM_GENERIC_DOMAINS_OF */
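A rough sketch of how a caller is expected to honour the -EPROBE_DEFER contract described above; the foo_* names are hypothetical and only stand in for real bus probe code:

#include <linux/device.h>
#include <linux/pm_domain.h>

/* Hypothetical stand-in for the rest of a bus's probe path. */
static int foo_really_probe(struct device *dev)
{
	return 0;
}

static int foo_bus_probe(struct device *dev)
{
	int ret;

	/* Bind the device to the generic PM domain described in DT, if any. */
	ret = genpd_dev_pm_attach(dev);
	if (ret == -EPROBE_DEFER)
		return ret;	/* domain not ready yet; the driver core retries later */

	return foo_really_probe(dev);
}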
@@ -2293,9 +2062,6 @@ static int pm_genpd_summary_one(struct seq_file *s,
{
static const char * const status_lookup[] = {
[GPD_STATE_ACTIVE] = "on",
[GPD_STATE_WAIT_MASTER] = "wait-master",
[GPD_STATE_BUSY] = "busy",
[GPD_STATE_REPEAT] = "off-in-progress",
[GPD_STATE_POWER_OFF] = "off" [GPD_STATE_POWER_OFF] = "off"
}; };
struct pm_domain_data *pm_data; struct pm_domain_data *pm_data;
@@ -2344,7 +2110,7 @@ static int pm_genpd_summary_show(struct seq_file *s, void *data)
struct generic_pm_domain *genpd;
int ret = 0;
-seq_puts(s, " domain status slaves\n");
+seq_puts(s, "domain status slaves\n");
seq_puts(s, " /device runtime status\n");
seq_puts(s, "----------------------------------------------------------------------\n");
......
@@ -1377,7 +1377,7 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
if (dev->power.direct_complete) {
if (pm_runtime_status_suspended(dev)) {
pm_runtime_disable(dev);
-if (pm_runtime_suspended_if_enabled(dev))
+if (pm_runtime_status_suspended(dev))
goto Complete;
pm_runtime_enable(dev);
......
@@ -13,7 +13,7 @@ menuconfig POWER_AVS
config ROCKCHIP_IODOMAIN
tristate "Rockchip IO domain support"
-depends on ARCH_ROCKCHIP && OF
+depends on POWER_AVS && ARCH_ROCKCHIP && OF
help
Say y here to enable support io domains on Rockchip SoCs. It is
necessary for the io domain setting of the SoC to match the
......
@@ -43,6 +43,10 @@
#define RK3288_SOC_CON2_FLASH0 BIT(7)
#define RK3288_SOC_FLASH_SUPPLY_NUM 2
#define RK3368_SOC_CON15 0x43c
#define RK3368_SOC_CON15_FLASH0 BIT(14)
#define RK3368_SOC_FLASH_SUPPLY_NUM 2
struct rockchip_iodomain;
/**
@@ -158,6 +162,25 @@ static void rk3288_iodomain_init(struct rockchip_iodomain *iod)
dev_warn(iod->dev, "couldn't update flash0 ctrl\n");
}
static void rk3368_iodomain_init(struct rockchip_iodomain *iod)
{
int ret;
u32 val;
/* if no flash supply we should leave things alone */
if (!iod->supplies[RK3368_SOC_FLASH_SUPPLY_NUM].reg)
return;
/*
* set flash0 iodomain to also use this framework
* instead of a special gpio.
*/
val = RK3368_SOC_CON15_FLASH0 | (RK3368_SOC_CON15_FLASH0 << 16);
ret = regmap_write(iod->grf, RK3368_SOC_CON15, val);
if (ret < 0)
dev_warn(iod->dev, "couldn't update flash0 ctrl\n");
}
/*
* On the rk3188 the io-domains are handled by a shared register with the
* lower 8 bits being still being continuing drive-strength settings.
@@ -201,6 +224,34 @@ static const struct rockchip_iodomain_soc_data soc_data_rk3288 = {
.init = rk3288_iodomain_init,
};
static const struct rockchip_iodomain_soc_data soc_data_rk3368 = {
.grf_offset = 0x900,
.supply_names = {
NULL, /* reserved */
"dvp", /* DVPIO_VDD */
"flash0", /* FLASH0_VDD (emmc) */
"wifi", /* APIO2_VDD (sdio0) */
NULL,
"audio", /* APIO3_VDD */
"sdcard", /* SDMMC0_VDD (sdmmc) */
"gpio30", /* APIO1_VDD */
"gpio1830", /* APIO4_VDD (gpujtag) */
},
.init = rk3368_iodomain_init,
};
static const struct rockchip_iodomain_soc_data soc_data_rk3368_pmu = {
.grf_offset = 0x100,
.supply_names = {
NULL,
NULL,
NULL,
NULL,
"pmu", /*PMU IO domain*/
"vop", /*LCDC IO domain*/
},
};
static const struct of_device_id rockchip_iodomain_match[] = {
{
.compatible = "rockchip,rk3188-io-voltage-domain",
@@ -210,6 +261,14 @@ static const struct of_device_id rockchip_iodomain_match[] = {
.compatible = "rockchip,rk3288-io-voltage-domain",
.data = (void *)&soc_data_rk3288
},
{
.compatible = "rockchip,rk3368-io-voltage-domain",
.data = (void *)&soc_data_rk3368
},
{
.compatible = "rockchip,rk3368-pmu-io-voltage-domain",
.data = (void *)&soc_data_rk3368_pmu
},
{ /* sentinel */ },
};
......
@@ -22,9 +22,6 @@
enum gpd_status {
GPD_STATE_ACTIVE = 0, /* PM domain is active */
GPD_STATE_WAIT_MASTER, /* PM domain's master is being waited for */
GPD_STATE_BUSY, /* Something is happening to the PM domain */
GPD_STATE_REPEAT, /* Power off in progress, to be repeated */
GPD_STATE_POWER_OFF, /* PM domain is off */
};
@@ -59,9 +56,6 @@ struct generic_pm_domain {
unsigned int in_progress; /* Number of devices being suspended now */
atomic_t sd_count; /* Number of subdomains with power "on" */
enum gpd_status status; /* Current state of the domain */
wait_queue_head_t status_wait_queue;
struct task_struct *poweroff_task; /* Powering off task */
unsigned int resume_count; /* Number of devices being resumed */
unsigned int device_count; /* Number of devices */
unsigned int suspended_count; /* System suspend device counter */
unsigned int prepared_count; /* Suspend counter of prepared devices */
@@ -113,7 +107,6 @@ struct generic_pm_domain_data {
struct pm_domain_data base;
struct gpd_timing_data td;
struct notifier_block nb;
int need_restore;
};
#ifdef CONFIG_PM_GENERIC_DOMAINS
@@ -228,8 +221,6 @@ static inline int pm_genpd_name_poweron(const char *domain_name)
return -ENOSYS;
}
static inline void pm_genpd_poweroff_unused(void) {}
#define simple_qos_governor NULL
#define pm_domain_always_on_gov NULL
#endif
static inline int pm_genpd_add_device(struct generic_pm_domain *genpd,
......
@@ -98,11 +98,6 @@ static inline bool pm_runtime_status_suspended(struct device *dev)
return dev->power.runtime_status == RPM_SUSPENDED;
}
static inline bool pm_runtime_suspended_if_enabled(struct device *dev)
{
return pm_runtime_status_suspended(dev) && dev->power.disable_depth == 1;
}
static inline bool pm_runtime_enabled(struct device *dev)
{
return !dev->power.disable_depth;
@@ -164,7 +159,6 @@ static inline void device_set_run_wake(struct device *dev, bool enable) {}
static inline bool pm_runtime_suspended(struct device *dev) { return false; }
static inline bool pm_runtime_active(struct device *dev) { return true; }
static inline bool pm_runtime_status_suspended(struct device *dev) { return false; }
static inline bool pm_runtime_suspended_if_enabled(struct device *dev) { return false; }
static inline bool pm_runtime_enabled(struct device *dev) { return false; }
static inline void pm_runtime_no_callbacks(struct device *dev) {}
......
@@ -18,6 +18,16 @@ config SUSPEND_FREEZER
Turning OFF this setting is NOT recommended! If in doubt, say Y.
config SUSPEND_SKIP_SYNC
bool "Skip kernel's sys_sync() on suspend to RAM/standby"
depends on SUSPEND
depends on EXPERT
help
Skip the kernel sys_sync() before freezing user processes.
Some systems prefer not to pay this cost on every invocation
of suspend, or they are content with invoking sync() from
user-space before invoking suspend. Say Y if that's your case.
config HIBERNATE_CALLBACKS
bool
......
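The user-space alternative mentioned in the SUSPEND_SKIP_SYNC help text could look roughly like this hypothetical helper, which syncs explicitly and then writes "mem" to /sys/power/state:

#include <stdio.h>
#include <unistd.h>

int main(void)
{
	FILE *f;

	sync();			/* flush filesystems from user space first */

	f = fopen("/sys/power/state", "w");
	if (!f) {
		perror("/sys/power/state");
		return 1;
	}
	fputs("mem", f);	/* then trigger suspend-to-RAM */
	fclose(f);
	return 0;
}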
@@ -484,11 +484,13 @@ static int enter_state(suspend_state_t state)
if (state == PM_SUSPEND_FREEZE)
freeze_begin();
#ifndef CONFIG_SUSPEND_SKIP_SYNC
trace_suspend_resume(TPS("sync_filesystems"), 0, true);
printk(KERN_INFO "PM: Syncing filesystems ... ");
sys_sync();
printk("done.\n");
trace_suspend_resume(TPS("sync_filesystems"), 0, false);
#endif
pr_debug("PM: Preparing system for sleep (%s)\n", pm_states[state]);
error = suspend_prepare(state);
......
@@ -17,6 +17,7 @@
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include "power.h"
@@ -83,7 +84,9 @@ static inline void decrement_wakelocks_number(void) {}
#define WL_GC_COUNT_MAX 100
#define WL_GC_TIME_SEC 300
static void __wakelocks_gc(struct work_struct *work);
static LIST_HEAD(wakelocks_lru_list);
static DECLARE_WORK(wakelock_work, __wakelocks_gc);
static unsigned int wakelocks_gc_count;
static inline void wakelocks_lru_add(struct wakelock *wl)
@@ -96,13 +99,12 @@ static inline void wakelocks_lru_most_recent(struct wakelock *wl)
list_move(&wl->lru, &wakelocks_lru_list);
}
-static void wakelocks_gc(void)
+static void __wakelocks_gc(struct work_struct *work)
{
struct wakelock *wl, *aux;
ktime_t now;
-if (++wakelocks_gc_count <= WL_GC_COUNT_MAX)
+mutex_lock(&wakelocks_lock);
return;
now = ktime_get();
list_for_each_entry_safe_reverse(wl, aux, &wakelocks_lru_list, lru) {
@@ -127,6 +129,16 @@ static void wakelocks_gc(void)
}
}
wakelocks_gc_count = 0;
mutex_unlock(&wakelocks_lock);
}
static void wakelocks_gc(void)
{
if (++wakelocks_gc_count <= WL_GC_COUNT_MAX)
return;
schedule_work(&wakelock_work);
}
#else /* !CONFIG_PM_WAKELOCKS_GC */
static inline void wakelocks_lru_add(struct wakelock *wl) {}
......