Commit c55b51a0 authored by Daniel Lezcano, committed by Rafael J. Wysocki

cpuidle: Allow idle injection to apply exit latency limit

In some cases it may be useful to specify an exit latency limit for
the idle state to be used during CPU idle time injection.

Instead of duplicating the information in struct cpuidle_device
or propagating the latency limit in the call stack, replace the
use_deepest_state field with forced_latency_limit_ns to represent
that limit, so that the deepest idle state with exit latency within
that limit is forced (i.e. no governors) when it is set.

A zero exit latency limit for forced idle means to use governors in
the usual way (analogous to use_deepest_state equal to "false" before
this change).

Additionally, add play_idle_precise() taking two arguments, the
duration of forced idle and the idle state exit latency limit, both
in nanoseconds, and redefine play_idle() as a wrapper around that
new function.

This change is preparatory, no functional impact is expected.
Suggested-by: Rafael J. Wysocki <rafael@kernel.org>
Signed-off-by: Daniel Lezcano <daniel.lezcano@linaro.org>
[ rjw: Subject, changelog, cpuidle_use_deepest_state() kerneldoc, whitespace ]
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
parent cbda56d5
...@@ -99,20 +99,21 @@ static int find_deepest_state(struct cpuidle_driver *drv, ...@@ -99,20 +99,21 @@ static int find_deepest_state(struct cpuidle_driver *drv,
} }
/** /**
* cpuidle_use_deepest_state - Set/clear governor override flag. * cpuidle_use_deepest_state - Set/unset governor override mode.
* @enable: New value of the flag. * @latency_limit_ns: Idle state exit latency limit (or no override if 0).
* *
* Set/unset the current CPU to use the deepest idle state (override governors * If @latency_limit_ns is nonzero, set the current CPU to use the deepest idle
* going forward if set). * state with exit latency within @latency_limit_ns (override governors going
* forward), or do not override governors if it is zero.
*/ */
void cpuidle_use_deepest_state(bool enable) void cpuidle_use_deepest_state(u64 latency_limit_ns)
{ {
struct cpuidle_device *dev; struct cpuidle_device *dev;
preempt_disable(); preempt_disable();
dev = cpuidle_get_device(); dev = cpuidle_get_device();
if (dev) if (dev)
dev->use_deepest_state = enable; dev->forced_idle_latency_limit_ns = latency_limit_ns;
preempt_enable(); preempt_enable();
} }
......
...@@ -179,7 +179,12 @@ void arch_cpu_idle_dead(void); ...@@ -179,7 +179,12 @@ void arch_cpu_idle_dead(void);
int cpu_report_state(int cpu); int cpu_report_state(int cpu);
int cpu_check_up_prepare(int cpu); int cpu_check_up_prepare(int cpu);
void cpu_set_state_online(int cpu); void cpu_set_state_online(int cpu);
void play_idle_precise(u64 duration_ns, u64 latency_ns);

/*
 * Backward-compatible wrapper: force idle for @duration_us microseconds
 * with no exit latency limit (U64_MAX permits the deepest idle state).
 */
static inline void play_idle(unsigned long duration_us)
{
	play_idle_precise(duration_us * NSEC_PER_USEC, U64_MAX);
}
#ifdef CONFIG_HOTPLUG_CPU #ifdef CONFIG_HOTPLUG_CPU
bool cpu_wait_death(unsigned int cpu, int seconds); bool cpu_wait_death(unsigned int cpu, int seconds);
......
...@@ -85,7 +85,6 @@ struct cpuidle_driver_kobj; ...@@ -85,7 +85,6 @@ struct cpuidle_driver_kobj;
struct cpuidle_device { struct cpuidle_device {
unsigned int registered:1; unsigned int registered:1;
unsigned int enabled:1; unsigned int enabled:1;
unsigned int use_deepest_state:1;
unsigned int poll_time_limit:1; unsigned int poll_time_limit:1;
unsigned int cpu; unsigned int cpu;
ktime_t next_hrtimer; ktime_t next_hrtimer;
...@@ -93,6 +92,7 @@ struct cpuidle_device { ...@@ -93,6 +92,7 @@ struct cpuidle_device {
int last_state_idx; int last_state_idx;
u64 last_residency_ns; u64 last_residency_ns;
u64 poll_limit_ns; u64 poll_limit_ns;
u64 forced_idle_latency_limit_ns;
struct cpuidle_state_usage states_usage[CPUIDLE_STATE_MAX]; struct cpuidle_state_usage states_usage[CPUIDLE_STATE_MAX];
struct cpuidle_state_kobj *kobjs[CPUIDLE_STATE_MAX]; struct cpuidle_state_kobj *kobjs[CPUIDLE_STATE_MAX];
struct cpuidle_driver_kobj *kobj_driver; struct cpuidle_driver_kobj *kobj_driver;
/* CONFIG_CPU_IDLE enabled: real implementations live in cpuidle core. */
extern int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
				      struct cpuidle_device *dev);
extern int cpuidle_enter_s2idle(struct cpuidle_driver *drv,
				struct cpuidle_device *dev);
extern void cpuidle_use_deepest_state(u64 latency_limit_ns);
#else #else
static inline int cpuidle_find_deepest_state(struct cpuidle_driver *drv, static inline int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
struct cpuidle_device *dev) struct cpuidle_device *dev)
...@@ -224,7 +224,7 @@ static inline int cpuidle_find_deepest_state(struct cpuidle_driver *drv, ...@@ -224,7 +224,7 @@ static inline int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
static inline int cpuidle_enter_s2idle(struct cpuidle_driver *drv, static inline int cpuidle_enter_s2idle(struct cpuidle_driver *drv,
struct cpuidle_device *dev) struct cpuidle_device *dev)
{return -ENODEV; } {return -ENODEV; }
static inline void cpuidle_use_deepest_state(bool enable) static inline void cpuidle_use_deepest_state(u64 latency_limit_ns)
{ {
} }
#endif #endif
......
...@@ -165,7 +165,7 @@ static void cpuidle_idle_call(void) ...@@ -165,7 +165,7 @@ static void cpuidle_idle_call(void)
* until a proper wakeup interrupt happens. * until a proper wakeup interrupt happens.
*/ */
if (idle_should_enter_s2idle() || dev->use_deepest_state) { if (idle_should_enter_s2idle() || dev->forced_idle_latency_limit_ns) {
if (idle_should_enter_s2idle()) { if (idle_should_enter_s2idle()) {
rcu_idle_enter(); rcu_idle_enter();
...@@ -311,7 +311,7 @@ static enum hrtimer_restart idle_inject_timer_fn(struct hrtimer *timer) ...@@ -311,7 +311,7 @@ static enum hrtimer_restart idle_inject_timer_fn(struct hrtimer *timer)
return HRTIMER_NORESTART; return HRTIMER_NORESTART;
} }
/*
 * play_idle_precise - Force the CPU into idle for a fixed duration.
 * @duration_ns: how long to stay idle, in nanoseconds (must be nonzero).
 * @latency_ns:  idle state exit latency limit passed to cpuidle
 *               (U64_MAX means no limit, 0 means do not override governors).
 *
 * Must be called from a per-CPU-bound FIFO kthread; the WARN_ON_ONCE
 * checks below enforce that contract.
 */
void play_idle_precise(u64 duration_ns, u64 latency_ns)
{
	struct idle_timer it;

	/*
	 * NOTE(review): the next comment and SCHED_FIFO check fall in lines
	 * elided by the diff hunk boundary; restored from the upstream
	 * commit — verify against the tree.
	 *
	 * Only FIFO tasks can disable the tick since they don't need the forced
	 * idle time.
	 */
	WARN_ON_ONCE(current->policy != SCHED_FIFO);
	WARN_ON_ONCE(current->nr_cpus_allowed != 1);
	WARN_ON_ONCE(!(current->flags & PF_KTHREAD));
	WARN_ON_ONCE(!(current->flags & PF_NO_SETAFFINITY));
	WARN_ON_ONCE(!duration_ns);

	rcu_sleep_check();
	preempt_disable();
	current->flags |= PF_IDLE;
	/* Force the deepest state within @latency_ns while we inject idle. */
	cpuidle_use_deepest_state(latency_ns);

	it.done = 0;
	hrtimer_init_on_stack(&it.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	it.timer.function = idle_inject_timer_fn;
	hrtimer_start(&it.timer, ns_to_ktime(duration_ns),
		      HRTIMER_MODE_REL_PINNED);

	/* Idle until the pinned timer fires and sets it.done. */
	while (!READ_ONCE(it.done))
		do_idle();

	/* Zero restores normal governor-driven state selection. */
	cpuidle_use_deepest_state(0);
	current->flags &= ~PF_IDLE;

	preempt_fold_need_resched();
	preempt_enable();
}
EXPORT_SYMBOL_GPL(play_idle_precise);
void cpu_startup_entry(enum cpuhp_state state) void cpu_startup_entry(enum cpuhp_state state)
{ {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment