Commit c1d51f68 authored by Rafael J. Wysocki

cpuidle: Use nanoseconds as the unit of time

Currently, the cpuidle subsystem uses microseconds as the unit of
time which (among other things) causes the idle loop to incur some
integer division overhead for no clear benefit.

In order to allow cpuidle to measure time in nanoseconds, add two
new fields, exit_latency_ns and target_residency_ns, to represent the
exit latency and target residency of an idle state in nanoseconds,
respectively, to struct cpuidle_state and initialize them with the
help of the corresponding values in microseconds provided by drivers.
Additionally, change cpuidle_governor_latency_req() to return the
idle state exit latency constraint in nanoseconds.

Also measure idle state residency (last_residency_ns in struct
cpuidle_device and time_ns in struct cpuidle_driver) in nanoseconds
and update the cpuidle core and governors accordingly.

However, the menu governor still computes typical intervals in
microseconds to avoid integer overflows.
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Doug Smythies <dsmythies@telus.net>
Tested-by: Doug Smythies <dsmythies@telus.net>
parent 99e98d3f
...@@ -75,24 +75,24 @@ int cpuidle_play_dead(void) ...@@ -75,24 +75,24 @@ int cpuidle_play_dead(void)
static int find_deepest_state(struct cpuidle_driver *drv, static int find_deepest_state(struct cpuidle_driver *drv,
struct cpuidle_device *dev, struct cpuidle_device *dev,
unsigned int max_latency, u64 max_latency_ns,
unsigned int forbidden_flags, unsigned int forbidden_flags,
bool s2idle) bool s2idle)
{ {
unsigned int latency_req = 0; u64 latency_req = 0;
int i, ret = 0; int i, ret = 0;
for (i = 1; i < drv->state_count; i++) { for (i = 1; i < drv->state_count; i++) {
struct cpuidle_state *s = &drv->states[i]; struct cpuidle_state *s = &drv->states[i];
if (dev->states_usage[i].disable || if (dev->states_usage[i].disable ||
s->exit_latency <= latency_req || s->exit_latency_ns <= latency_req ||
s->exit_latency > max_latency || s->exit_latency_ns > max_latency_ns ||
(s->flags & forbidden_flags) || (s->flags & forbidden_flags) ||
(s2idle && !s->enter_s2idle)) (s2idle && !s->enter_s2idle))
continue; continue;
latency_req = s->exit_latency; latency_req = s->exit_latency_ns;
ret = i; ret = i;
} }
return ret; return ret;
...@@ -124,7 +124,7 @@ void cpuidle_use_deepest_state(bool enable) ...@@ -124,7 +124,7 @@ void cpuidle_use_deepest_state(bool enable)
int cpuidle_find_deepest_state(struct cpuidle_driver *drv, int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
struct cpuidle_device *dev) struct cpuidle_device *dev)
{ {
return find_deepest_state(drv, dev, UINT_MAX, 0, false); return find_deepest_state(drv, dev, U64_MAX, 0, false);
} }
#ifdef CONFIG_SUSPEND #ifdef CONFIG_SUSPEND
...@@ -180,7 +180,7 @@ int cpuidle_enter_s2idle(struct cpuidle_driver *drv, struct cpuidle_device *dev) ...@@ -180,7 +180,7 @@ int cpuidle_enter_s2idle(struct cpuidle_driver *drv, struct cpuidle_device *dev)
* that interrupts won't be enabled when it exits and allows the tick to * that interrupts won't be enabled when it exits and allows the tick to
* be frozen safely. * be frozen safely.
*/ */
index = find_deepest_state(drv, dev, UINT_MAX, 0, true); index = find_deepest_state(drv, dev, U64_MAX, 0, true);
if (index > 0) if (index > 0)
enter_s2idle_proper(drv, dev, index); enter_s2idle_proper(drv, dev, index);
...@@ -209,7 +209,7 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv, ...@@ -209,7 +209,7 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
* CPU as a broadcast timer, this call may fail if it is not available. * CPU as a broadcast timer, this call may fail if it is not available.
*/ */
if (broadcast && tick_broadcast_enter()) { if (broadcast && tick_broadcast_enter()) {
index = find_deepest_state(drv, dev, target_state->exit_latency, index = find_deepest_state(drv, dev, target_state->exit_latency_ns,
CPUIDLE_FLAG_TIMER_STOP, false); CPUIDLE_FLAG_TIMER_STOP, false);
if (index < 0) { if (index < 0) {
default_idle_call(); default_idle_call();
...@@ -247,7 +247,7 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv, ...@@ -247,7 +247,7 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
local_irq_enable(); local_irq_enable();
if (entered_state >= 0) { if (entered_state >= 0) {
s64 diff, delay = drv->states[entered_state].exit_latency; s64 diff, delay = drv->states[entered_state].exit_latency_ns;
int i; int i;
/* /*
...@@ -255,15 +255,13 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv, ...@@ -255,15 +255,13 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
* This can be moved to within driver enter routine, * This can be moved to within driver enter routine,
* but that results in multiple copies of same code. * but that results in multiple copies of same code.
*/ */
diff = ktime_us_delta(time_end, time_start); diff = ktime_sub(time_end, time_start);
if (diff > INT_MAX)
diff = INT_MAX;
dev->last_residency = (int)diff; dev->last_residency_ns = diff;
dev->states_usage[entered_state].time += dev->last_residency; dev->states_usage[entered_state].time_ns += diff;
dev->states_usage[entered_state].usage++; dev->states_usage[entered_state].usage++;
if (diff < drv->states[entered_state].target_residency) { if (diff < drv->states[entered_state].target_residency_ns) {
for (i = entered_state - 1; i >= 0; i--) { for (i = entered_state - 1; i >= 0; i--) {
if (dev->states_usage[i].disable) if (dev->states_usage[i].disable)
continue; continue;
...@@ -281,14 +279,14 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv, ...@@ -281,14 +279,14 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
* Update if a deeper state would have been a * Update if a deeper state would have been a
* better match for the observed idle duration. * better match for the observed idle duration.
*/ */
if (diff - delay >= drv->states[i].target_residency) if (diff - delay >= drv->states[i].target_residency_ns)
dev->states_usage[entered_state].below++; dev->states_usage[entered_state].below++;
break; break;
} }
} }
} else { } else {
dev->last_residency = 0; dev->last_residency_ns = 0;
} }
return entered_state; return entered_state;
...@@ -381,7 +379,7 @@ u64 cpuidle_poll_time(struct cpuidle_driver *drv, ...@@ -381,7 +379,7 @@ u64 cpuidle_poll_time(struct cpuidle_driver *drv,
if (dev->states_usage[i].disable) if (dev->states_usage[i].disable)
continue; continue;
limit_ns = (u64)drv->states[i].target_residency * NSEC_PER_USEC; limit_ns = (u64)drv->states[i].target_residency_ns;
} }
dev->poll_limit_ns = limit_ns; dev->poll_limit_ns = limit_ns;
...@@ -552,7 +550,7 @@ static void __cpuidle_unregister_device(struct cpuidle_device *dev) ...@@ -552,7 +550,7 @@ static void __cpuidle_unregister_device(struct cpuidle_device *dev)
static void __cpuidle_device_init(struct cpuidle_device *dev) static void __cpuidle_device_init(struct cpuidle_device *dev)
{ {
memset(dev->states_usage, 0, sizeof(dev->states_usage)); memset(dev->states_usage, 0, sizeof(dev->states_usage));
dev->last_residency = 0; dev->last_residency_ns = 0;
dev->next_hrtimer = 0; dev->next_hrtimer = 0;
} }
......
...@@ -165,16 +165,27 @@ static void __cpuidle_driver_init(struct cpuidle_driver *drv) ...@@ -165,16 +165,27 @@ static void __cpuidle_driver_init(struct cpuidle_driver *drv)
if (!drv->cpumask) if (!drv->cpumask)
drv->cpumask = (struct cpumask *)cpu_possible_mask; drv->cpumask = (struct cpumask *)cpu_possible_mask;
for (i = 0; i < drv->state_count; i++) {
struct cpuidle_state *s = &drv->states[i];
/* /*
* Look for the timer stop flag in the different states, so that we know * Look for the timer stop flag in the different states and if
* if the broadcast timer has to be set up. The loop is in the reverse * it is found, indicate that the broadcast timer has to be set
* order, because usually one of the deeper states have this flag set. * up.
*/ */
for (i = drv->state_count - 1; i >= 0 ; i--) { if (s->flags & CPUIDLE_FLAG_TIMER_STOP)
if (drv->states[i].flags & CPUIDLE_FLAG_TIMER_STOP) {
drv->bctimer = 1; drv->bctimer = 1;
break;
} /*
* The core will use the target residency and exit latency
* values in nanoseconds, but allow drivers to provide them in
* microseconds too.
*/
if (s->target_residency > 0)
s->target_residency_ns = s->target_residency * NSEC_PER_USEC;
if (s->exit_latency > 0)
s->exit_latency_ns = s->exit_latency * NSEC_PER_USEC;
} }
} }
......
...@@ -107,11 +107,14 @@ int cpuidle_register_governor(struct cpuidle_governor *gov) ...@@ -107,11 +107,14 @@ int cpuidle_register_governor(struct cpuidle_governor *gov)
* cpuidle_governor_latency_req - Compute a latency constraint for CPU * cpuidle_governor_latency_req - Compute a latency constraint for CPU
* @cpu: Target CPU * @cpu: Target CPU
*/ */
int cpuidle_governor_latency_req(unsigned int cpu) s64 cpuidle_governor_latency_req(unsigned int cpu)
{ {
int global_req = pm_qos_request(PM_QOS_CPU_DMA_LATENCY); int global_req = pm_qos_request(PM_QOS_CPU_DMA_LATENCY);
struct device *device = get_cpu_device(cpu); struct device *device = get_cpu_device(cpu);
int device_req = dev_pm_qos_raw_resume_latency(device); int device_req = dev_pm_qos_raw_resume_latency(device);
return device_req < global_req ? device_req : global_req; if (device_req > global_req)
device_req = global_req;
return (s64)device_req * NSEC_PER_USEC;
} }
...@@ -49,7 +49,7 @@ static int haltpoll_select(struct cpuidle_driver *drv, ...@@ -49,7 +49,7 @@ static int haltpoll_select(struct cpuidle_driver *drv,
struct cpuidle_device *dev, struct cpuidle_device *dev,
bool *stop_tick) bool *stop_tick)
{ {
int latency_req = cpuidle_governor_latency_req(dev->cpu); s64 latency_req = cpuidle_governor_latency_req(dev->cpu);
if (!drv->state_count || latency_req == 0) { if (!drv->state_count || latency_req == 0) {
*stop_tick = false; *stop_tick = false;
...@@ -75,10 +75,9 @@ static int haltpoll_select(struct cpuidle_driver *drv, ...@@ -75,10 +75,9 @@ static int haltpoll_select(struct cpuidle_driver *drv,
return 0; return 0;
} }
static void adjust_poll_limit(struct cpuidle_device *dev, unsigned int block_us) static void adjust_poll_limit(struct cpuidle_device *dev, u64 block_ns)
{ {
unsigned int val; unsigned int val;
u64 block_ns = block_us*NSEC_PER_USEC;
/* Grow cpu_halt_poll_us if /* Grow cpu_halt_poll_us if
* cpu_halt_poll_us < block_ns < guest_halt_poll_us * cpu_halt_poll_us < block_ns < guest_halt_poll_us
...@@ -115,7 +114,7 @@ static void haltpoll_reflect(struct cpuidle_device *dev, int index) ...@@ -115,7 +114,7 @@ static void haltpoll_reflect(struct cpuidle_device *dev, int index)
dev->last_state_idx = index; dev->last_state_idx = index;
if (index != 0) if (index != 0)
adjust_poll_limit(dev, dev->last_residency); adjust_poll_limit(dev, dev->last_residency_ns);
} }
/** /**
......
...@@ -27,8 +27,8 @@ struct ladder_device_state { ...@@ -27,8 +27,8 @@ struct ladder_device_state {
struct { struct {
u32 promotion_count; u32 promotion_count;
u32 demotion_count; u32 demotion_count;
u32 promotion_time; u64 promotion_time_ns;
u32 demotion_time; u64 demotion_time_ns;
} threshold; } threshold;
struct { struct {
int promotion_count; int promotion_count;
...@@ -68,9 +68,10 @@ static int ladder_select_state(struct cpuidle_driver *drv, ...@@ -68,9 +68,10 @@ static int ladder_select_state(struct cpuidle_driver *drv,
{ {
struct ladder_device *ldev = this_cpu_ptr(&ladder_devices); struct ladder_device *ldev = this_cpu_ptr(&ladder_devices);
struct ladder_device_state *last_state; struct ladder_device_state *last_state;
int last_residency, last_idx = dev->last_state_idx; int last_idx = dev->last_state_idx;
int first_idx = drv->states[0].flags & CPUIDLE_FLAG_POLLING ? 1 : 0; int first_idx = drv->states[0].flags & CPUIDLE_FLAG_POLLING ? 1 : 0;
int latency_req = cpuidle_governor_latency_req(dev->cpu); s64 latency_req = cpuidle_governor_latency_req(dev->cpu);
s64 last_residency;
/* Special case when user has set very strict latency requirement */ /* Special case when user has set very strict latency requirement */
if (unlikely(latency_req == 0)) { if (unlikely(latency_req == 0)) {
...@@ -80,13 +81,13 @@ static int ladder_select_state(struct cpuidle_driver *drv, ...@@ -80,13 +81,13 @@ static int ladder_select_state(struct cpuidle_driver *drv,
last_state = &ldev->states[last_idx]; last_state = &ldev->states[last_idx];
last_residency = dev->last_residency - drv->states[last_idx].exit_latency; last_residency = dev->last_residency_ns - drv->states[last_idx].exit_latency_ns;
/* consider promotion */ /* consider promotion */
if (last_idx < drv->state_count - 1 && if (last_idx < drv->state_count - 1 &&
!dev->states_usage[last_idx + 1].disable && !dev->states_usage[last_idx + 1].disable &&
last_residency > last_state->threshold.promotion_time && last_residency > last_state->threshold.promotion_time_ns &&
drv->states[last_idx + 1].exit_latency <= latency_req) { drv->states[last_idx + 1].exit_latency_ns <= latency_req) {
last_state->stats.promotion_count++; last_state->stats.promotion_count++;
last_state->stats.demotion_count = 0; last_state->stats.demotion_count = 0;
if (last_state->stats.promotion_count >= last_state->threshold.promotion_count) { if (last_state->stats.promotion_count >= last_state->threshold.promotion_count) {
...@@ -98,11 +99,11 @@ static int ladder_select_state(struct cpuidle_driver *drv, ...@@ -98,11 +99,11 @@ static int ladder_select_state(struct cpuidle_driver *drv,
/* consider demotion */ /* consider demotion */
if (last_idx > first_idx && if (last_idx > first_idx &&
(dev->states_usage[last_idx].disable || (dev->states_usage[last_idx].disable ||
drv->states[last_idx].exit_latency > latency_req)) { drv->states[last_idx].exit_latency_ns > latency_req)) {
int i; int i;
for (i = last_idx - 1; i > first_idx; i--) { for (i = last_idx - 1; i > first_idx; i--) {
if (drv->states[i].exit_latency <= latency_req) if (drv->states[i].exit_latency_ns <= latency_req)
break; break;
} }
ladder_do_selection(dev, ldev, last_idx, i); ladder_do_selection(dev, ldev, last_idx, i);
...@@ -110,7 +111,7 @@ static int ladder_select_state(struct cpuidle_driver *drv, ...@@ -110,7 +111,7 @@ static int ladder_select_state(struct cpuidle_driver *drv,
} }
if (last_idx > first_idx && if (last_idx > first_idx &&
last_residency < last_state->threshold.demotion_time) { last_residency < last_state->threshold.demotion_time_ns) {
last_state->stats.demotion_count++; last_state->stats.demotion_count++;
last_state->stats.promotion_count = 0; last_state->stats.promotion_count = 0;
if (last_state->stats.demotion_count >= last_state->threshold.demotion_count) { if (last_state->stats.demotion_count >= last_state->threshold.demotion_count) {
...@@ -150,9 +151,9 @@ static int ladder_enable_device(struct cpuidle_driver *drv, ...@@ -150,9 +151,9 @@ static int ladder_enable_device(struct cpuidle_driver *drv,
lstate->threshold.demotion_count = DEMOTION_COUNT; lstate->threshold.demotion_count = DEMOTION_COUNT;
if (i < drv->state_count - 1) if (i < drv->state_count - 1)
lstate->threshold.promotion_time = state->exit_latency; lstate->threshold.promotion_time_ns = state->exit_latency_ns;
if (i > first_idx) if (i > first_idx)
lstate->threshold.demotion_time = state->exit_latency; lstate->threshold.demotion_time_ns = state->exit_latency_ns;
} }
return 0; return 0;
......
This diff is collapsed.
...@@ -104,7 +104,7 @@ struct teo_cpu { ...@@ -104,7 +104,7 @@ struct teo_cpu {
u64 sleep_length_ns; u64 sleep_length_ns;
struct teo_idle_state states[CPUIDLE_STATE_MAX]; struct teo_idle_state states[CPUIDLE_STATE_MAX];
int interval_idx; int interval_idx;
unsigned int intervals[INTERVALS]; u64 intervals[INTERVALS];
}; };
static DEFINE_PER_CPU(struct teo_cpu, teo_cpus); static DEFINE_PER_CPU(struct teo_cpu, teo_cpus);
...@@ -117,9 +117,8 @@ static DEFINE_PER_CPU(struct teo_cpu, teo_cpus); ...@@ -117,9 +117,8 @@ static DEFINE_PER_CPU(struct teo_cpu, teo_cpus);
static void teo_update(struct cpuidle_driver *drv, struct cpuidle_device *dev) static void teo_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
{ {
struct teo_cpu *cpu_data = per_cpu_ptr(&teo_cpus, dev->cpu); struct teo_cpu *cpu_data = per_cpu_ptr(&teo_cpus, dev->cpu);
unsigned int sleep_length_us = ktime_to_us(cpu_data->sleep_length_ns);
int i, idx_hit = -1, idx_timer = -1; int i, idx_hit = -1, idx_timer = -1;
unsigned int measured_us; u64 measured_ns;
if (cpu_data->time_span_ns >= cpu_data->sleep_length_ns) { if (cpu_data->time_span_ns >= cpu_data->sleep_length_ns) {
/* /*
...@@ -127,23 +126,21 @@ static void teo_update(struct cpuidle_driver *drv, struct cpuidle_device *dev) ...@@ -127,23 +126,21 @@ static void teo_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
* enough to the closest timer event expected at the idle state * enough to the closest timer event expected at the idle state
* selection time to be discarded. * selection time to be discarded.
*/ */
measured_us = UINT_MAX; measured_ns = U64_MAX;
} else { } else {
unsigned int lat; u64 lat_ns = drv->states[dev->last_state_idx].exit_latency_ns;
lat = drv->states[dev->last_state_idx].exit_latency; measured_ns = cpu_data->time_span_ns;
measured_us = ktime_to_us(cpu_data->time_span_ns);
/* /*
* The delay between the wakeup and the first instruction * The delay between the wakeup and the first instruction
* executed by the CPU is not likely to be worst-case every * executed by the CPU is not likely to be worst-case every
* time, so take 1/2 of the exit latency as a very rough * time, so take 1/2 of the exit latency as a very rough
* approximation of the average of it. * approximation of the average of it.
*/ */
if (measured_us >= lat) if (measured_ns >= lat_ns)
measured_us -= lat / 2; measured_ns -= lat_ns / 2;
else else
measured_us /= 2; measured_ns /= 2;
} }
/* /*
...@@ -155,9 +152,9 @@ static void teo_update(struct cpuidle_driver *drv, struct cpuidle_device *dev) ...@@ -155,9 +152,9 @@ static void teo_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
cpu_data->states[i].early_hits -= early_hits >> DECAY_SHIFT; cpu_data->states[i].early_hits -= early_hits >> DECAY_SHIFT;
if (drv->states[i].target_residency <= sleep_length_us) { if (drv->states[i].target_residency_ns <= cpu_data->sleep_length_ns) {
idx_timer = i; idx_timer = i;
if (drv->states[i].target_residency <= measured_us) if (drv->states[i].target_residency_ns <= measured_ns)
idx_hit = i; idx_hit = i;
} }
} }
...@@ -193,7 +190,7 @@ static void teo_update(struct cpuidle_driver *drv, struct cpuidle_device *dev) ...@@ -193,7 +190,7 @@ static void teo_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
* Save idle duration values corresponding to non-timer wakeups for * Save idle duration values corresponding to non-timer wakeups for
* pattern detection. * pattern detection.
*/ */
cpu_data->intervals[cpu_data->interval_idx++] = measured_us; cpu_data->intervals[cpu_data->interval_idx++] = measured_ns;
if (cpu_data->interval_idx > INTERVALS) if (cpu_data->interval_idx > INTERVALS)
cpu_data->interval_idx = 0; cpu_data->interval_idx = 0;
} }
...@@ -203,11 +200,11 @@ static void teo_update(struct cpuidle_driver *drv, struct cpuidle_device *dev) ...@@ -203,11 +200,11 @@ static void teo_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
* @drv: cpuidle driver containing state data. * @drv: cpuidle driver containing state data.
* @dev: Target CPU. * @dev: Target CPU.
* @state_idx: Index of the capping idle state. * @state_idx: Index of the capping idle state.
* @duration_us: Idle duration value to match. * @duration_ns: Idle duration value to match.
*/ */
static int teo_find_shallower_state(struct cpuidle_driver *drv, static int teo_find_shallower_state(struct cpuidle_driver *drv,
struct cpuidle_device *dev, int state_idx, struct cpuidle_device *dev, int state_idx,
unsigned int duration_us) u64 duration_ns)
{ {
int i; int i;
...@@ -216,7 +213,7 @@ static int teo_find_shallower_state(struct cpuidle_driver *drv, ...@@ -216,7 +213,7 @@ static int teo_find_shallower_state(struct cpuidle_driver *drv,
continue; continue;
state_idx = i; state_idx = i;
if (drv->states[i].target_residency <= duration_us) if (drv->states[i].target_residency_ns <= duration_ns)
break; break;
} }
return state_idx; return state_idx;
...@@ -232,8 +229,9 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev, ...@@ -232,8 +229,9 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
bool *stop_tick) bool *stop_tick)
{ {
struct teo_cpu *cpu_data = per_cpu_ptr(&teo_cpus, dev->cpu); struct teo_cpu *cpu_data = per_cpu_ptr(&teo_cpus, dev->cpu);
int latency_req = cpuidle_governor_latency_req(dev->cpu); s64 latency_req = cpuidle_governor_latency_req(dev->cpu);
unsigned int duration_us, hits, misses, early_hits; u64 duration_ns;
unsigned int hits, misses, early_hits;
int max_early_idx, constraint_idx, idx, i; int max_early_idx, constraint_idx, idx, i;
ktime_t delta_tick; ktime_t delta_tick;
...@@ -244,8 +242,8 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev, ...@@ -244,8 +242,8 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
cpu_data->time_span_ns = local_clock(); cpu_data->time_span_ns = local_clock();
cpu_data->sleep_length_ns = tick_nohz_get_sleep_length(&delta_tick); duration_ns = tick_nohz_get_sleep_length(&delta_tick);
duration_us = ktime_to_us(cpu_data->sleep_length_ns); cpu_data->sleep_length_ns = duration_ns;
hits = 0; hits = 0;
misses = 0; misses = 0;
...@@ -262,7 +260,7 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev, ...@@ -262,7 +260,7 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
* Ignore disabled states with target residencies beyond * Ignore disabled states with target residencies beyond
* the anticipated idle duration. * the anticipated idle duration.
*/ */
if (s->target_residency > duration_us) if (s->target_residency_ns > duration_ns)
continue; continue;
/* /*
...@@ -301,7 +299,7 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev, ...@@ -301,7 +299,7 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
* shallow for that role. * shallow for that role.
*/ */
if (!(tick_nohz_tick_stopped() && if (!(tick_nohz_tick_stopped() &&
drv->states[idx].target_residency < TICK_USEC)) { drv->states[idx].target_residency_ns < TICK_NSEC)) {
early_hits = cpu_data->states[i].early_hits; early_hits = cpu_data->states[i].early_hits;
max_early_idx = idx; max_early_idx = idx;
} }
...@@ -315,10 +313,10 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev, ...@@ -315,10 +313,10 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
misses = cpu_data->states[i].misses; misses = cpu_data->states[i].misses;
} }
if (s->target_residency > duration_us) if (s->target_residency_ns > duration_ns)
break; break;
if (s->exit_latency > latency_req && constraint_idx > i) if (s->exit_latency_ns > latency_req && constraint_idx > i)
constraint_idx = i; constraint_idx = i;
idx = i; idx = i;
...@@ -327,7 +325,7 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev, ...@@ -327,7 +325,7 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
if (early_hits < cpu_data->states[i].early_hits && if (early_hits < cpu_data->states[i].early_hits &&
!(tick_nohz_tick_stopped() && !(tick_nohz_tick_stopped() &&
drv->states[i].target_residency < TICK_USEC)) { drv->states[i].target_residency_ns < TICK_NSEC)) {
early_hits = cpu_data->states[i].early_hits; early_hits = cpu_data->states[i].early_hits;
max_early_idx = i; max_early_idx = i;
} }
...@@ -343,7 +341,7 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev, ...@@ -343,7 +341,7 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
*/ */
if (hits <= misses && max_early_idx >= 0) { if (hits <= misses && max_early_idx >= 0) {
idx = max_early_idx; idx = max_early_idx;
duration_us = drv->states[idx].target_residency; duration_ns = drv->states[idx].target_residency_ns;
} }
/* /*
...@@ -364,9 +362,9 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev, ...@@ -364,9 +362,9 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
* the current expected idle duration value. * the current expected idle duration value.
*/ */
for (i = 0; i < INTERVALS; i++) { for (i = 0; i < INTERVALS; i++) {
unsigned int val = cpu_data->intervals[i]; u64 val = cpu_data->intervals[i];
if (val >= duration_us) if (val >= duration_ns)
continue; continue;
count++; count++;
...@@ -378,17 +376,17 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev, ...@@ -378,17 +376,17 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
* values are in the interesting range. * values are in the interesting range.
*/ */
if (count > INTERVALS / 2) { if (count > INTERVALS / 2) {
unsigned int avg_us = div64_u64(sum, count); u64 avg_ns = div64_u64(sum, count);
/* /*
* Avoid spending too much time in an idle state that * Avoid spending too much time in an idle state that
* would be too shallow. * would be too shallow.
*/ */
if (!(tick_nohz_tick_stopped() && avg_us < TICK_USEC)) { if (!(tick_nohz_tick_stopped() && avg_ns < TICK_NSEC)) {
duration_us = avg_us; duration_ns = avg_ns;
if (drv->states[idx].target_residency > avg_us) if (drv->states[idx].target_residency_ns > avg_ns)
idx = teo_find_shallower_state(drv, dev, idx = teo_find_shallower_state(drv, dev,
idx, avg_us); idx, avg_ns);
} }
} }
} }
...@@ -398,9 +396,7 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev, ...@@ -398,9 +396,7 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
* expected idle duration is shorter than the tick period length. * expected idle duration is shorter than the tick period length.
*/ */
if (((drv->states[idx].flags & CPUIDLE_FLAG_POLLING) || if (((drv->states[idx].flags & CPUIDLE_FLAG_POLLING) ||
duration_us < TICK_USEC) && !tick_nohz_tick_stopped()) { duration_ns < TICK_NSEC) && !tick_nohz_tick_stopped()) {
unsigned int delta_tick_us = ktime_to_us(delta_tick);
*stop_tick = false; *stop_tick = false;
/* /*
...@@ -409,8 +405,8 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev, ...@@ -409,8 +405,8 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
* till the closest timer including the tick, try to correct * till the closest timer including the tick, try to correct
* that. * that.
*/ */
if (idx > 0 && drv->states[idx].target_residency > delta_tick_us) if (idx > 0 && drv->states[idx].target_residency_ns > delta_tick)
idx = teo_find_shallower_state(drv, dev, idx, delta_tick_us); idx = teo_find_shallower_state(drv, dev, idx, delta_tick);
} }
return idx; return idx;
...@@ -454,7 +450,7 @@ static int teo_enable_device(struct cpuidle_driver *drv, ...@@ -454,7 +450,7 @@ static int teo_enable_device(struct cpuidle_driver *drv,
memset(cpu_data, 0, sizeof(*cpu_data)); memset(cpu_data, 0, sizeof(*cpu_data));
for (i = 0; i < INTERVALS; i++) for (i = 0; i < INTERVALS; i++)
cpu_data->intervals[i] = UINT_MAX; cpu_data->intervals[i] = U64_MAX;
return 0; return 0;
} }
......
...@@ -49,6 +49,8 @@ void cpuidle_poll_state_init(struct cpuidle_driver *drv) ...@@ -49,6 +49,8 @@ void cpuidle_poll_state_init(struct cpuidle_driver *drv)
snprintf(state->desc, CPUIDLE_DESC_LEN, "CPUIDLE CORE POLL IDLE"); snprintf(state->desc, CPUIDLE_DESC_LEN, "CPUIDLE CORE POLL IDLE");
state->exit_latency = 0; state->exit_latency = 0;
state->target_residency = 0; state->target_residency = 0;
state->exit_latency_ns = 0;
state->target_residency_ns = 0;
state->power_usage = -1; state->power_usage = -1;
state->enter = poll_idle; state->enter = poll_idle;
state->disabled = false; state->disabled = false;
......
...@@ -273,16 +273,30 @@ static ssize_t show_state_##_name(struct cpuidle_state *state, \ ...@@ -273,16 +273,30 @@ static ssize_t show_state_##_name(struct cpuidle_state *state, \
return sprintf(buf, "%s\n", state->_name);\ return sprintf(buf, "%s\n", state->_name);\
} }
define_show_state_function(exit_latency) #define define_show_state_time_function(_name) \
define_show_state_function(target_residency) static ssize_t show_state_##_name(struct cpuidle_state *state, \
struct cpuidle_state_usage *state_usage, \
char *buf) \
{ \
return sprintf(buf, "%llu\n", ktime_to_us(state->_name##_ns)); \
}
define_show_state_time_function(exit_latency)
define_show_state_time_function(target_residency)
define_show_state_function(power_usage) define_show_state_function(power_usage)
define_show_state_ull_function(usage) define_show_state_ull_function(usage)
define_show_state_ull_function(time)
define_show_state_str_function(name) define_show_state_str_function(name)
define_show_state_str_function(desc) define_show_state_str_function(desc)
define_show_state_ull_function(above) define_show_state_ull_function(above)
define_show_state_ull_function(below) define_show_state_ull_function(below)
static ssize_t show_state_time(struct cpuidle_state *state,
struct cpuidle_state_usage *state_usage,
char *buf)
{
return sprintf(buf, "%llu\n", ktime_to_us(state_usage->time_ns));
}
static ssize_t show_state_disable(struct cpuidle_state *state, static ssize_t show_state_disable(struct cpuidle_state *state,
struct cpuidle_state_usage *state_usage, struct cpuidle_state_usage *state_usage,
char *buf) char *buf)
......
...@@ -35,7 +35,7 @@ struct cpuidle_driver; ...@@ -35,7 +35,7 @@ struct cpuidle_driver;
struct cpuidle_state_usage { struct cpuidle_state_usage {
unsigned long long disable; unsigned long long disable;
unsigned long long usage; unsigned long long usage;
unsigned long long time; /* in US */ u64 time_ns;
unsigned long long above; /* Number of times it's been too deep */ unsigned long long above; /* Number of times it's been too deep */
unsigned long long below; /* Number of times it's been too shallow */ unsigned long long below; /* Number of times it's been too shallow */
#ifdef CONFIG_SUSPEND #ifdef CONFIG_SUSPEND
...@@ -48,6 +48,8 @@ struct cpuidle_state { ...@@ -48,6 +48,8 @@ struct cpuidle_state {
char name[CPUIDLE_NAME_LEN]; char name[CPUIDLE_NAME_LEN];
char desc[CPUIDLE_DESC_LEN]; char desc[CPUIDLE_DESC_LEN];
u64 exit_latency_ns;
u64 target_residency_ns;
unsigned int flags; unsigned int flags;
unsigned int exit_latency; /* in US */ unsigned int exit_latency; /* in US */
int power_usage; /* in mW */ int power_usage; /* in mW */
...@@ -89,7 +91,7 @@ struct cpuidle_device { ...@@ -89,7 +91,7 @@ struct cpuidle_device {
ktime_t next_hrtimer; ktime_t next_hrtimer;
int last_state_idx; int last_state_idx;
int last_residency; u64 last_residency_ns;
u64 poll_limit_ns; u64 poll_limit_ns;
struct cpuidle_state_usage states_usage[CPUIDLE_STATE_MAX]; struct cpuidle_state_usage states_usage[CPUIDLE_STATE_MAX];
struct cpuidle_state_kobj *kobjs[CPUIDLE_STATE_MAX]; struct cpuidle_state_kobj *kobjs[CPUIDLE_STATE_MAX];
...@@ -263,7 +265,7 @@ struct cpuidle_governor { ...@@ -263,7 +265,7 @@ struct cpuidle_governor {
#ifdef CONFIG_CPU_IDLE #ifdef CONFIG_CPU_IDLE
extern int cpuidle_register_governor(struct cpuidle_governor *gov); extern int cpuidle_register_governor(struct cpuidle_governor *gov);
extern int cpuidle_governor_latency_req(unsigned int cpu); extern s64 cpuidle_governor_latency_req(unsigned int cpu);
#else #else
static inline int cpuidle_register_governor(struct cpuidle_governor *gov) static inline int cpuidle_register_governor(struct cpuidle_governor *gov)
{return 0;} {return 0;}
......
...@@ -104,7 +104,7 @@ static int call_cpuidle(struct cpuidle_driver *drv, struct cpuidle_device *dev, ...@@ -104,7 +104,7 @@ static int call_cpuidle(struct cpuidle_driver *drv, struct cpuidle_device *dev,
* update no idle residency and return. * update no idle residency and return.
*/ */
if (current_clr_polling_and_test()) { if (current_clr_polling_and_test()) {
dev->last_residency = 0; dev->last_residency_ns = 0;
local_irq_enable(); local_irq_enable();
return -EBUSY; return -EBUSY;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment