Commit c43aa3bd authored by venkatesh.pallipadi@intel.com, committed by Dave Jones

[CPUFREQ][2/6] cpufreq: Change load calculation in ondemand for software coordination

Change the load calculation algorithm in ondemand to work well with software
coordination of frequency across the dependent cpus.

Multiply each CPU's utilization by the average frequency of that logical CPU
during the measurement interval (obtained via the getavg call), then take the
maximum of these utilization-times-frequency values across the dependent CPUs.
That maximum is then used to derive the target frequency for the next sampling interval.
Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Dave Jones <davej@redhat.com>
parent bf0b90e3
...@@ -334,9 +334,7 @@ static struct attribute_group dbs_attr_group = { ...@@ -334,9 +334,7 @@ static struct attribute_group dbs_attr_group = {
static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info) static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
{ {
unsigned int idle_ticks, total_ticks; unsigned int max_load_freq;
unsigned int load = 0;
cputime64_t cur_jiffies;
struct cpufreq_policy *policy; struct cpufreq_policy *policy;
unsigned int j; unsigned int j;
...@@ -346,13 +344,7 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info) ...@@ -346,13 +344,7 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
this_dbs_info->freq_lo = 0; this_dbs_info->freq_lo = 0;
policy = this_dbs_info->cur_policy; policy = this_dbs_info->cur_policy;
cur_jiffies = jiffies64_to_cputime64(get_jiffies_64());
total_ticks = (unsigned int) cputime64_sub(cur_jiffies,
this_dbs_info->prev_cpu_wall);
this_dbs_info->prev_cpu_wall = get_jiffies_64();
if (!total_ticks)
return;
/* /*
* Every sampling_rate, we check, if current idle time is less * Every sampling_rate, we check, if current idle time is less
* than 20% (default), then we try to increase frequency * than 20% (default), then we try to increase frequency
...@@ -365,27 +357,46 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info) ...@@ -365,27 +357,46 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
* 5% (default) of current frequency * 5% (default) of current frequency
*/ */
/* Get Idle Time */ /* Get Absolute Load - in terms of freq */
idle_ticks = UINT_MAX; max_load_freq = 0;
for_each_cpu_mask_nr(j, policy->cpus) { for_each_cpu_mask_nr(j, policy->cpus) {
cputime64_t total_idle_ticks;
unsigned int tmp_idle_ticks;
struct cpu_dbs_info_s *j_dbs_info; struct cpu_dbs_info_s *j_dbs_info;
cputime64_t cur_wall_time, cur_idle_time;
unsigned int idle_time, wall_time;
unsigned int load, load_freq;
int freq_avg;
j_dbs_info = &per_cpu(cpu_dbs_info, j); j_dbs_info = &per_cpu(cpu_dbs_info, j);
total_idle_ticks = get_cpu_idle_time(j); cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
tmp_idle_ticks = (unsigned int) cputime64_sub(total_idle_ticks, wall_time = (unsigned int) cputime64_sub(cur_wall_time,
j_dbs_info->prev_cpu_wall);
j_dbs_info->prev_cpu_wall = cur_wall_time;
cur_idle_time = get_cpu_idle_time(j);
idle_time = (unsigned int) cputime64_sub(cur_idle_time,
j_dbs_info->prev_cpu_idle); j_dbs_info->prev_cpu_idle);
j_dbs_info->prev_cpu_idle = total_idle_ticks; j_dbs_info->prev_cpu_idle = cur_idle_time;
if (tmp_idle_ticks < idle_ticks) if (unlikely(wall_time <= idle_time ||
idle_ticks = tmp_idle_ticks; (cputime_to_msecs(wall_time) <
dbs_tuners_ins.sampling_rate / (2 * 1000)))) {
continue;
}
load = 100 * (wall_time - idle_time) / wall_time;
freq_avg = __cpufreq_driver_getavg(policy, j);
if (freq_avg <= 0)
freq_avg = policy->cur;
load_freq = load * freq_avg;
if (load_freq > max_load_freq)
max_load_freq = load_freq;
} }
if (likely(total_ticks > idle_ticks))
load = (100 * (total_ticks - idle_ticks)) / total_ticks;
/* Check for frequency increase */ /* Check for frequency increase */
if (load > dbs_tuners_ins.up_threshold) { if (max_load_freq > dbs_tuners_ins.up_threshold * policy->cur) {
/* if we are already at full speed then break out early */ /* if we are already at full speed then break out early */
if (!dbs_tuners_ins.powersave_bias) { if (!dbs_tuners_ins.powersave_bias) {
if (policy->cur == policy->max) if (policy->cur == policy->max)
...@@ -412,15 +423,9 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info) ...@@ -412,15 +423,9 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
* can support the current CPU usage without triggering the up * can support the current CPU usage without triggering the up
* policy. To be safe, we focus 10 points under the threshold. * policy. To be safe, we focus 10 points under the threshold.
*/ */
if (load < (dbs_tuners_ins.up_threshold - 10)) { if (max_load_freq < (dbs_tuners_ins.up_threshold - 10) * policy->cur) {
unsigned int freq_next, freq_cur; unsigned int freq_next;
freq_next = max_load_freq / (dbs_tuners_ins.up_threshold - 10);
freq_cur = __cpufreq_driver_getavg(policy, policy->cpu);
if (!freq_cur)
freq_cur = policy->cur;
freq_next = (freq_cur * load) /
(dbs_tuners_ins.up_threshold - 10);
if (!dbs_tuners_ins.powersave_bias) { if (!dbs_tuners_ins.powersave_bias) {
__cpufreq_driver_target(policy, freq_next, __cpufreq_driver_target(policy, freq_next,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment