Commit d253d2a5 authored by Brennan Shacklett, committed by Rafael J. Wysocki

intel_pstate: Improve accuracy by not truncating until final result

This patch addresses Bug 60727
(https://bugzilla.kernel.org/show_bug.cgi?id=60727),
which was caused by the truncation of intermediate values in the
calculations. That truncation made the code consistently underestimate
the current cpu frequency; in particular, 100% cpu utilization was
truncated down to the setpoint of 97%. This patch fixes the problem by
keeping the results of all intermediate calculations as fixed point
numbers rather than scaling them back and forth between integers and
fixed point.

References: https://bugzilla.kernel.org/show_bug.cgi?id=60727
Signed-off-by: Brennan Shacklett <bpshacklett@gmail.com>
Acked-by: Dirk Brandewie <dirk.j.brandewie@intel.com>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
parent 31d141e3
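Before the diff, a quick refresher on the representation it relies on. The helper names below mirror the ones visible in the diff (int_tofp, fp_toint, mul_fp, div_fp), but this is a user-space sketch, not the driver's definitions: the 8-bit fraction width and the plain-C division are assumptions made only for illustration.

/*
 * User-space sketch of intel_pstate-style fixed-point helpers.
 * A value v is stored as v * 2^FRAC_BITS; the fraction width used here
 * (8 bits) is assumed for illustration, not taken from the driver.
 */
#include <stdint.h>
#include <stdio.h>

#define FRAC_BITS 8
#define int_tofp(X)	((int64_t)(X) << FRAC_BITS)	/* int -> fixed point */
#define fp_toint(X)	((X) >> FRAC_BITS)		/* fixed point -> int (truncates) */

static int32_t mul_fp(int32_t x, int32_t y)
{
	/* The product carries 2*FRAC_BITS of fraction; shift one factor back out. */
	return ((int64_t)x * (int64_t)y) >> FRAC_BITS;
}

static int32_t div_fp(int32_t x, int32_t y)
{
	/* Pre-shift the dividend so the quotient keeps FRAC_BITS of fraction. */
	return ((int64_t)x << FRAC_BITS) / y;
}

int main(void)
{
	int32_t half = div_fp(int_tofp(1), int_tofp(2));	/* 0.5 -> 128 */
	int32_t three_halves = mul_fp(int_tofp(3), half);	/* 1.5 -> 384 */

	/* Truncation to an integer only happens at the very end. */
	printf("%d %d %d\n", half, three_halves, fp_toint(three_halves));
	return 0;
}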
@@ -48,7 +48,7 @@ static inline int32_t div_fp(int32_t x, int32_t y)
 }
 
 struct sample {
-	int core_pct_busy;
+	int32_t core_pct_busy;
 	u64 aperf;
 	u64 mperf;
 	int freq;
@@ -68,7 +68,7 @@ struct _pid {
 	int32_t i_gain;
 	int32_t d_gain;
 	int deadband;
-	int last_err;
+	int32_t last_err;
 };
 
 struct cpudata {
@@ -153,16 +153,15 @@ static inline void pid_d_gain_set(struct _pid *pid, int percent)
 	pid->d_gain = div_fp(int_tofp(percent), int_tofp(100));
 }
 
-static signed int pid_calc(struct _pid *pid, int busy)
+static signed int pid_calc(struct _pid *pid, int32_t busy)
 {
-	signed int err, result;
+	signed int result;
 	int32_t pterm, dterm, fp_error;
 	int32_t integral_limit;
 
-	err = pid->setpoint - busy;
-	fp_error = int_tofp(err);
+	fp_error = int_tofp(pid->setpoint) - busy;
 
-	if (abs(err) <= pid->deadband)
+	if (abs(fp_error) <= int_tofp(pid->deadband))
 		return 0;
 
 	pterm = mul_fp(pid->p_gain, fp_error);
@@ -176,8 +175,8 @@ static signed int pid_calc(struct _pid *pid, int busy)
 	if (pid->integral < -integral_limit)
 		pid->integral = -integral_limit;
 
-	dterm = mul_fp(pid->d_gain, (err - pid->last_err));
-	pid->last_err = err;
+	dterm = mul_fp(pid->d_gain, fp_error - pid->last_err);
+	pid->last_err = fp_error;
 
 	result = pterm + mul_fp(pid->integral, pid->i_gain) + dterm;
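Aside, not part of the diff: because the error now stays in fixed point, the deadband check in pid_calc() can see sub-integer errors that the old integer comparison swallowed. A small sketch with invented setpoint/deadband values and the same assumed 8-bit fraction as above:

/* Illustration only: hypothetical setpoint/deadband, assumed 8-bit fraction. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define FRAC_BITS 8
#define int_tofp(X)	((int64_t)(X) << FRAC_BITS)

int main(void)
{
	int setpoint = 97, deadband = 1;	/* hypothetical tuning values */
	double busy = 98.6;			/* measured busy, percent */

	/* Old check: busy reached pid_calc() already truncated to an int. */
	int err_old = setpoint - (int)busy;		/* 97 - 98 = -1 */
	int ignored_old = abs(err_old) <= deadband;	/* 1 <= 1: inside deadband */

	/* New check: the 1.6% error stays visible in fixed point. */
	int32_t fp_busy = (int32_t)(busy * (1 << FRAC_BITS));
	int32_t fp_error = int_tofp(setpoint) - fp_busy;
	int ignored_new = abs(fp_error) <= int_tofp(deadband);	/* 409 > 256: acted on */

	printf("old: %s, new: %s\n",
	       ignored_old ? "inside deadband" : "acted on",
	       ignored_new ? "inside deadband" : "acted on");
	return 0;
}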
@@ -436,8 +435,9 @@ static inline void intel_pstate_calc_busy(struct cpudata *cpu,
 					struct sample *sample)
 {
 	u64 core_pct;
-	core_pct = div64_u64(sample->aperf * 100, sample->mperf);
-	sample->freq = cpu->pstate.max_pstate * core_pct * 1000;
+	core_pct = div64_u64(int_tofp(sample->aperf * 100),
+			     sample->mperf);
+	sample->freq = fp_toint(cpu->pstate.max_pstate * core_pct * 1000);
 
 	sample->core_pct_busy = core_pct;
 }
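Aside, not part of the diff: the hunk above is where most of the precision was lost, since aperf * 100 / mperf was rounded down to a whole percent before anything else used it. A short sketch with made-up counter deltas (and the assumed 8-bit fraction from the sketch near the top) shows the difference:

/* Made-up APERF/MPERF deltas: 995/1000, i.e. ~99.5% of the base clock. */
#include <stdint.h>
#include <stdio.h>

#define FRAC_BITS 8
#define int_tofp(X)	((int64_t)(X) << FRAC_BITS)

int main(void)
{
	uint64_t aperf = 995, mperf = 1000;

	/* Old: plain integer division, 99.5 becomes 99. */
	uint64_t core_pct_old = aperf * 100 / mperf;

	/* New: convert to fixed point before dividing, so the .5 survives. */
	int64_t core_pct_new = int_tofp(aperf * 100) / mperf;

	printf("old: %llu  new: %.2f\n",
	       (unsigned long long)core_pct_old,
	       (double)core_pct_new / (1 << FRAC_BITS));	/* 99 vs 99.50 */
	return 0;
}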
@@ -469,22 +469,19 @@ static inline void intel_pstate_set_sample_time(struct cpudata *cpu)
 	mod_timer_pinned(&cpu->timer, jiffies + delay);
 }
 
-static inline int intel_pstate_get_scaled_busy(struct cpudata *cpu)
+static inline int32_t intel_pstate_get_scaled_busy(struct cpudata *cpu)
 {
-	int32_t busy_scaled;
 	int32_t core_busy, max_pstate, current_pstate;
 
-	core_busy = int_tofp(cpu->samples[cpu->sample_ptr].core_pct_busy);
+	core_busy = cpu->samples[cpu->sample_ptr].core_pct_busy;
 	max_pstate = int_tofp(cpu->pstate.max_pstate);
 	current_pstate = int_tofp(cpu->pstate.current_pstate);
-	busy_scaled = mul_fp(core_busy, div_fp(max_pstate, current_pstate));
-
-	return fp_toint(busy_scaled);
+	return mul_fp(core_busy, div_fp(max_pstate, current_pstate));
 }
 
 static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu)
 {
-	int busy_scaled;
+	int32_t busy_scaled;
 	struct _pid *pid;
 	signed int ctl = 0;
 	int steps;
...
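Aside, not part of the diff: putting the calc_busy and get_scaled_busy changes together shows the consistent underestimate the commit message describes. The numbers below are invented (a core ~99.5% busy while one pstate above max_pstate), and the 8-bit fraction is again an assumption for illustration:

/* Invented sample: core_pct ~99.5%, max_pstate 28, current_pstate 29. */
#include <stdint.h>
#include <stdio.h>

#define FRAC_BITS 8
#define int_tofp(X)	((int64_t)(X) << FRAC_BITS)
#define fp_toint(X)	((X) >> FRAC_BITS)

static int32_t mul_fp(int32_t x, int32_t y)
{
	return ((int64_t)x * (int64_t)y) >> FRAC_BITS;
}

static int32_t div_fp(int32_t x, int32_t y)
{
	return ((int64_t)x << FRAC_BITS) / y;
}

int main(void)
{
	int max_pstate = 28, current_pstate = 29;
	int32_t ratio = div_fp(int_tofp(max_pstate), int_tofp(current_pstate));

	/* Old path: core_pct_busy was already truncated to 99, and the scaled
	 * result was truncated again before reaching the PID. */
	int busy_old = fp_toint(mul_fp(int_tofp(99), ratio));

	/* New path: core_pct_busy arrives as 99.5 in fixed point, and the
	 * scaled value is handed to the PID without truncation. */
	int32_t core_pct_new = int_tofp(995 * 100) / 1000;	/* 99.5 in fixed point */
	int32_t busy_new = mul_fp(core_pct_new, ratio);

	printf("old PID input: %d  new PID input: %.2f\n",
	       busy_old, (double)busy_new / (1 << FRAC_BITS));	/* 95 vs 96.00 */
	return 0;
}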