Commit c79e6fa9 authored by Linus Torvalds's avatar Linus Torvalds

Merge tag 'pm-6.1-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm

Pull power management updates from Rafael Wysocki:
 "These add support for some new hardware, extend the existing hardware
  support, fix some issues and clean up code

  Specifics:

   - Add support for Tiger Lake in no-HWP mode to intel_pstate (Doug
     Smythies)

   - Update the AMD P-state driver (Perry Yuan):
      - Fix wrong lowest perf fetch
      - Map desired perf into pstate scope for powersave governor
      - Update pstate frequency transition delay time
      - Fix initial highest_perf value
      - Clean up

   - Move max CPU capacity to sugov_policy in the schedutil cpufreq
     governor (Lukasz Luba)

   - Add SM6115 to cpufreq-dt blocklist (Adam Skladowski)

   - Add support for Tegra239 and minor cleanups (Sumit Gupta, ye
     xingchen, and Yang Yingliang)

   - Add freq qos for qcom cpufreq driver and minor cleanups (Xuewen
     Yan, and Viresh Kumar)

   - Minor cleanups around functions called at module_init() (Xiu
     Jianfeng)

   - Use module_init and add module_exit for bmips driver (Zhang
     Jianhua)

   - Add AlderLake-N support to intel_idle (Zhang Rui)

   - Replace strlcpy() with unused retval with strscpy() in intel_idle
     (Wolfram Sang)

   - Remove redundant check from cpuidle_switch_governor() (Yu Liao)

   - Replace strlcpy() with unused retval with strscpy() in the powernv
     cpuidle driver (Wolfram Sang)

   - Drop duplicate word from a comment in the coupled cpuidle driver
     (Jason Wang)

   - Make rpm_resume() return -EINPROGRESS if RPM_NOWAIT is passed to it
     in the flags and the device is about to resume (Rafael Wysocki)

   - Add extra debugging statement for multiple active IRQs to system
     wakeup handling code (Mario Limonciello)

   - Replace strlcpy() with unused retval with strscpy() in the core
     system suspend support code (Wolfram Sang)

   - Update the intel_rapl power capping driver:
      - Use standard Energy Unit for SPR Dram RAPL domain (Zhang Rui).
      - Add support for RAPTORLAKE_S (Zhang Rui).
      - Fix UBSAN shift-out-of-bounds issue (Chao Qin)

   - Handle -EPROBE_DEFER when regulator is not probed on
     mtk-ci-devfreq.c (AngeloGioacchino Del Regno)

   - Fix message typo and use dev_err_probe() in rockchip-dfi.c
     (Christophe JAILLET)"

* tag 'pm-6.1-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm: (29 commits)
  cpufreq: qcom-cpufreq-hw: Add cpufreq qos for LMh
  cpufreq: Add __init annotation to module init funcs
  cpufreq: tegra194: change tegra239_cpufreq_soc to static
  PM / devfreq: rockchip-dfi: Fix an error message
  PM / devfreq: mtk-cci: Handle sram regulator probe deferral
  powercap: intel_rapl: Use standard Energy Unit for SPR Dram RAPL domain
  PM: runtime: Return -EINPROGRESS from rpm_resume() in the RPM_NOWAIT case
  intel_idle: Add AlderLake-N support
  powercap: intel_rapl: fix UBSAN shift-out-of-bounds issue
  cpufreq: tegra194: Add support for Tegra239
  cpufreq: qcom-cpufreq-hw: Fix uninitialized throttled_freq warning
  cpufreq: intel_pstate: Add Tigerlake support in no-HWP mode
  powercap: intel_rapl: Add support for RAPTORLAKE_S
  cpufreq: amd-pstate: Fix initial highest_perf value
  cpuidle: Remove redundant check in cpuidle_switch_governor()
  PM: wakeup: Add extra debugging statement for multiple active IRQs
  cpufreq: tegra194: Remove the unneeded result variable
  PM: suspend: move from strlcpy() with unused retval to strscpy()
  intel_idle: move from strlcpy() with unused retval to strscpy()
  cpuidle: powernv: move from strlcpy() with unused retval to strscpy()
  ...
parents 9388076b ac73ce39
...@@ -792,10 +792,13 @@ static int rpm_resume(struct device *dev, int rpmflags) ...@@ -792,10 +792,13 @@ static int rpm_resume(struct device *dev, int rpmflags)
DEFINE_WAIT(wait); DEFINE_WAIT(wait);
if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) { if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
if (dev->power.runtime_status == RPM_SUSPENDING) if (dev->power.runtime_status == RPM_SUSPENDING) {
dev->power.deferred_resume = true; dev->power.deferred_resume = true;
else if (rpmflags & RPM_NOWAIT)
retval = -EINPROGRESS; retval = -EINPROGRESS;
} else {
retval = -EINPROGRESS;
}
goto out; goto out;
} }
......
...@@ -944,6 +944,8 @@ void pm_system_irq_wakeup(unsigned int irq_number) ...@@ -944,6 +944,8 @@ void pm_system_irq_wakeup(unsigned int irq_number)
else else
irq_number = 0; irq_number = 0;
pm_pr_dbg("Triggering wakeup from IRQ %d\n", irq_number);
raw_spin_unlock_irqrestore(&wakeup_irq_lock, flags); raw_spin_unlock_irqrestore(&wakeup_irq_lock, flags);
if (irq_number) if (irq_number)
......
...@@ -46,8 +46,8 @@ ...@@ -46,8 +46,8 @@
#include <asm/cpu_device_id.h> #include <asm/cpu_device_id.h>
#include "amd-pstate-trace.h" #include "amd-pstate-trace.h"
#define AMD_PSTATE_TRANSITION_LATENCY 0x20000 #define AMD_PSTATE_TRANSITION_LATENCY 20000
#define AMD_PSTATE_TRANSITION_DELAY 500 #define AMD_PSTATE_TRANSITION_DELAY 1000
/* /*
* TODO: We need more time to fine tune processors with shared memory solution * TODO: We need more time to fine tune processors with shared memory solution
...@@ -152,6 +152,7 @@ static inline int amd_pstate_enable(bool enable) ...@@ -152,6 +152,7 @@ static inline int amd_pstate_enable(bool enable)
static int pstate_init_perf(struct amd_cpudata *cpudata) static int pstate_init_perf(struct amd_cpudata *cpudata)
{ {
u64 cap1; u64 cap1;
u32 highest_perf;
int ret = rdmsrl_safe_on_cpu(cpudata->cpu, MSR_AMD_CPPC_CAP1, int ret = rdmsrl_safe_on_cpu(cpudata->cpu, MSR_AMD_CPPC_CAP1,
&cap1); &cap1);
...@@ -163,7 +164,11 @@ static int pstate_init_perf(struct amd_cpudata *cpudata) ...@@ -163,7 +164,11 @@ static int pstate_init_perf(struct amd_cpudata *cpudata)
* *
* CPPC entry doesn't indicate the highest performance in some ASICs. * CPPC entry doesn't indicate the highest performance in some ASICs.
*/ */
WRITE_ONCE(cpudata->highest_perf, amd_get_highest_perf()); highest_perf = amd_get_highest_perf();
if (highest_perf > AMD_CPPC_HIGHEST_PERF(cap1))
highest_perf = AMD_CPPC_HIGHEST_PERF(cap1);
WRITE_ONCE(cpudata->highest_perf, highest_perf);
WRITE_ONCE(cpudata->nominal_perf, AMD_CPPC_NOMINAL_PERF(cap1)); WRITE_ONCE(cpudata->nominal_perf, AMD_CPPC_NOMINAL_PERF(cap1));
WRITE_ONCE(cpudata->lowest_nonlinear_perf, AMD_CPPC_LOWNONLIN_PERF(cap1)); WRITE_ONCE(cpudata->lowest_nonlinear_perf, AMD_CPPC_LOWNONLIN_PERF(cap1));
...@@ -175,12 +180,17 @@ static int pstate_init_perf(struct amd_cpudata *cpudata) ...@@ -175,12 +180,17 @@ static int pstate_init_perf(struct amd_cpudata *cpudata)
static int cppc_init_perf(struct amd_cpudata *cpudata) static int cppc_init_perf(struct amd_cpudata *cpudata)
{ {
struct cppc_perf_caps cppc_perf; struct cppc_perf_caps cppc_perf;
u32 highest_perf;
int ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf); int ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf);
if (ret) if (ret)
return ret; return ret;
WRITE_ONCE(cpudata->highest_perf, amd_get_highest_perf()); highest_perf = amd_get_highest_perf();
if (highest_perf > cppc_perf.highest_perf)
highest_perf = cppc_perf.highest_perf;
WRITE_ONCE(cpudata->highest_perf, highest_perf);
WRITE_ONCE(cpudata->nominal_perf, cppc_perf.nominal_perf); WRITE_ONCE(cpudata->nominal_perf, cppc_perf.nominal_perf);
WRITE_ONCE(cpudata->lowest_nonlinear_perf, WRITE_ONCE(cpudata->lowest_nonlinear_perf,
...@@ -269,6 +279,7 @@ static void amd_pstate_update(struct amd_cpudata *cpudata, u32 min_perf, ...@@ -269,6 +279,7 @@ static void amd_pstate_update(struct amd_cpudata *cpudata, u32 min_perf,
u64 prev = READ_ONCE(cpudata->cppc_req_cached); u64 prev = READ_ONCE(cpudata->cppc_req_cached);
u64 value = prev; u64 value = prev;
des_perf = clamp_t(unsigned long, des_perf, min_perf, max_perf);
value &= ~AMD_CPPC_MIN_PERF(~0L); value &= ~AMD_CPPC_MIN_PERF(~0L);
value |= AMD_CPPC_MIN_PERF(min_perf); value |= AMD_CPPC_MIN_PERF(min_perf);
...@@ -312,7 +323,7 @@ static int amd_pstate_target(struct cpufreq_policy *policy, ...@@ -312,7 +323,7 @@ static int amd_pstate_target(struct cpufreq_policy *policy,
return -ENODEV; return -ENODEV;
cap_perf = READ_ONCE(cpudata->highest_perf); cap_perf = READ_ONCE(cpudata->highest_perf);
min_perf = READ_ONCE(cpudata->lowest_nonlinear_perf); min_perf = READ_ONCE(cpudata->lowest_perf);
max_perf = cap_perf; max_perf = cap_perf;
freqs.old = policy->cur; freqs.old = policy->cur;
...@@ -357,8 +368,6 @@ static void amd_pstate_adjust_perf(unsigned int cpu, ...@@ -357,8 +368,6 @@ static void amd_pstate_adjust_perf(unsigned int cpu,
if (max_perf < min_perf) if (max_perf < min_perf)
max_perf = min_perf; max_perf = min_perf;
des_perf = clamp_t(unsigned long, des_perf, min_perf, max_perf);
amd_pstate_update(cpudata, min_perf, des_perf, max_perf, true); amd_pstate_update(cpudata, min_perf, des_perf, max_perf, true);
} }
...@@ -555,9 +564,7 @@ static int amd_pstate_cpu_init(struct cpufreq_policy *policy) ...@@ -555,9 +564,7 @@ static int amd_pstate_cpu_init(struct cpufreq_policy *policy)
static int amd_pstate_cpu_exit(struct cpufreq_policy *policy) static int amd_pstate_cpu_exit(struct cpufreq_policy *policy)
{ {
struct amd_cpudata *cpudata; struct amd_cpudata *cpudata = policy->driver_data;
cpudata = policy->driver_data;
freq_qos_remove_request(&cpudata->req[1]); freq_qos_remove_request(&cpudata->req[1]);
freq_qos_remove_request(&cpudata->req[0]); freq_qos_remove_request(&cpudata->req[0]);
...@@ -599,9 +606,7 @@ static ssize_t show_amd_pstate_max_freq(struct cpufreq_policy *policy, ...@@ -599,9 +606,7 @@ static ssize_t show_amd_pstate_max_freq(struct cpufreq_policy *policy,
char *buf) char *buf)
{ {
int max_freq; int max_freq;
struct amd_cpudata *cpudata; struct amd_cpudata *cpudata = policy->driver_data;
cpudata = policy->driver_data;
max_freq = amd_get_max_freq(cpudata); max_freq = amd_get_max_freq(cpudata);
if (max_freq < 0) if (max_freq < 0)
...@@ -614,9 +619,7 @@ static ssize_t show_amd_pstate_lowest_nonlinear_freq(struct cpufreq_policy *poli ...@@ -614,9 +619,7 @@ static ssize_t show_amd_pstate_lowest_nonlinear_freq(struct cpufreq_policy *poli
char *buf) char *buf)
{ {
int freq; int freq;
struct amd_cpudata *cpudata; struct amd_cpudata *cpudata = policy->driver_data;
cpudata = policy->driver_data;
freq = amd_get_lowest_nonlinear_freq(cpudata); freq = amd_get_lowest_nonlinear_freq(cpudata);
if (freq < 0) if (freq < 0)
......
...@@ -156,7 +156,7 @@ static struct cpufreq_driver bmips_cpufreq_driver = { ...@@ -156,7 +156,7 @@ static struct cpufreq_driver bmips_cpufreq_driver = {
.name = BMIPS_CPUFREQ_PREFIX, .name = BMIPS_CPUFREQ_PREFIX,
}; };
static int __init bmips_cpufreq_probe(void) static int __init bmips_cpufreq_driver_init(void)
{ {
struct cpufreq_compat *cc; struct cpufreq_compat *cc;
struct device_node *np; struct device_node *np;
...@@ -176,7 +176,13 @@ static int __init bmips_cpufreq_probe(void) ...@@ -176,7 +176,13 @@ static int __init bmips_cpufreq_probe(void)
return cpufreq_register_driver(&bmips_cpufreq_driver); return cpufreq_register_driver(&bmips_cpufreq_driver);
} }
device_initcall(bmips_cpufreq_probe); module_init(bmips_cpufreq_driver_init);
static void __exit bmips_cpufreq_driver_exit(void)
{
cpufreq_unregister_driver(&bmips_cpufreq_driver);
}
module_exit(bmips_cpufreq_driver_exit);
MODULE_AUTHOR("Markus Mayer <mmayer@broadcom.com>"); MODULE_AUTHOR("Markus Mayer <mmayer@broadcom.com>");
MODULE_DESCRIPTION("CPUfreq driver for Broadcom BMIPS SoCs"); MODULE_DESCRIPTION("CPUfreq driver for Broadcom BMIPS SoCs");
......
...@@ -146,6 +146,7 @@ static const struct of_device_id blocklist[] __initconst = { ...@@ -146,6 +146,7 @@ static const struct of_device_id blocklist[] __initconst = {
{ .compatible = "qcom,sc8180x", }, { .compatible = "qcom,sc8180x", },
{ .compatible = "qcom,sc8280xp", }, { .compatible = "qcom,sc8280xp", },
{ .compatible = "qcom,sdm845", }, { .compatible = "qcom,sdm845", },
{ .compatible = "qcom,sm6115", },
{ .compatible = "qcom,sm6350", }, { .compatible = "qcom,sm6350", },
{ .compatible = "qcom,sm8150", }, { .compatible = "qcom,sm8150", },
{ .compatible = "qcom,sm8250", }, { .compatible = "qcom,sm8250", },
......
...@@ -55,7 +55,7 @@ static struct notifier_block hb_cpufreq_clk_nb = { ...@@ -55,7 +55,7 @@ static struct notifier_block hb_cpufreq_clk_nb = {
.notifier_call = hb_cpufreq_clk_notify, .notifier_call = hb_cpufreq_clk_notify,
}; };
static int hb_cpufreq_driver_init(void) static int __init hb_cpufreq_driver_init(void)
{ {
struct platform_device_info devinfo = { .name = "cpufreq-dt", }; struct platform_device_info devinfo = { .name = "cpufreq-dt", };
struct device *cpu_dev; struct device *cpu_dev;
......
...@@ -2416,6 +2416,7 @@ static const struct x86_cpu_id intel_pstate_cpu_ids[] = { ...@@ -2416,6 +2416,7 @@ static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
X86_MATCH(SKYLAKE_X, core_funcs), X86_MATCH(SKYLAKE_X, core_funcs),
X86_MATCH(COMETLAKE, core_funcs), X86_MATCH(COMETLAKE, core_funcs),
X86_MATCH(ICELAKE_X, core_funcs), X86_MATCH(ICELAKE_X, core_funcs),
X86_MATCH(TIGERLAKE, core_funcs),
{} {}
}; };
MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids); MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids);
......
...@@ -13,6 +13,7 @@ ...@@ -13,6 +13,7 @@
#include <linux/of_address.h> #include <linux/of_address.h>
#include <linux/of_platform.h> #include <linux/of_platform.h>
#include <linux/pm_opp.h> #include <linux/pm_opp.h>
#include <linux/pm_qos.h>
#include <linux/slab.h> #include <linux/slab.h>
#include <linux/spinlock.h> #include <linux/spinlock.h>
#include <linux/units.h> #include <linux/units.h>
...@@ -56,6 +57,8 @@ struct qcom_cpufreq_data { ...@@ -56,6 +57,8 @@ struct qcom_cpufreq_data {
struct cpufreq_policy *policy; struct cpufreq_policy *policy;
bool per_core_dcvs; bool per_core_dcvs;
struct freq_qos_request throttle_freq_req;
}; };
static unsigned long cpu_hw_rate, xo_rate; static unsigned long cpu_hw_rate, xo_rate;
...@@ -316,14 +319,16 @@ static void qcom_lmh_dcvs_notify(struct qcom_cpufreq_data *data) ...@@ -316,14 +319,16 @@ static void qcom_lmh_dcvs_notify(struct qcom_cpufreq_data *data)
if (IS_ERR(opp)) { if (IS_ERR(opp)) {
dev_warn(dev, "Can't find the OPP for throttling: %pe!\n", opp); dev_warn(dev, "Can't find the OPP for throttling: %pe!\n", opp);
} else { } else {
dev_pm_opp_put(opp);
}
throttled_freq = freq_hz / HZ_PER_KHZ; throttled_freq = freq_hz / HZ_PER_KHZ;
freq_qos_update_request(&data->throttle_freq_req, throttled_freq);
/* Update thermal pressure (the boost frequencies are accepted) */ /* Update thermal pressure (the boost frequencies are accepted) */
arch_update_thermal_pressure(policy->related_cpus, throttled_freq); arch_update_thermal_pressure(policy->related_cpus, throttled_freq);
dev_pm_opp_put(opp);
}
/* /*
* In the unlikely case policy is unregistered do not enable * In the unlikely case policy is unregistered do not enable
* polling or h/w interrupt * polling or h/w interrupt
...@@ -413,6 +418,14 @@ static int qcom_cpufreq_hw_lmh_init(struct cpufreq_policy *policy, int index) ...@@ -413,6 +418,14 @@ static int qcom_cpufreq_hw_lmh_init(struct cpufreq_policy *policy, int index)
if (data->throttle_irq < 0) if (data->throttle_irq < 0)
return data->throttle_irq; return data->throttle_irq;
ret = freq_qos_add_request(&policy->constraints,
&data->throttle_freq_req, FREQ_QOS_MAX,
FREQ_QOS_MAX_DEFAULT_VALUE);
if (ret < 0) {
dev_err(&pdev->dev, "Failed to add freq constraint (%d)\n", ret);
return ret;
}
data->cancel_throttle = false; data->cancel_throttle = false;
data->policy = policy; data->policy = policy;
...@@ -479,6 +492,7 @@ static void qcom_cpufreq_hw_lmh_exit(struct qcom_cpufreq_data *data) ...@@ -479,6 +492,7 @@ static void qcom_cpufreq_hw_lmh_exit(struct qcom_cpufreq_data *data)
if (data->throttle_irq <= 0) if (data->throttle_irq <= 0)
return; return;
freq_qos_remove_request(&data->throttle_freq_req);
free_irq(data->throttle_irq, data); free_irq(data->throttle_irq, data);
} }
......
...@@ -252,7 +252,7 @@ static int sti_cpufreq_fetch_syscon_registers(void) ...@@ -252,7 +252,7 @@ static int sti_cpufreq_fetch_syscon_registers(void)
return 0; return 0;
} }
static int sti_cpufreq_init(void) static int __init sti_cpufreq_init(void)
{ {
int ret; int ret;
......
...@@ -38,14 +38,6 @@ ...@@ -38,14 +38,6 @@
/* cpufreq transisition latency */ /* cpufreq transisition latency */
#define TEGRA_CPUFREQ_TRANSITION_LATENCY (300 * 1000) /* unit in nanoseconds */ #define TEGRA_CPUFREQ_TRANSITION_LATENCY (300 * 1000) /* unit in nanoseconds */
enum cluster {
CLUSTER0,
CLUSTER1,
CLUSTER2,
CLUSTER3,
MAX_CLUSTERS,
};
struct tegra_cpu_ctr { struct tegra_cpu_ctr {
u32 cpu; u32 cpu;
u32 coreclk_cnt, last_coreclk_cnt; u32 coreclk_cnt, last_coreclk_cnt;
...@@ -67,12 +59,12 @@ struct tegra_cpufreq_ops { ...@@ -67,12 +59,12 @@ struct tegra_cpufreq_ops {
struct tegra_cpufreq_soc { struct tegra_cpufreq_soc {
struct tegra_cpufreq_ops *ops; struct tegra_cpufreq_ops *ops;
int maxcpus_per_cluster; int maxcpus_per_cluster;
unsigned int num_clusters;
phys_addr_t actmon_cntr_base; phys_addr_t actmon_cntr_base;
}; };
struct tegra194_cpufreq_data { struct tegra194_cpufreq_data {
void __iomem *regs; void __iomem *regs;
size_t num_clusters;
struct cpufreq_frequency_table **tables; struct cpufreq_frequency_table **tables;
const struct tegra_cpufreq_soc *soc; const struct tegra_cpufreq_soc *soc;
}; };
...@@ -166,6 +158,14 @@ static const struct tegra_cpufreq_soc tegra234_cpufreq_soc = { ...@@ -166,6 +158,14 @@ static const struct tegra_cpufreq_soc tegra234_cpufreq_soc = {
.ops = &tegra234_cpufreq_ops, .ops = &tegra234_cpufreq_ops,
.actmon_cntr_base = 0x9000, .actmon_cntr_base = 0x9000,
.maxcpus_per_cluster = 4, .maxcpus_per_cluster = 4,
.num_clusters = 3,
};
static const struct tegra_cpufreq_soc tegra239_cpufreq_soc = {
.ops = &tegra234_cpufreq_ops,
.actmon_cntr_base = 0x4000,
.maxcpus_per_cluster = 8,
.num_clusters = 1,
}; };
static void tegra194_get_cpu_cluster_id(u32 cpu, u32 *cpuid, u32 *clusterid) static void tegra194_get_cpu_cluster_id(u32 cpu, u32 *cpuid, u32 *clusterid)
...@@ -314,11 +314,7 @@ static void tegra194_get_cpu_ndiv_sysreg(void *ndiv) ...@@ -314,11 +314,7 @@ static void tegra194_get_cpu_ndiv_sysreg(void *ndiv)
static int tegra194_get_cpu_ndiv(u32 cpu, u32 cpuid, u32 clusterid, u64 *ndiv) static int tegra194_get_cpu_ndiv(u32 cpu, u32 cpuid, u32 clusterid, u64 *ndiv)
{ {
int ret; return smp_call_function_single(cpu, tegra194_get_cpu_ndiv_sysreg, &ndiv, true);
ret = smp_call_function_single(cpu, tegra194_get_cpu_ndiv_sysreg, &ndiv, true);
return ret;
} }
static void tegra194_set_cpu_ndiv_sysreg(void *data) static void tegra194_set_cpu_ndiv_sysreg(void *data)
...@@ -382,7 +378,7 @@ static int tegra194_cpufreq_init(struct cpufreq_policy *policy) ...@@ -382,7 +378,7 @@ static int tegra194_cpufreq_init(struct cpufreq_policy *policy)
data->soc->ops->get_cpu_cluster_id(policy->cpu, NULL, &clusterid); data->soc->ops->get_cpu_cluster_id(policy->cpu, NULL, &clusterid);
if (clusterid >= data->num_clusters || !data->tables[clusterid]) if (clusterid >= data->soc->num_clusters || !data->tables[clusterid])
return -EINVAL; return -EINVAL;
start_cpu = rounddown(policy->cpu, maxcpus_per_cluster); start_cpu = rounddown(policy->cpu, maxcpus_per_cluster);
...@@ -433,6 +429,7 @@ static struct tegra_cpufreq_ops tegra194_cpufreq_ops = { ...@@ -433,6 +429,7 @@ static struct tegra_cpufreq_ops tegra194_cpufreq_ops = {
static const struct tegra_cpufreq_soc tegra194_cpufreq_soc = { static const struct tegra_cpufreq_soc tegra194_cpufreq_soc = {
.ops = &tegra194_cpufreq_ops, .ops = &tegra194_cpufreq_ops,
.maxcpus_per_cluster = 2, .maxcpus_per_cluster = 2,
.num_clusters = 4,
}; };
static void tegra194_cpufreq_free_resources(void) static void tegra194_cpufreq_free_resources(void)
...@@ -525,15 +522,14 @@ static int tegra194_cpufreq_probe(struct platform_device *pdev) ...@@ -525,15 +522,14 @@ static int tegra194_cpufreq_probe(struct platform_device *pdev)
soc = of_device_get_match_data(&pdev->dev); soc = of_device_get_match_data(&pdev->dev);
if (soc->ops && soc->maxcpus_per_cluster) { if (soc->ops && soc->maxcpus_per_cluster && soc->num_clusters) {
data->soc = soc; data->soc = soc;
} else { } else {
dev_err(&pdev->dev, "soc data missing\n"); dev_err(&pdev->dev, "soc data missing\n");
return -EINVAL; return -EINVAL;
} }
data->num_clusters = MAX_CLUSTERS; data->tables = devm_kcalloc(&pdev->dev, data->soc->num_clusters,
data->tables = devm_kcalloc(&pdev->dev, data->num_clusters,
sizeof(*data->tables), GFP_KERNEL); sizeof(*data->tables), GFP_KERNEL);
if (!data->tables) if (!data->tables)
return -ENOMEM; return -ENOMEM;
...@@ -558,7 +554,7 @@ static int tegra194_cpufreq_probe(struct platform_device *pdev) ...@@ -558,7 +554,7 @@ static int tegra194_cpufreq_probe(struct platform_device *pdev)
goto put_bpmp; goto put_bpmp;
} }
for (i = 0; i < data->num_clusters; i++) { for (i = 0; i < data->soc->num_clusters; i++) {
data->tables[i] = init_freq_table(pdev, bpmp, i); data->tables[i] = init_freq_table(pdev, bpmp, i);
if (IS_ERR(data->tables[i])) { if (IS_ERR(data->tables[i])) {
err = PTR_ERR(data->tables[i]); err = PTR_ERR(data->tables[i]);
...@@ -590,6 +586,7 @@ static int tegra194_cpufreq_remove(struct platform_device *pdev) ...@@ -590,6 +586,7 @@ static int tegra194_cpufreq_remove(struct platform_device *pdev)
static const struct of_device_id tegra194_cpufreq_of_match[] = { static const struct of_device_id tegra194_cpufreq_of_match[] = {
{ .compatible = "nvidia,tegra194-ccplex", .data = &tegra194_cpufreq_soc }, { .compatible = "nvidia,tegra194-ccplex", .data = &tegra194_cpufreq_soc },
{ .compatible = "nvidia,tegra234-ccplex-cluster", .data = &tegra234_cpufreq_soc }, { .compatible = "nvidia,tegra234-ccplex-cluster", .data = &tegra234_cpufreq_soc },
{ .compatible = "nvidia,tegra239-ccplex-cluster", .data = &tegra239_cpufreq_soc },
{ /* sentinel */ } { /* sentinel */ }
}; };
......
...@@ -398,7 +398,7 @@ static int ti_cpufreq_probe(struct platform_device *pdev) ...@@ -398,7 +398,7 @@ static int ti_cpufreq_probe(struct platform_device *pdev)
return ret; return ret;
} }
static int ti_cpufreq_init(void) static int __init ti_cpufreq_init(void)
{ {
const struct of_device_id *match; const struct of_device_id *match;
......
...@@ -54,7 +54,7 @@ ...@@ -54,7 +54,7 @@
* variable is not locked. It is only written from the cpu that * variable is not locked. It is only written from the cpu that
* it stores (or by the on/offlining cpu if that cpu is offline), * it stores (or by the on/offlining cpu if that cpu is offline),
* and only read after all the cpus are ready for the coupled idle * and only read after all the cpus are ready for the coupled idle
* state are are no longer updating it. * state are no longer updating it.
* *
* Three atomic counters are used. alive_count tracks the number * Three atomic counters are used. alive_count tracks the number
* of cpus in the coupled set that are currently or soon will be * of cpus in the coupled set that are currently or soon will be
......
...@@ -233,8 +233,8 @@ static inline void add_powernv_state(int index, const char *name, ...@@ -233,8 +233,8 @@ static inline void add_powernv_state(int index, const char *name,
unsigned int exit_latency, unsigned int exit_latency,
u64 psscr_val, u64 psscr_mask) u64 psscr_val, u64 psscr_mask)
{ {
strlcpy(powernv_states[index].name, name, CPUIDLE_NAME_LEN); strscpy(powernv_states[index].name, name, CPUIDLE_NAME_LEN);
strlcpy(powernv_states[index].desc, name, CPUIDLE_NAME_LEN); strscpy(powernv_states[index].desc, name, CPUIDLE_NAME_LEN);
powernv_states[index].flags = flags; powernv_states[index].flags = flags;
powernv_states[index].target_residency = target_residency; powernv_states[index].target_residency = target_residency;
powernv_states[index].exit_latency = exit_latency; powernv_states[index].exit_latency = exit_latency;
......
...@@ -63,12 +63,11 @@ int cpuidle_switch_governor(struct cpuidle_governor *gov) ...@@ -63,12 +63,11 @@ int cpuidle_switch_governor(struct cpuidle_governor *gov)
cpuidle_curr_governor = gov; cpuidle_curr_governor = gov;
if (gov) {
list_for_each_entry(dev, &cpuidle_detected_devices, device_list) list_for_each_entry(dev, &cpuidle_detected_devices, device_list)
cpuidle_enable_device(dev); cpuidle_enable_device(dev);
cpuidle_install_idle_handler(); cpuidle_install_idle_handler();
printk(KERN_INFO "cpuidle: using governor %s\n", gov->name); pr_info("cpuidle: using governor %s\n", gov->name);
}
return 0; return 0;
} }
......
...@@ -189,10 +189,9 @@ static int rockchip_dfi_probe(struct platform_device *pdev) ...@@ -189,10 +189,9 @@ static int rockchip_dfi_probe(struct platform_device *pdev)
return PTR_ERR(data->regs); return PTR_ERR(data->regs);
data->clk = devm_clk_get(dev, "pclk_ddr_mon"); data->clk = devm_clk_get(dev, "pclk_ddr_mon");
if (IS_ERR(data->clk)) { if (IS_ERR(data->clk))
dev_err(dev, "Cannot get the clk dmc_clk\n"); return dev_err_probe(dev, PTR_ERR(data->clk),
return PTR_ERR(data->clk); "Cannot get the clk pclk_ddr_mon\n");
}
/* try to find the optional reference to the pmu syscon */ /* try to find the optional reference to the pmu syscon */
node = of_parse_phandle(np, "rockchip,pmu", 0); node = of_parse_phandle(np, "rockchip,pmu", 0);
......
...@@ -291,9 +291,13 @@ static int mtk_ccifreq_probe(struct platform_device *pdev) ...@@ -291,9 +291,13 @@ static int mtk_ccifreq_probe(struct platform_device *pdev)
} }
drv->sram_reg = devm_regulator_get_optional(dev, "sram"); drv->sram_reg = devm_regulator_get_optional(dev, "sram");
if (IS_ERR(drv->sram_reg)) if (IS_ERR(drv->sram_reg)) {
ret = PTR_ERR(drv->sram_reg);
if (ret == -EPROBE_DEFER)
goto out_free_resources;
drv->sram_reg = NULL; drv->sram_reg = NULL;
else { } else {
ret = regulator_enable(drv->sram_reg); ret = regulator_enable(drv->sram_reg);
if (ret) { if (ret) {
dev_err(dev, "failed to enable sram regulator\n"); dev_err(dev, "failed to enable sram regulator\n");
......
...@@ -928,6 +928,51 @@ static struct cpuidle_state adl_l_cstates[] __initdata = { ...@@ -928,6 +928,51 @@ static struct cpuidle_state adl_l_cstates[] __initdata = {
.enter = NULL } .enter = NULL }
}; };
static struct cpuidle_state adl_n_cstates[] __initdata = {
{
.name = "C1",
.desc = "MWAIT 0x00",
.flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_UNUSABLE,
.exit_latency = 1,
.target_residency = 1,
.enter = &intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C1E",
.desc = "MWAIT 0x01",
.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
.exit_latency = 2,
.target_residency = 4,
.enter = &intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C6",
.desc = "MWAIT 0x20",
.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 195,
.target_residency = 585,
.enter = &intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C8",
.desc = "MWAIT 0x40",
.flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 260,
.target_residency = 1040,
.enter = &intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C10",
.desc = "MWAIT 0x60",
.flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 660,
.target_residency = 1980,
.enter = &intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.enter = NULL }
};
static struct cpuidle_state spr_cstates[] __initdata = { static struct cpuidle_state spr_cstates[] __initdata = {
{ {
.name = "C1", .name = "C1",
...@@ -1309,6 +1354,10 @@ static const struct idle_cpu idle_cpu_adl_l __initconst = { ...@@ -1309,6 +1354,10 @@ static const struct idle_cpu idle_cpu_adl_l __initconst = {
.state_table = adl_l_cstates, .state_table = adl_l_cstates,
}; };
static const struct idle_cpu idle_cpu_adl_n __initconst = {
.state_table = adl_n_cstates,
};
static const struct idle_cpu idle_cpu_spr __initconst = { static const struct idle_cpu idle_cpu_spr __initconst = {
.state_table = spr_cstates, .state_table = spr_cstates,
.disable_promotion_to_c1e = true, .disable_promotion_to_c1e = true,
...@@ -1379,6 +1428,7 @@ static const struct x86_cpu_id intel_idle_ids[] __initconst = { ...@@ -1379,6 +1428,7 @@ static const struct x86_cpu_id intel_idle_ids[] __initconst = {
X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_D, &idle_cpu_icx), X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_D, &idle_cpu_icx),
X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE, &idle_cpu_adl), X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE, &idle_cpu_adl),
X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, &idle_cpu_adl_l), X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, &idle_cpu_adl_l),
X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_N, &idle_cpu_adl_n),
X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, &idle_cpu_spr), X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, &idle_cpu_spr),
X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNL, &idle_cpu_knl), X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNL, &idle_cpu_knl),
X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNM, &idle_cpu_knl), X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNM, &idle_cpu_knl),
...@@ -1507,7 +1557,7 @@ static void __init intel_idle_init_cstates_acpi(struct cpuidle_driver *drv) ...@@ -1507,7 +1557,7 @@ static void __init intel_idle_init_cstates_acpi(struct cpuidle_driver *drv)
state = &drv->states[drv->state_count++]; state = &drv->states[drv->state_count++];
snprintf(state->name, CPUIDLE_NAME_LEN, "C%d_ACPI", cstate); snprintf(state->name, CPUIDLE_NAME_LEN, "C%d_ACPI", cstate);
strlcpy(state->desc, cx->desc, CPUIDLE_DESC_LEN); strscpy(state->desc, cx->desc, CPUIDLE_DESC_LEN);
state->exit_latency = cx->latency; state->exit_latency = cx->latency;
/* /*
* For C1-type C-states use the same number for both the exit * For C1-type C-states use the same number for both the exit
...@@ -1816,6 +1866,7 @@ static void __init intel_idle_init_cstates_icpu(struct cpuidle_driver *drv) ...@@ -1816,6 +1866,7 @@ static void __init intel_idle_init_cstates_icpu(struct cpuidle_driver *drv)
break; break;
case INTEL_FAM6_ALDERLAKE: case INTEL_FAM6_ALDERLAKE:
case INTEL_FAM6_ALDERLAKE_L: case INTEL_FAM6_ALDERLAKE_L:
case INTEL_FAM6_ALDERLAKE_N:
adl_idle_state_table_update(); adl_idle_state_table_update();
break; break;
} }
......
...@@ -994,6 +994,9 @@ static u64 rapl_compute_time_window_core(struct rapl_package *rp, u64 value, ...@@ -994,6 +994,9 @@ static u64 rapl_compute_time_window_core(struct rapl_package *rp, u64 value,
y = value & 0x1f; y = value & 0x1f;
value = (1 << y) * (4 + f) * rp->time_unit / 4; value = (1 << y) * (4 + f) * rp->time_unit / 4;
} else { } else {
if (value < rp->time_unit)
return 0;
do_div(value, rp->time_unit); do_div(value, rp->time_unit);
y = ilog2(value); y = ilog2(value);
f = div64_u64(4 * (value - (1 << y)), 1 << y); f = div64_u64(4 * (value - (1 << y)), 1 << y);
...@@ -1035,7 +1038,6 @@ static const struct rapl_defaults rapl_defaults_spr_server = { ...@@ -1035,7 +1038,6 @@ static const struct rapl_defaults rapl_defaults_spr_server = {
.check_unit = rapl_check_unit_core, .check_unit = rapl_check_unit_core,
.set_floor_freq = set_floor_freq_default, .set_floor_freq = set_floor_freq_default,
.compute_time_window = rapl_compute_time_window_core, .compute_time_window = rapl_compute_time_window_core,
.dram_domain_energy_unit = 15300,
.psys_domain_energy_unit = 1000000000, .psys_domain_energy_unit = 1000000000,
.spr_psys_bits = true, .spr_psys_bits = true,
}; };
...@@ -1110,6 +1112,7 @@ static const struct x86_cpu_id rapl_ids[] __initconst = { ...@@ -1110,6 +1112,7 @@ static const struct x86_cpu_id rapl_ids[] __initconst = {
X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_N, &rapl_defaults_core), X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_N, &rapl_defaults_core),
X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE, &rapl_defaults_core), X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE, &rapl_defaults_core),
X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_P, &rapl_defaults_core), X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_P, &rapl_defaults_core),
X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_S, &rapl_defaults_core),
X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, &rapl_defaults_spr_server), X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, &rapl_defaults_spr_server),
X86_MATCH_INTEL_FAM6_MODEL(LAKEFIELD, &rapl_defaults_core), X86_MATCH_INTEL_FAM6_MODEL(LAKEFIELD, &rapl_defaults_core),
......
...@@ -75,7 +75,7 @@ extern struct suspend_stats suspend_stats; ...@@ -75,7 +75,7 @@ extern struct suspend_stats suspend_stats;
static inline void dpm_save_failed_dev(const char *name) static inline void dpm_save_failed_dev(const char *name)
{ {
strlcpy(suspend_stats.failed_devs[suspend_stats.last_failed_dev], strscpy(suspend_stats.failed_devs[suspend_stats.last_failed_dev],
name, name,
sizeof(suspend_stats.failed_devs[0])); sizeof(suspend_stats.failed_devs[0]));
suspend_stats.last_failed_dev++; suspend_stats.last_failed_dev++;
......
...@@ -25,6 +25,9 @@ struct sugov_policy { ...@@ -25,6 +25,9 @@ struct sugov_policy {
unsigned int next_freq; unsigned int next_freq;
unsigned int cached_raw_freq; unsigned int cached_raw_freq;
/* max CPU capacity, which is equal for all CPUs in freq. domain */
unsigned long max;
/* The next fields are only needed if fast switch cannot be used: */ /* The next fields are only needed if fast switch cannot be used: */
struct irq_work irq_work; struct irq_work irq_work;
struct kthread_work work; struct kthread_work work;
...@@ -48,7 +51,6 @@ struct sugov_cpu { ...@@ -48,7 +51,6 @@ struct sugov_cpu {
unsigned long util; unsigned long util;
unsigned long bw_dl; unsigned long bw_dl;
unsigned long max;
/* The field below is for single-CPU policies only: */ /* The field below is for single-CPU policies only: */
#ifdef CONFIG_NO_HZ_COMMON #ifdef CONFIG_NO_HZ_COMMON
...@@ -158,7 +160,6 @@ static void sugov_get_util(struct sugov_cpu *sg_cpu) ...@@ -158,7 +160,6 @@ static void sugov_get_util(struct sugov_cpu *sg_cpu)
{ {
struct rq *rq = cpu_rq(sg_cpu->cpu); struct rq *rq = cpu_rq(sg_cpu->cpu);
sg_cpu->max = arch_scale_cpu_capacity(sg_cpu->cpu);
sg_cpu->bw_dl = cpu_bw_dl(rq); sg_cpu->bw_dl = cpu_bw_dl(rq);
sg_cpu->util = effective_cpu_util(sg_cpu->cpu, cpu_util_cfs(sg_cpu->cpu), sg_cpu->util = effective_cpu_util(sg_cpu->cpu, cpu_util_cfs(sg_cpu->cpu),
FREQUENCY_UTIL, NULL); FREQUENCY_UTIL, NULL);
...@@ -253,6 +254,7 @@ static void sugov_iowait_boost(struct sugov_cpu *sg_cpu, u64 time, ...@@ -253,6 +254,7 @@ static void sugov_iowait_boost(struct sugov_cpu *sg_cpu, u64 time,
*/ */
static void sugov_iowait_apply(struct sugov_cpu *sg_cpu, u64 time) static void sugov_iowait_apply(struct sugov_cpu *sg_cpu, u64 time)
{ {
struct sugov_policy *sg_policy = sg_cpu->sg_policy;
unsigned long boost; unsigned long boost;
/* No boost currently required */ /* No boost currently required */
...@@ -280,7 +282,8 @@ static void sugov_iowait_apply(struct sugov_cpu *sg_cpu, u64 time) ...@@ -280,7 +282,8 @@ static void sugov_iowait_apply(struct sugov_cpu *sg_cpu, u64 time)
* sg_cpu->util is already in capacity scale; convert iowait_boost * sg_cpu->util is already in capacity scale; convert iowait_boost
* into the same scale so we can compare. * into the same scale so we can compare.
*/ */
boost = (sg_cpu->iowait_boost * sg_cpu->max) >> SCHED_CAPACITY_SHIFT; boost = sg_cpu->iowait_boost * sg_policy->max;
boost >>= SCHED_CAPACITY_SHIFT;
boost = uclamp_rq_util_with(cpu_rq(sg_cpu->cpu), boost, NULL); boost = uclamp_rq_util_with(cpu_rq(sg_cpu->cpu), boost, NULL);
if (sg_cpu->util < boost) if (sg_cpu->util < boost)
sg_cpu->util = boost; sg_cpu->util = boost;
...@@ -337,7 +340,7 @@ static void sugov_update_single_freq(struct update_util_data *hook, u64 time, ...@@ -337,7 +340,7 @@ static void sugov_update_single_freq(struct update_util_data *hook, u64 time,
if (!sugov_update_single_common(sg_cpu, time, flags)) if (!sugov_update_single_common(sg_cpu, time, flags))
return; return;
next_f = get_next_freq(sg_policy, sg_cpu->util, sg_cpu->max); next_f = get_next_freq(sg_policy, sg_cpu->util, sg_policy->max);
/* /*
* Do not reduce the frequency if the CPU has not been idle * Do not reduce the frequency if the CPU has not been idle
* recently, as the reduction is likely to be premature then. * recently, as the reduction is likely to be premature then.
...@@ -373,6 +376,7 @@ static void sugov_update_single_perf(struct update_util_data *hook, u64 time, ...@@ -373,6 +376,7 @@ static void sugov_update_single_perf(struct update_util_data *hook, u64 time,
unsigned int flags) unsigned int flags)
{ {
struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util); struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
struct sugov_policy *sg_policy = sg_cpu->sg_policy;
unsigned long prev_util = sg_cpu->util; unsigned long prev_util = sg_cpu->util;
/* /*
...@@ -399,7 +403,8 @@ static void sugov_update_single_perf(struct update_util_data *hook, u64 time, ...@@ -399,7 +403,8 @@ static void sugov_update_single_perf(struct update_util_data *hook, u64 time,
sg_cpu->util = prev_util; sg_cpu->util = prev_util;
cpufreq_driver_adjust_perf(sg_cpu->cpu, map_util_perf(sg_cpu->bw_dl), cpufreq_driver_adjust_perf(sg_cpu->cpu, map_util_perf(sg_cpu->bw_dl),
map_util_perf(sg_cpu->util), sg_cpu->max); map_util_perf(sg_cpu->util),
sg_policy->max);
sg_cpu->sg_policy->last_freq_update_time = time; sg_cpu->sg_policy->last_freq_update_time = time;
} }
...@@ -408,25 +413,19 @@ static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu, u64 time) ...@@ -408,25 +413,19 @@ static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu, u64 time)
{ {
struct sugov_policy *sg_policy = sg_cpu->sg_policy; struct sugov_policy *sg_policy = sg_cpu->sg_policy;
struct cpufreq_policy *policy = sg_policy->policy; struct cpufreq_policy *policy = sg_policy->policy;
unsigned long util = 0, max = 1; unsigned long util = 0;
unsigned int j; unsigned int j;
for_each_cpu(j, policy->cpus) { for_each_cpu(j, policy->cpus) {
struct sugov_cpu *j_sg_cpu = &per_cpu(sugov_cpu, j); struct sugov_cpu *j_sg_cpu = &per_cpu(sugov_cpu, j);
unsigned long j_util, j_max;
sugov_get_util(j_sg_cpu); sugov_get_util(j_sg_cpu);
sugov_iowait_apply(j_sg_cpu, time); sugov_iowait_apply(j_sg_cpu, time);
j_util = j_sg_cpu->util;
j_max = j_sg_cpu->max;
if (j_util * max > j_max * util) { util = max(j_sg_cpu->util, util);
util = j_util;
max = j_max;
}
} }
return get_next_freq(sg_policy, util, max); return get_next_freq(sg_policy, util, sg_policy->max);
} }
static void static void
...@@ -752,7 +751,7 @@ static int sugov_start(struct cpufreq_policy *policy) ...@@ -752,7 +751,7 @@ static int sugov_start(struct cpufreq_policy *policy)
{ {
struct sugov_policy *sg_policy = policy->governor_data; struct sugov_policy *sg_policy = policy->governor_data;
void (*uu)(struct update_util_data *data, u64 time, unsigned int flags); void (*uu)(struct update_util_data *data, u64 time, unsigned int flags);
unsigned int cpu; unsigned int cpu = cpumask_first(policy->cpus);
sg_policy->freq_update_delay_ns = sg_policy->tunables->rate_limit_us * NSEC_PER_USEC; sg_policy->freq_update_delay_ns = sg_policy->tunables->rate_limit_us * NSEC_PER_USEC;
sg_policy->last_freq_update_time = 0; sg_policy->last_freq_update_time = 0;
...@@ -760,6 +759,7 @@ static int sugov_start(struct cpufreq_policy *policy) ...@@ -760,6 +759,7 @@ static int sugov_start(struct cpufreq_policy *policy)
sg_policy->work_in_progress = false; sg_policy->work_in_progress = false;
sg_policy->limits_changed = false; sg_policy->limits_changed = false;
sg_policy->cached_raw_freq = 0; sg_policy->cached_raw_freq = 0;
sg_policy->max = arch_scale_cpu_capacity(cpu);
sg_policy->need_freq_update = cpufreq_driver_test_flags(CPUFREQ_NEED_UPDATE_LIMITS); sg_policy->need_freq_update = cpufreq_driver_test_flags(CPUFREQ_NEED_UPDATE_LIMITS);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment