Commit 7425ecd5 authored by Rafael J. Wysocki

Merge branch 'pm-cpufreq'

Merge cpufreq changes for 4.19.

These are driver extensions, some driver and core fixes and a new
tracepoint for the tracking of frequency limits changes (coming from
Android).

* pm-cpufreq:
  cpufreq: intel_pstate: Ignore turbo active ratio in HWP
  cpufreq: Fix a circular lock dependency problem
  cpu/hotplug: Add a cpus_read_trylock() function
  cpufreq: trace frequency limits change
  cpufreq: intel_pstate: Show different max frequency with turbo 3 and HWP
  cpufreq: pcc-cpufreq: Disable dynamic scaling on many-CPU systems
  cpufreq: qcom-kryo: Silently error out on EPROBE_DEFER
  cpufreq / CPPC: Add cpuinfo_cur_freq support for CPPC
  cpufreq: armada-37xx: Add AVS support
  dt-bindings: marvell: Add documentation for the Armada 3700 AVS binding
  cpufreq: imx6q/thermal: imx: register cooling device depending on OF
  cpufreq: intel_pstate: use match_string() helper
parents 8f0f2b62 d3264f75
@@ -33,3 +33,18 @@ nb_pm: syscon@14000 {
compatible = "marvell,armada-3700-nb-pm", "syscon";
reg = <0x14000 0x60>;
}
AVS
---
For AVS another component is needed:
Required properties:
- compatible : should contain "marvell,armada-3700-avs", "syscon";
- reg : the register start and length for the AVS
Example:
avs: avs@11500 {
compatible = "marvell,armada-3700-avs", "syscon";
reg = <0x11500 0x40>;
}
@@ -27,6 +27,7 @@ cpufreq.
cpu_idle "state=%lu cpu_id=%lu"
cpu_frequency "state=%lu cpu_id=%lu"
cpu_frequency_limits "min=%lu max=%lu cpu_id=%lu"
A suspend event is used to indicate the system going in and out of the
suspend mode:
...
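For illustration only (the numbers below are hypothetical), a cpu_frequency_limits event rendered by ftrace in the documented format would read:

	cpu_frequency_limits: min=800000 max=2400000 cpu_id=0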
@@ -51,6 +51,16 @@
#define ARMADA_37XX_DVFS_LOAD_2 2
#define ARMADA_37XX_DVFS_LOAD_3 3
/* AVS register set */
#define ARMADA_37XX_AVS_CTL0 0x0
#define ARMADA_37XX_AVS_ENABLE BIT(30)
#define ARMADA_37XX_AVS_HIGH_VDD_LIMIT 16
#define ARMADA_37XX_AVS_LOW_VDD_LIMIT 22
#define ARMADA_37XX_AVS_VDD_MASK 0x3F
#define ARMADA_37XX_AVS_CTL2 0x8
#define ARMADA_37XX_AVS_LOW_VDD_EN BIT(6)
#define ARMADA_37XX_AVS_VSET(x) (0x1C + 4 * (x))
/*
* On Armada 37xx the Power management manages 4 level of CPU load,
* each level can be associated with a CPU clock source, a CPU
@@ -58,6 +68,17 @@
*/
#define LOAD_LEVEL_NR 4
#define MIN_VOLT_MV 1000
/* AVS value for the corresponding voltage (in mV) */
static int avs_map[] = {
747, 758, 770, 782, 793, 805, 817, 828, 840, 852, 863, 875, 887, 898,
910, 922, 933, 945, 957, 968, 980, 992, 1003, 1015, 1027, 1038, 1050,
1062, 1073, 1085, 1097, 1108, 1120, 1132, 1143, 1155, 1167, 1178, 1190,
1202, 1213, 1225, 1237, 1248, 1260, 1272, 1283, 1295, 1307, 1318, 1330,
1342
};
struct armada37xx_cpufreq_state {
struct regmap *regmap;
u32 nb_l0l1;
@@ -71,6 +92,7 @@ static struct armada37xx_cpufreq_state *armada37xx_cpufreq_state;
struct armada_37xx_dvfs {
u32 cpu_freq_max;
u8 divider[LOAD_LEVEL_NR];
u32 avs[LOAD_LEVEL_NR];
};
static struct armada_37xx_dvfs armada_37xx_dvfs[] = {
@@ -148,6 +170,128 @@ static void __init armada37xx_cpufreq_dvfs_setup(struct regmap *base,
clk_set_parent(clk, parent);
}
/*
* Find out the armada 37x supported AVS value whose voltage value is
* the round-up closest to the target voltage value.
*/
static u32 armada_37xx_avs_val_match(int target_vm)
{
u32 avs;
/* Find out the round-up closest supported voltage value */
for (avs = 0; avs < ARRAY_SIZE(avs_map); avs++)
if (avs_map[avs] >= target_vm)
break;
/*
* If all supported voltages are smaller than target one,
* choose the largest supported voltage
*/
if (avs == ARRAY_SIZE(avs_map))
avs = ARRAY_SIZE(avs_map) - 1;
return avs;
}
/*
* For Armada 37xx soc, L0(VSET0) VDD AVS value is set to SVC revision
* value or a default value when SVC is not supported.
* - L0 can be read out from the register of AVS_CTRL_0 and L0 voltage
* can be got from the mapping table of avs_map.
* - L1 voltage should be about 100mv smaller than L0 voltage
* - L2 & L3 voltage should be about 150mv smaller than L0 voltage.
* This function calculates L1 & L2 & L3 AVS values dynamically based
* on L0 voltage and fill all AVS values to the AVS value table.
*/
static void __init armada37xx_cpufreq_avs_configure(struct regmap *base,
struct armada_37xx_dvfs *dvfs)
{
unsigned int target_vm;
int load_level = 0;
u32 l0_vdd_min;
if (base == NULL)
return;
/* Get L0 VDD min value */
regmap_read(base, ARMADA_37XX_AVS_CTL0, &l0_vdd_min);
l0_vdd_min = (l0_vdd_min >> ARMADA_37XX_AVS_LOW_VDD_LIMIT) &
ARMADA_37XX_AVS_VDD_MASK;
if (l0_vdd_min >= ARRAY_SIZE(avs_map)) {
pr_err("L0 VDD MIN %d is not correct.\n", l0_vdd_min);
return;
}
dvfs->avs[0] = l0_vdd_min;
if (avs_map[l0_vdd_min] <= MIN_VOLT_MV) {
/*
* If L0 voltage is smaller than 1000mv, then all VDD sets
* use L0 voltage;
*/
u32 avs_min = armada_37xx_avs_val_match(MIN_VOLT_MV);
for (load_level = 1; load_level < LOAD_LEVEL_NR; load_level++)
dvfs->avs[load_level] = avs_min;
return;
}
/*
* L1 voltage is equal to L0 voltage - 100mv and it must be
* larger than 1000mv
*/
target_vm = avs_map[l0_vdd_min] - 100;
target_vm = target_vm > MIN_VOLT_MV ? target_vm : MIN_VOLT_MV;
dvfs->avs[1] = armada_37xx_avs_val_match(target_vm);
/*
* L2 & L3 voltage is equal to L0 voltage - 150mv and it must
* be larger than 1000mv
*/
target_vm = avs_map[l0_vdd_min] - 150;
target_vm = target_vm > MIN_VOLT_MV ? target_vm : MIN_VOLT_MV;
dvfs->avs[2] = dvfs->avs[3] = armada_37xx_avs_val_match(target_vm);
}
static void __init armada37xx_cpufreq_avs_setup(struct regmap *base,
struct armada_37xx_dvfs *dvfs)
{
unsigned int avs_val = 0, freq;
int load_level = 0;
if (base == NULL)
return;
/* Disable AVS before the configuration */
regmap_update_bits(base, ARMADA_37XX_AVS_CTL0,
ARMADA_37XX_AVS_ENABLE, 0);
/* Enable low voltage mode */
regmap_update_bits(base, ARMADA_37XX_AVS_CTL2,
ARMADA_37XX_AVS_LOW_VDD_EN,
ARMADA_37XX_AVS_LOW_VDD_EN);
for (load_level = 1; load_level < LOAD_LEVEL_NR; load_level++) {
freq = dvfs->cpu_freq_max / dvfs->divider[load_level];
avs_val = dvfs->avs[load_level];
regmap_update_bits(base, ARMADA_37XX_AVS_VSET(load_level-1),
ARMADA_37XX_AVS_VDD_MASK << ARMADA_37XX_AVS_HIGH_VDD_LIMIT |
ARMADA_37XX_AVS_VDD_MASK << ARMADA_37XX_AVS_LOW_VDD_LIMIT,
avs_val << ARMADA_37XX_AVS_HIGH_VDD_LIMIT |
avs_val << ARMADA_37XX_AVS_LOW_VDD_LIMIT);
}
/* Enable AVS after the configuration */
regmap_update_bits(base, ARMADA_37XX_AVS_CTL0,
ARMADA_37XX_AVS_ENABLE,
ARMADA_37XX_AVS_ENABLE);
}
static void armada37xx_cpufreq_disable_dvfs(struct regmap *base)
{
unsigned int reg = ARMADA_37XX_NB_DYN_MOD,
@@ -216,7 +360,7 @@ static int __init armada37xx_cpufreq_driver_init(void)
struct platform_device *pdev;
unsigned long freq;
unsigned int cur_frequency;
struct regmap *nb_pm_base;
struct regmap *nb_pm_base, *avs_base;
struct device *cpu_dev;
int load_lvl, ret;
struct clk *clk;
@@ -227,6 +371,14 @@ static int __init armada37xx_cpufreq_driver_init(void)
if (IS_ERR(nb_pm_base))
return -ENODEV;
avs_base =
syscon_regmap_lookup_by_compatible("marvell,armada-3700-avs");
/* if AVS is not present don't use it but still try to setup dvfs */
if (IS_ERR(avs_base)) {
pr_info("Syscon failed for Adapting Voltage Scaling: skip it\n");
avs_base = NULL;
}
/* Before doing any configuration on the DVFS first, disable it */
armada37xx_cpufreq_disable_dvfs(nb_pm_base);
@@ -270,16 +422,21 @@ static int __init armada37xx_cpufreq_driver_init(void)
armada37xx_cpufreq_state->regmap = nb_pm_base;
armada37xx_cpufreq_avs_configure(avs_base, dvfs);
armada37xx_cpufreq_avs_setup(avs_base, dvfs);
armada37xx_cpufreq_dvfs_setup(nb_pm_base, clk, dvfs->divider);
clk_put(clk);
for (load_lvl = ARMADA_37XX_DVFS_LOAD_0; load_lvl < LOAD_LEVEL_NR;
load_lvl++) {
unsigned long u_volt = avs_map[dvfs->avs[load_lvl]] * 1000;
freq = cur_frequency / dvfs->divider[load_lvl];
ret = dev_pm_opp_add(cpu_dev, freq, 0);
ret = dev_pm_opp_add(cpu_dev, freq, u_volt);
if (ret)
goto remove_opp;
}
/* Now that everything is setup, enable the DVFS at hardware level */
...
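The voltage derivation described in the comment above armada37xx_cpufreq_avs_configure() (L0 read from hardware, L1 roughly 100 mV below it, L2/L3 roughly 150 mV below it, never under 1000 mV, always rounded up to the nearest supported step) can be checked with a small stand-alone sketch. The table and rounding rule below are copied from the patch; the sample L0 index is made up:

#include <stdio.h>

/* Voltage map (mV) copied from the patch; the index is the AVS value. */
static const int avs_map[] = {
	747, 758, 770, 782, 793, 805, 817, 828, 840, 852, 863, 875, 887, 898,
	910, 922, 933, 945, 957, 968, 980, 992, 1003, 1015, 1027, 1038, 1050,
	1062, 1073, 1085, 1097, 1108, 1120, 1132, 1143, 1155, 1167, 1178, 1190,
	1202, 1213, 1225, 1237, 1248, 1260, 1272, 1283, 1295, 1307, 1318, 1330,
	1342
};
#define AVS_MAP_SZ	(sizeof(avs_map) / sizeof(avs_map[0]))
#define MIN_VOLT_MV	1000

/* Round up to the closest supported step, like armada_37xx_avs_val_match(). */
static unsigned int avs_val_match(int target_mv)
{
	unsigned int avs;

	for (avs = 0; avs < AVS_MAP_SZ; avs++)
		if (avs_map[avs] >= target_mv)
			return avs;
	return AVS_MAP_SZ - 1;
}

int main(void)
{
	unsigned int l0 = 35;		/* hypothetical L0 VDD min read from AVS_CTL0 */
	int l1_mv = avs_map[l0] - 100;	/* L1 is ~100 mV below L0 */
	int l23_mv = avs_map[l0] - 150;	/* L2/L3 are ~150 mV below L0 */

	if (l1_mv < MIN_VOLT_MV)
		l1_mv = MIN_VOLT_MV;	/* never go below the 1000 mV floor */
	if (l23_mv < MIN_VOLT_MV)
		l23_mv = MIN_VOLT_MV;

	printf("L0:    AVS %u -> %d mV\n", l0, avs_map[l0]);
	printf("L1:    AVS %u -> %d mV\n", avs_val_match(l1_mv), avs_map[avs_val_match(l1_mv)]);
	printf("L2/L3: AVS %u -> %d mV\n", avs_val_match(l23_mv), avs_map[avs_val_match(l23_mv)]);
	return 0;
}

With the sample L0 index of 35 (1155 mV) this prints an L1 of 1062 mV and L2/L3 of 1015 mV, which is the same rounding the driver applies before programming the VSET registers and registering the OPP voltages.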
@@ -296,10 +296,62 @@ static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy)
return ret;
}
static inline u64 get_delta(u64 t1, u64 t0)
{
if (t1 > t0 || t0 > ~(u32)0)
return t1 - t0;
return (u32)t1 - (u32)t0;
}
static int cppc_get_rate_from_fbctrs(struct cppc_cpudata *cpu,
struct cppc_perf_fb_ctrs fb_ctrs_t0,
struct cppc_perf_fb_ctrs fb_ctrs_t1)
{
u64 delta_reference, delta_delivered;
u64 reference_perf, delivered_perf;
reference_perf = fb_ctrs_t0.reference_perf;
delta_reference = get_delta(fb_ctrs_t1.reference,
fb_ctrs_t0.reference);
delta_delivered = get_delta(fb_ctrs_t1.delivered,
fb_ctrs_t0.delivered);
/* Check to avoid divide-by zero */
if (delta_reference || delta_delivered)
delivered_perf = (reference_perf * delta_delivered) /
delta_reference;
else
delivered_perf = cpu->perf_ctrls.desired_perf;
return cppc_cpufreq_perf_to_khz(cpu, delivered_perf);
}
static unsigned int cppc_cpufreq_get_rate(unsigned int cpunum)
{
struct cppc_perf_fb_ctrs fb_ctrs_t0 = {0}, fb_ctrs_t1 = {0};
struct cppc_cpudata *cpu = all_cpu_data[cpunum];
int ret;
ret = cppc_get_perf_ctrs(cpunum, &fb_ctrs_t0);
if (ret)
return ret;
udelay(2); /* 2usec delay between sampling */
ret = cppc_get_perf_ctrs(cpunum, &fb_ctrs_t1);
if (ret)
return ret;
return cppc_get_rate_from_fbctrs(cpu, fb_ctrs_t0, fb_ctrs_t1);
}
static struct cpufreq_driver cppc_cpufreq_driver = {
.flags = CPUFREQ_CONST_LOOPS,
.verify = cppc_verify_policy,
.target = cppc_cpufreq_set_target,
.get = cppc_cpufreq_get_rate,
.init = cppc_cpufreq_cpu_init,
.stop_cpu = cppc_cpufreq_stop_cpu,
.name = "cppc_cpufreq",
...
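The new cppc_cpufreq_get_rate() callback samples the CPPC feedback counters twice, about 2 us apart, and computes delivered_perf = reference_perf * delta_delivered / delta_reference, falling back to the desired performance when the counters did not move. A stand-alone sketch with made-up counter values (everything below is hypothetical except the get_delta() logic, which mirrors the patch) shows the arithmetic, including the 32-bit wrap handling:

#include <stdint.h>
#include <stdio.h>

/*
 * Wrap-safe delta, mirroring get_delta() in the patch: if t0 fits in 32 bits
 * and t1 did not move past it, assume a 32-bit counter wrapped between the
 * two samples and subtract in 32-bit arithmetic instead.
 */
static uint64_t get_delta(uint64_t t1, uint64_t t0)
{
	if (t1 > t0 || t0 > (uint64_t)UINT32_MAX)
		return t1 - t0;
	return (uint32_t)t1 - (uint32_t)t0;
}

int main(void)
{
	/* Hypothetical counter snapshots; the counters wrapped a 32-bit
	 * boundary between the two reads. */
	uint64_t ref_t0 = 0xFFFFFF00u, ref_t1 = 0x00000100u;
	uint64_t del_t0 = 0xFFFFFE00u, del_t1 = 0x00000400u;
	uint64_t reference_perf = 100;	/* hypothetical reference performance */

	uint64_t delta_reference = get_delta(ref_t1, ref_t0);	/* 0x200 */
	uint64_t delta_delivered = get_delta(del_t1, del_t0);	/* 0x600 */
	uint64_t delivered_perf = reference_perf * delta_delivered / delta_reference;

	/* 100 * 0x600 / 0x200 = 300: the CPU ran at 3x its reference level. */
	printf("delivered_perf = %llu\n", (unsigned long long)delivered_perf);
	return 0;
}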
@@ -923,7 +923,12 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr,
struct freq_attr *fattr = to_attr(attr);
ssize_t ret = -EINVAL;
cpus_read_lock();
/*
* cpus_read_trylock() is used here to work around a circular lock
* dependency problem with respect to the cpufreq_register_driver().
*/
if (!cpus_read_trylock())
return -EBUSY;
if (cpu_online(policy->cpu)) {
down_write(&policy->rwsem);
@@ -2236,6 +2241,7 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
policy->min = new_policy->min;
policy->max = new_policy->max;
trace_cpu_frequency_limits(policy);
policy->cached_target_freq = UINT_MAX;
...
@@ -9,6 +9,7 @@
#include <linux/clk.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/cpu_cooling.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/of.h>
@@ -50,6 +51,7 @@ static struct clk_bulk_data clks[] = {
};
static struct device *cpu_dev;
static struct thermal_cooling_device *cdev;
static bool free_opp;
static struct cpufreq_frequency_table *freq_table;
static unsigned int max_freq;
@@ -191,6 +193,16 @@ static int imx6q_set_target(struct cpufreq_policy *policy, unsigned int index)
return 0;
}
static void imx6q_cpufreq_ready(struct cpufreq_policy *policy)
{
cdev = of_cpufreq_cooling_register(policy);
if (!cdev)
dev_err(cpu_dev,
"running cpufreq without cooling device: %ld\n",
PTR_ERR(cdev));
}
static int imx6q_cpufreq_init(struct cpufreq_policy *policy)
{
int ret;
@@ -202,13 +214,22 @@ static int imx6q_cpufreq_init(struct cpufreq_policy *policy)
return ret;
}
static int imx6q_cpufreq_exit(struct cpufreq_policy *policy)
{
cpufreq_cooling_unregister(cdev);
return 0;
}
static struct cpufreq_driver imx6q_cpufreq_driver = {
.flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK,
.verify = cpufreq_generic_frequency_table_verify,
.target_index = imx6q_set_target,
.get = cpufreq_generic_get,
.init = imx6q_cpufreq_init,
.exit = imx6q_cpufreq_exit,
.name = "imx6q-cpufreq",
.ready = imx6q_cpufreq_ready,
.attr = cpufreq_generic_attr,
.suspend = cpufreq_generic_suspend,
};
...
@@ -670,21 +670,18 @@ static ssize_t store_energy_performance_preference(
{
struct cpudata *cpu_data = all_cpu_data[policy->cpu];
char str_preference[21];
int ret, i = 0;
int ret;
ret = sscanf(buf, "%20s", str_preference);
if (ret != 1)
return -EINVAL;
while (energy_perf_strings[i] != NULL) {
if (!strcmp(str_preference, energy_perf_strings[i])) {
intel_pstate_set_energy_pref_index(cpu_data, i);
return count;
}
++i;
}
ret = match_string(energy_perf_strings, -1, str_preference);
if (ret < 0)
return ret;
return -EINVAL;
intel_pstate_set_energy_pref_index(cpu_data, ret);
return count;
}
static ssize_t show_energy_performance_preference(
@@ -2011,7 +2008,8 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
static void intel_pstate_adjust_policy_max(struct cpufreq_policy *policy,
struct cpudata *cpu)
{
if (cpu->pstate.max_pstate_physical > cpu->pstate.max_pstate &&
if (!hwp_active &&
cpu->pstate.max_pstate_physical > cpu->pstate.max_pstate &&
policy->max < policy->cpuinfo.max_freq &&
policy->max > cpu->pstate.max_freq) {
pr_debug("policy->max > max non turbo frequency\n");
@@ -2085,6 +2083,15 @@ static int __intel_pstate_cpu_init(struct cpufreq_policy *policy)
cpu->pstate.max_pstate : cpu->pstate.turbo_pstate;
policy->cpuinfo.max_freq *= cpu->pstate.scaling;
if (hwp_active) {
unsigned int max_freq;
max_freq = global.turbo_disabled ?
cpu->pstate.max_freq : cpu->pstate.turbo_freq;
if (max_freq < policy->cpuinfo.max_freq)
policy->cpuinfo.max_freq = max_freq;
}
intel_pstate_init_acpi_perf_limits(policy);
policy->fast_switch_possible = true;
...
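The store_energy_performance_preference() change above replaces an open-coded string search with the kernel's match_string() helper: given a NULL-terminated array and a size of -1, it returns the index of the matching entry or -EINVAL. A minimal kernel-context sketch of that return convention (the table contents here are illustrative, not taken from the patch):

#include <linux/string.h>

/* NULL-terminated table, as expected when passing a size of -1. */
static const char * const example_prefs[] = {
	"performance",
	"balance_performance",
	"power",
	NULL,
};

static int example_pref_to_index(const char *buf)
{
	/*
	 * Returns 0..2 for a known preference and -EINVAL otherwise, so the
	 * result doubles as both an error code and a table index.
	 */
	return match_string(example_prefs, -1, buf);
}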
@@ -593,6 +593,15 @@ static int __init pcc_cpufreq_init(void)
return ret;
}
if (num_present_cpus() > 4) {
pcc_cpufreq_driver.flags |= CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING;
pr_err("%s: Too many CPUs, dynamic performance scaling disabled\n",
__func__);
pr_err("%s: Try to enable another scaling driver through BIOS settings\n",
__func__);
pr_err("%s: and complain to the system vendor\n", __func__);
}
ret = cpufreq_register_driver(&pcc_cpufreq_driver);
return ret;
...
@@ -109,8 +109,9 @@ static int qcom_cpufreq_kryo_probe(struct platform_device *pdev)
speedbin_nvmem = of_nvmem_cell_get(np, NULL);
of_node_put(np);
if (IS_ERR(speedbin_nvmem)) {
dev_err(cpu_dev, "Could not get nvmem cell: %ld\n",
PTR_ERR(speedbin_nvmem));
if (PTR_ERR(speedbin_nvmem) != -EPROBE_DEFER)
dev_err(cpu_dev, "Could not get nvmem cell: %ld\n",
PTR_ERR(speedbin_nvmem));
return PTR_ERR(speedbin_nvmem);
}
...
@@ -3,6 +3,7 @@
// Copyright 2013 Freescale Semiconductor, Inc.
#include <linux/clk.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/cpu_cooling.h>
#include <linux/delay.h>
@@ -644,6 +645,27 @@ static const struct of_device_id of_imx_thermal_match[] = {
};
MODULE_DEVICE_TABLE(of, of_imx_thermal_match);
/*
* Create cooling device in case no #cooling-cells property is available in
* CPU node
*/
static int imx_thermal_register_legacy_cooling(struct imx_thermal_data *data)
{
struct device_node *np = of_get_cpu_node(data->policy->cpu, NULL);
int ret;
if (!np || !of_find_property(np, "#cooling-cells", NULL)) {
data->cdev = cpufreq_cooling_register(data->policy);
if (IS_ERR(data->cdev)) {
ret = PTR_ERR(data->cdev);
cpufreq_cpu_put(data->policy);
return ret;
}
}
return 0;
}
static int imx_thermal_probe(struct platform_device *pdev)
{
struct imx_thermal_data *data;
@@ -724,12 +746,10 @@ static int imx_thermal_probe(struct platform_device *pdev)
return -EPROBE_DEFER;
}
data->cdev = cpufreq_cooling_register(data->policy);
if (IS_ERR(data->cdev)) {
ret = PTR_ERR(data->cdev);
ret = imx_thermal_register_legacy_cooling(data);
if (ret) {
dev_err(&pdev->dev,
"failed to register cpufreq cooling device: %d\n", ret);
cpufreq_cpu_put(data->policy);
return ret;
}
...
@@ -103,6 +103,7 @@ extern void cpus_write_lock(void);
extern void cpus_write_unlock(void);
extern void cpus_read_lock(void);
extern void cpus_read_unlock(void);
extern int cpus_read_trylock(void);
extern void lockdep_assert_cpus_held(void);
extern void cpu_hotplug_disable(void);
extern void cpu_hotplug_enable(void);
@@ -115,6 +116,7 @@ static inline void cpus_write_lock(void) { }
static inline void cpus_write_unlock(void) { }
static inline void cpus_read_lock(void) { }
static inline void cpus_read_unlock(void) { }
static inline int cpus_read_trylock(void) { return true; }
static inline void lockdep_assert_cpus_held(void) { }
static inline void cpu_hotplug_disable(void) { }
static inline void cpu_hotplug_enable(void) { }
...
@@ -5,6 +5,7 @@
#if !defined(_TRACE_POWER_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_POWER_H
#include <linux/cpufreq.h>
#include <linux/ktime.h>
#include <linux/pm_qos.h>
#include <linux/tracepoint.h>
@@ -148,6 +149,30 @@ DEFINE_EVENT(cpu, cpu_frequency,
TP_ARGS(frequency, cpu_id)
);
TRACE_EVENT(cpu_frequency_limits,
TP_PROTO(struct cpufreq_policy *policy),
TP_ARGS(policy),
TP_STRUCT__entry(
__field(u32, min_freq)
__field(u32, max_freq)
__field(u32, cpu_id)
),
TP_fast_assign(
__entry->min_freq = policy->min;
__entry->max_freq = policy->max;
__entry->cpu_id = policy->cpu;
),
TP_printk("min=%lu max=%lu cpu_id=%lu",
(unsigned long)__entry->min_freq,
(unsigned long)__entry->max_freq,
(unsigned long)__entry->cpu_id)
);
TRACE_EVENT(device_pm_callback_start,
TP_PROTO(struct device *dev, const char *pm_ops, int event),
...
@@ -290,6 +290,12 @@ void cpus_read_lock(void)
}
EXPORT_SYMBOL_GPL(cpus_read_lock);
int cpus_read_trylock(void)
{
return percpu_down_read_trylock(&cpu_hotplug_lock);
}
EXPORT_SYMBOL_GPL(cpus_read_trylock);
void cpus_read_unlock(void)
{
percpu_up_read(&cpu_hotplug_lock);
...