Commit 4d25ec19 authored by Linus Torvalds

Merge branch 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/rzhang/linux

Pull thermal management updates from Zhang Rui:

 - Improve thermal cpu_cooling interaction with cpufreq core.

   The cpu_cooling driver is designed to use CPU frequency scaling to
   avoid high thermal states on a platform, but it wasn't glued very
   well to the cpufreq core.

   For example, clipped-cpus is copied from the policy structure, and it's
   much better to use the policy->cpus (or related_cpus) fields directly,
   as they may have been updated. Not that things were broken before this
   series, but they can be optimized a bit more.

   This series improves the interaction between the cpufreq core and the
   cpu_cooling driver and makes a number of fixes/cleanups to the
   cpu_cooling driver; the resulting registration API is sketched after
   the bullet points below. (Viresh Kumar)

 - A couple of fixes and cleanups in the thermal core and in the imx,
   hisilicon, bcm_2835, and int340x thermal drivers. (Arvind Yadav, Dan
   Carpenter, Sumeet Pawnikar, Srinivas Pandruvada, Willy WOLFF)
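
   As a rough, hypothetical sketch (not part of the pull request text),
   the driver-facing change boils down to handing the cpufreq policy
   itself to the cooling-registration helpers instead of a cpumask, so
   cpu_cooling can always consult policy->cpus/related_cpus on its own.
   The example_cpufreq_ready() name below is made up, but the calls
   mirror the dbx500 and arm_big_little hunks in the diff:

       #include <linux/cpufreq.h>
       #include <linux/cpu_cooling.h>
       #include <linux/err.h>

       static struct thermal_cooling_device *cdev;

       /* After this series: register against the policy, not a cpumask. */
       static void example_cpufreq_ready(struct cpufreq_policy *policy)
       {
               /* was: cdev = cpufreq_cooling_register(policy->related_cpus); */
               cdev = cpufreq_cooling_register(policy);
               if (IS_ERR(cdev))
                       pr_err("Failed to register cooling device %ld\n",
                              PTR_ERR(cdev));
       }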

* 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/rzhang/linux: (24 commits)
  thermal: bcm2835: fix an error code in probe()
  thermal: hisilicon: Handle return value of clk_prepare_enable
  thermal: imx: Handle return value of clk_prepare_enable
  thermal: int340x: check for sensor when PTYP is missing
  Thermal/int340x: Fix few typos and kernel-doc style
  thermal: fix source code documentation for parameters
  thermal: cpu_cooling: Replace kmalloc with kmalloc_array
  thermal: cpu_cooling: Rearrange struct cpufreq_cooling_device
  thermal: cpu_cooling: 'freq' can't be zero in cpufreq_state2power()
  thermal: cpu_cooling: don't store cpu_dev in cpufreq_cdev
  thermal: cpu_cooling: get_level() can't fail
  thermal: cpu_cooling: create structure for idle time stats
  thermal: cpu_cooling: merge frequency and power tables
  thermal: cpu_cooling: get rid of 'allowed_cpus'
  thermal: cpu_cooling: OPPs are registered for all CPUs
  thermal: cpu_cooling: store cpufreq policy
  cpufreq: create cpufreq_table_count_valid_entries()
  thermal: cpu_cooling: use cpufreq_policy to register cooling device
  thermal: cpu_cooling: get rid of a variable in cpufreq_set_cur_state()
  thermal: cpu_cooling: remove cpufreq_cooling_get_level()
  ...
parents 907afe59 5d72ed35
......@@ -540,7 +540,7 @@ static void bL_cpufreq_ready(struct cpufreq_policy *policy)
&power_coefficient);
cdev[cur_cluster] = of_cpufreq_power_cooling_register(np,
policy->related_cpus, power_coefficient, NULL);
policy, power_coefficient, NULL);
if (IS_ERR(cdev[cur_cluster])) {
dev_err(cpu_dev,
"running cpufreq without cooling device: %ld\n",
......
......@@ -326,7 +326,7 @@ static void cpufreq_ready(struct cpufreq_policy *policy)
&power_coefficient);
priv->cdev = of_cpufreq_power_cooling_register(np,
policy->related_cpus, power_coefficient, NULL);
policy, power_coefficient, NULL);
if (IS_ERR(priv->cdev)) {
dev_err(priv->cpu_dev,
"running cpufreq without cooling device: %ld\n",
......
......@@ -170,11 +170,10 @@ void cpufreq_stats_create_table(struct cpufreq_policy *policy)
unsigned int i = 0, count = 0, ret = -ENOMEM;
struct cpufreq_stats *stats;
unsigned int alloc_size;
struct cpufreq_frequency_table *pos, *table;
struct cpufreq_frequency_table *pos;
/* We need cpufreq table for creating stats table */
table = policy->freq_table;
if (unlikely(!table))
count = cpufreq_table_count_valid_entries(policy);
if (!count)
return;
/* stats already initialized */
......@@ -185,10 +184,6 @@ void cpufreq_stats_create_table(struct cpufreq_policy *policy)
if (!stats)
return;
/* Find total allocation size */
cpufreq_for_each_valid_entry(pos, table)
count++;
alloc_size = count * sizeof(int) + count * sizeof(u64);
alloc_size += count * count * sizeof(int);
......@@ -205,7 +200,7 @@ void cpufreq_stats_create_table(struct cpufreq_policy *policy)
stats->max_state = count;
/* Find valid-unique entries */
cpufreq_for_each_valid_entry(pos, table)
cpufreq_for_each_valid_entry(pos, policy->freq_table)
if (freq_table_get_index(stats, pos->frequency) == -1)
stats->freq_table[i++] = pos->frequency;
......
......@@ -43,7 +43,7 @@ static int dbx500_cpufreq_exit(struct cpufreq_policy *policy)
static void dbx500_cpufreq_ready(struct cpufreq_policy *policy)
{
cdev = cpufreq_cooling_register(policy->cpus);
cdev = cpufreq_cooling_register(policy);
if (IS_ERR(cdev))
pr_err("Failed to register cooling device %ld\n", PTR_ERR(cdev));
else
......
......@@ -320,9 +320,7 @@ static void mtk_cpufreq_ready(struct cpufreq_policy *policy)
of_property_read_u32(np, DYNAMIC_POWER, &capacitance);
info->cdev = of_cpufreq_power_cooling_register(np,
policy->related_cpus,
capacitance,
NULL);
policy, capacitance, NULL);
if (IS_ERR(info->cdev)) {
dev_err(info->cpu_dev,
......
......@@ -278,8 +278,7 @@ static void qoriq_cpufreq_ready(struct cpufreq_policy *policy)
struct device_node *np = of_get_cpu_node(policy->cpu, NULL);
if (of_find_property(np, "#cooling-cells", NULL)) {
cpud->cdev = of_cpufreq_cooling_register(np,
policy->related_cpus);
cpud->cdev = of_cpufreq_cooling_register(np, policy);
if (IS_ERR(cpud->cdev) && PTR_ERR(cpud->cdev) != -ENOSYS) {
pr_err("cpu%d is not running as cooling device: %ld\n",
......
......@@ -245,7 +245,6 @@ static int bcm2835_thermal_probe(struct platform_device *pdev)
*/
err = tz->ops->get_trip_temp(tz, 0, &trip_temp);
if (err < 0) {
err = PTR_ERR(tz);
dev_err(&pdev->dev,
"Not able to read trip_temp: %d\n",
err);
......
......@@ -49,40 +49,45 @@
*/
/**
* struct power_table - frequency to power conversion
* struct freq_table - frequency table along with power entries
* @frequency: frequency in KHz
* @power: power in mW
*
* This structure is built when the cooling device registers and helps
* in translating frequency to power and viceversa.
* in translating frequency to power and vice versa.
*/
struct power_table {
struct freq_table {
u32 frequency;
u32 power;
};
/**
* struct time_in_idle - Idle time stats
* @time: previous reading of the absolute time that this cpu was idle
* @timestamp: wall time of the last invocation of get_cpu_idle_time_us()
*/
struct time_in_idle {
u64 time;
u64 timestamp;
};
/**
* struct cpufreq_cooling_device - data for cooling device with cpufreq
* @id: unique integer value corresponding to each cpufreq_cooling_device
* registered.
* @cool_dev: thermal_cooling_device pointer to keep track of the
* registered cooling device.
* @last_load: load measured by the latest call to cpufreq_get_requested_power()
* @cpufreq_state: integer value representing the current state of cpufreq
* cooling devices.
* @clipped_freq: integer value representing the absolute value of the clipped
* frequency.
* @max_level: maximum cooling level. One less than total number of valid
* cpufreq frequencies.
* @allowed_cpus: all the cpus involved for this cpufreq_cooling_device.
* @freq_table: Freq table in descending order of frequencies
* @cdev: thermal_cooling_device pointer to keep track of the
* registered cooling device.
* @policy: cpufreq policy.
* @node: list_head to link all cpufreq_cooling_device together.
* @last_load: load measured by the latest call to cpufreq_get_requested_power()
* @time_in_idle: previous reading of the absolute time that this cpu was idle
* @time_in_idle_timestamp: wall time of the last invocation of
* get_cpu_idle_time_us()
* @dyn_power_table: array of struct power_table for frequency to power
* conversion, sorted in ascending order.
* @dyn_power_table_entries: number of entries in the @dyn_power_table array
* @cpu_dev: the first cpu_device from @allowed_cpus that has OPPs registered
* @idle_time: idle time stats
* @plat_get_static_power: callback to calculate the static power
*
* This structure is required for keeping information of each registered
......@@ -90,80 +95,44 @@ struct power_table {
*/
struct cpufreq_cooling_device {
int id;
struct thermal_cooling_device *cool_dev;
u32 last_load;
unsigned int cpufreq_state;
unsigned int clipped_freq;
unsigned int max_level;
unsigned int *freq_table; /* In descending order */
struct cpumask allowed_cpus;
struct freq_table *freq_table; /* In descending order */
struct thermal_cooling_device *cdev;
struct cpufreq_policy *policy;
struct list_head node;
u32 last_load;
u64 *time_in_idle;
u64 *time_in_idle_timestamp;
struct power_table *dyn_power_table;
int dyn_power_table_entries;
struct device *cpu_dev;
struct time_in_idle *idle_time;
get_static_t plat_get_static_power;
};
static DEFINE_IDA(cpufreq_ida);
static DEFINE_IDA(cpufreq_ida);
static DEFINE_MUTEX(cooling_list_lock);
static LIST_HEAD(cpufreq_dev_list);
static LIST_HEAD(cpufreq_cdev_list);
/* Below code defines functions to be used for cpufreq as cooling device */
/**
* get_level: Find the level for a particular frequency
* @cpufreq_dev: cpufreq_dev for which the property is required
* @cpufreq_cdev: cpufreq_cdev for which the property is required
* @freq: Frequency
*
* Return: level on success, THERMAL_CSTATE_INVALID on error.
* Return: level corresponding to the frequency.
*/
static unsigned long get_level(struct cpufreq_cooling_device *cpufreq_dev,
static unsigned long get_level(struct cpufreq_cooling_device *cpufreq_cdev,
unsigned int freq)
{
struct freq_table *freq_table = cpufreq_cdev->freq_table;
unsigned long level;
for (level = 0; level <= cpufreq_dev->max_level; level++) {
if (freq == cpufreq_dev->freq_table[level])
return level;
if (freq > cpufreq_dev->freq_table[level])
for (level = 1; level <= cpufreq_cdev->max_level; level++)
if (freq > freq_table[level].frequency)
break;
}
return THERMAL_CSTATE_INVALID;
return level - 1;
}
/**
* cpufreq_cooling_get_level - for a given cpu, return the cooling level.
* @cpu: cpu for which the level is required
* @freq: the frequency of interest
*
* This function will match the cooling level corresponding to the
* requested @freq and return it.
*
* Return: The matched cooling level on success or THERMAL_CSTATE_INVALID
* otherwise.
*/
unsigned long cpufreq_cooling_get_level(unsigned int cpu, unsigned int freq)
{
struct cpufreq_cooling_device *cpufreq_dev;
mutex_lock(&cooling_list_lock);
list_for_each_entry(cpufreq_dev, &cpufreq_dev_list, node) {
if (cpumask_test_cpu(cpu, &cpufreq_dev->allowed_cpus)) {
mutex_unlock(&cooling_list_lock);
return get_level(cpufreq_dev, freq);
}
}
mutex_unlock(&cooling_list_lock);
pr_err("%s: cpu:%d not part of any cooling device\n", __func__, cpu);
return THERMAL_CSTATE_INVALID;
}
EXPORT_SYMBOL_GPL(cpufreq_cooling_get_level);
/**
* cpufreq_thermal_notifier - notifier callback for cpufreq policy change.
* @nb: struct notifier_block * with callback info.
......@@ -181,14 +150,18 @@ static int cpufreq_thermal_notifier(struct notifier_block *nb,
{
struct cpufreq_policy *policy = data;
unsigned long clipped_freq;
struct cpufreq_cooling_device *cpufreq_dev;
struct cpufreq_cooling_device *cpufreq_cdev;
if (event != CPUFREQ_ADJUST)
return NOTIFY_DONE;
mutex_lock(&cooling_list_lock);
list_for_each_entry(cpufreq_dev, &cpufreq_dev_list, node) {
if (!cpumask_test_cpu(policy->cpu, &cpufreq_dev->allowed_cpus))
list_for_each_entry(cpufreq_cdev, &cpufreq_cdev_list, node) {
/*
* A new copy of the policy is sent to the notifier and can't
* compare that directly.
*/
if (policy->cpu != cpufreq_cdev->policy->cpu)
continue;
/*
......@@ -202,7 +175,7 @@ static int cpufreq_thermal_notifier(struct notifier_block *nb,
* But, if clipped_freq is greater than policy->max, we don't
* need to do anything.
*/
clipped_freq = cpufreq_dev->clipped_freq;
clipped_freq = cpufreq_cdev->clipped_freq;
if (policy->max > clipped_freq)
cpufreq_verify_within_limits(policy, 0, clipped_freq);
......@@ -214,63 +187,63 @@ static int cpufreq_thermal_notifier(struct notifier_block *nb,
}
/**
* build_dyn_power_table() - create a dynamic power to frequency table
* @cpufreq_device: the cpufreq cooling device in which to store the table
* update_freq_table() - Update the freq table with power numbers
* @cpufreq_cdev: the cpufreq cooling device in which to update the table
* @capacitance: dynamic power coefficient for these cpus
*
* Build a dynamic power to frequency table for this cpu and store it
* in @cpufreq_device. This table will be used in cpu_power_to_freq() and
* cpu_freq_to_power() to convert between power and frequency
* efficiently. Power is stored in mW, frequency in KHz. The
* resulting table is in ascending order.
* Update the freq table with power numbers. This table will be used in
* cpu_power_to_freq() and cpu_freq_to_power() to convert between power and
* frequency efficiently. Power is stored in mW, frequency in KHz. The
* resulting table is in descending order.
*
* Return: 0 on success, -EINVAL if there are no OPPs for any CPUs,
* -ENOMEM if we run out of memory or -EAGAIN if an OPP was
* added/enabled while the function was executing.
* or -ENOMEM if we run out of memory.
*/
static int build_dyn_power_table(struct cpufreq_cooling_device *cpufreq_device,
u32 capacitance)
static int update_freq_table(struct cpufreq_cooling_device *cpufreq_cdev,
u32 capacitance)
{
struct power_table *power_table;
struct freq_table *freq_table = cpufreq_cdev->freq_table;
struct dev_pm_opp *opp;
struct device *dev = NULL;
int num_opps = 0, cpu, i, ret = 0;
unsigned long freq;
for_each_cpu(cpu, &cpufreq_device->allowed_cpus) {
dev = get_cpu_device(cpu);
if (!dev) {
dev_warn(&cpufreq_device->cool_dev->device,
"No cpu device for cpu %d\n", cpu);
continue;
}
int num_opps = 0, cpu = cpufreq_cdev->policy->cpu, i;
num_opps = dev_pm_opp_get_opp_count(dev);
if (num_opps > 0)
break;
else if (num_opps < 0)
return num_opps;
dev = get_cpu_device(cpu);
if (unlikely(!dev)) {
dev_warn(&cpufreq_cdev->cdev->device,
"No cpu device for cpu %d\n", cpu);
return -ENODEV;
}
if (num_opps == 0)
return -EINVAL;
num_opps = dev_pm_opp_get_opp_count(dev);
if (num_opps < 0)
return num_opps;
power_table = kcalloc(num_opps, sizeof(*power_table), GFP_KERNEL);
if (!power_table)
return -ENOMEM;
/*
* The cpufreq table is also built from the OPP table and so the count
* should match.
*/
if (num_opps != cpufreq_cdev->max_level + 1) {
dev_warn(dev, "Number of OPPs not matching with max_levels\n");
return -EINVAL;
}
for (freq = 0, i = 0;
opp = dev_pm_opp_find_freq_ceil(dev, &freq), !IS_ERR(opp);
freq++, i++) {
u32 freq_mhz, voltage_mv;
for (i = 0; i <= cpufreq_cdev->max_level; i++) {
unsigned long freq = freq_table[i].frequency * 1000;
u32 freq_mhz = freq_table[i].frequency / 1000;
u64 power;
u32 voltage_mv;
if (i >= num_opps) {
ret = -EAGAIN;
goto free_power_table;
/*
* Find ceil frequency as 'freq' may be slightly lower than OPP
* freq due to truncation while converting to kHz.
*/
opp = dev_pm_opp_find_freq_ceil(dev, &freq);
if (IS_ERR(opp)) {
dev_err(dev, "failed to get opp for %lu frequency\n",
freq);
return -EINVAL;
}
freq_mhz = freq / 1000000;
voltage_mv = dev_pm_opp_get_voltage(opp) / 1000;
dev_pm_opp_put(opp);
......@@ -281,89 +254,73 @@ static int build_dyn_power_table(struct cpufreq_cooling_device *cpufreq_device,
power = (u64)capacitance * freq_mhz * voltage_mv * voltage_mv;
do_div(power, 1000000000);
/* frequency is stored in power_table in KHz */
power_table[i].frequency = freq / 1000;
/* power is stored in mW */
power_table[i].power = power;
freq_table[i].power = power;
}
if (i != num_opps) {
ret = PTR_ERR(opp);
goto free_power_table;
}
cpufreq_device->cpu_dev = dev;
cpufreq_device->dyn_power_table = power_table;
cpufreq_device->dyn_power_table_entries = i;
return 0;
free_power_table:
kfree(power_table);
return ret;
}
static u32 cpu_freq_to_power(struct cpufreq_cooling_device *cpufreq_device,
static u32 cpu_freq_to_power(struct cpufreq_cooling_device *cpufreq_cdev,
u32 freq)
{
int i;
struct power_table *pt = cpufreq_device->dyn_power_table;
struct freq_table *freq_table = cpufreq_cdev->freq_table;
for (i = 1; i < cpufreq_device->dyn_power_table_entries; i++)
if (freq < pt[i].frequency)
for (i = 1; i <= cpufreq_cdev->max_level; i++)
if (freq > freq_table[i].frequency)
break;
return pt[i - 1].power;
return freq_table[i - 1].power;
}
static u32 cpu_power_to_freq(struct cpufreq_cooling_device *cpufreq_device,
static u32 cpu_power_to_freq(struct cpufreq_cooling_device *cpufreq_cdev,
u32 power)
{
int i;
struct power_table *pt = cpufreq_device->dyn_power_table;
struct freq_table *freq_table = cpufreq_cdev->freq_table;
for (i = 1; i < cpufreq_device->dyn_power_table_entries; i++)
if (power < pt[i].power)
for (i = 1; i <= cpufreq_cdev->max_level; i++)
if (power > freq_table[i].power)
break;
return pt[i - 1].frequency;
return freq_table[i - 1].frequency;
}
/**
* get_load() - get load for a cpu since last updated
* @cpufreq_device: &struct cpufreq_cooling_device for this cpu
* @cpufreq_cdev: &struct cpufreq_cooling_device for this cpu
* @cpu: cpu number
* @cpu_idx: index of the cpu in cpufreq_device->allowed_cpus
* @cpu_idx: index of the cpu in time_in_idle*
*
* Return: The average load of cpu @cpu in percentage since this
* function was last called.
*/
static u32 get_load(struct cpufreq_cooling_device *cpufreq_device, int cpu,
static u32 get_load(struct cpufreq_cooling_device *cpufreq_cdev, int cpu,
int cpu_idx)
{
u32 load;
u64 now, now_idle, delta_time, delta_idle;
struct time_in_idle *idle_time = &cpufreq_cdev->idle_time[cpu_idx];
now_idle = get_cpu_idle_time(cpu, &now, 0);
delta_idle = now_idle - cpufreq_device->time_in_idle[cpu_idx];
delta_time = now - cpufreq_device->time_in_idle_timestamp[cpu_idx];
delta_idle = now_idle - idle_time->time;
delta_time = now - idle_time->timestamp;
if (delta_time <= delta_idle)
load = 0;
else
load = div64_u64(100 * (delta_time - delta_idle), delta_time);
cpufreq_device->time_in_idle[cpu_idx] = now_idle;
cpufreq_device->time_in_idle_timestamp[cpu_idx] = now;
idle_time->time = now_idle;
idle_time->timestamp = now;
return load;
}
/**
* get_static_power() - calculate the static power consumed by the cpus
* @cpufreq_device: struct &cpufreq_cooling_device for this cpu cdev
* @cpufreq_cdev: struct &cpufreq_cooling_device for this cpu cdev
* @tz: thermal zone device in which we're operating
* @freq: frequency in KHz
* @power: pointer in which to store the calculated static power
......@@ -376,26 +333,28 @@ static u32 get_load(struct cpufreq_cooling_device *cpufreq_device, int cpu,
*
* Return: 0 on success, -E* on failure.
*/
static int get_static_power(struct cpufreq_cooling_device *cpufreq_device,
static int get_static_power(struct cpufreq_cooling_device *cpufreq_cdev,
struct thermal_zone_device *tz, unsigned long freq,
u32 *power)
{
struct dev_pm_opp *opp;
unsigned long voltage;
struct cpumask *cpumask = &cpufreq_device->allowed_cpus;
struct cpufreq_policy *policy = cpufreq_cdev->policy;
struct cpumask *cpumask = policy->related_cpus;
unsigned long freq_hz = freq * 1000;
struct device *dev;
if (!cpufreq_device->plat_get_static_power ||
!cpufreq_device->cpu_dev) {
if (!cpufreq_cdev->plat_get_static_power) {
*power = 0;
return 0;
}
opp = dev_pm_opp_find_freq_exact(cpufreq_device->cpu_dev, freq_hz,
true);
dev = get_cpu_device(policy->cpu);
WARN_ON(!dev);
opp = dev_pm_opp_find_freq_exact(dev, freq_hz, true);
if (IS_ERR(opp)) {
dev_warn_ratelimited(cpufreq_device->cpu_dev,
"Failed to find OPP for frequency %lu: %ld\n",
dev_warn_ratelimited(dev, "Failed to find OPP for frequency %lu: %ld\n",
freq_hz, PTR_ERR(opp));
return -EINVAL;
}
......@@ -404,31 +363,30 @@ static int get_static_power(struct cpufreq_cooling_device *cpufreq_device,
dev_pm_opp_put(opp);
if (voltage == 0) {
dev_err_ratelimited(cpufreq_device->cpu_dev,
"Failed to get voltage for frequency %lu\n",
dev_err_ratelimited(dev, "Failed to get voltage for frequency %lu\n",
freq_hz);
return -EINVAL;
}
return cpufreq_device->plat_get_static_power(cpumask, tz->passive_delay,
voltage, power);
return cpufreq_cdev->plat_get_static_power(cpumask, tz->passive_delay,
voltage, power);
}
/**
* get_dynamic_power() - calculate the dynamic power
* @cpufreq_device: &cpufreq_cooling_device for this cdev
* @cpufreq_cdev: &cpufreq_cooling_device for this cdev
* @freq: current frequency
*
* Return: the dynamic power consumed by the cpus described by
* @cpufreq_device.
* @cpufreq_cdev.
*/
static u32 get_dynamic_power(struct cpufreq_cooling_device *cpufreq_device,
static u32 get_dynamic_power(struct cpufreq_cooling_device *cpufreq_cdev,
unsigned long freq)
{
u32 raw_cpu_power;
raw_cpu_power = cpu_freq_to_power(cpufreq_device, freq);
return (raw_cpu_power * cpufreq_device->last_load) / 100;
raw_cpu_power = cpu_freq_to_power(cpufreq_cdev, freq);
return (raw_cpu_power * cpufreq_cdev->last_load) / 100;
}
/* cpufreq cooling device callback functions are defined below */
......@@ -446,9 +404,9 @@ static u32 get_dynamic_power(struct cpufreq_cooling_device *cpufreq_device,
static int cpufreq_get_max_state(struct thermal_cooling_device *cdev,
unsigned long *state)
{
struct cpufreq_cooling_device *cpufreq_device = cdev->devdata;
struct cpufreq_cooling_device *cpufreq_cdev = cdev->devdata;
*state = cpufreq_device->max_level;
*state = cpufreq_cdev->max_level;
return 0;
}
......@@ -465,9 +423,9 @@ static int cpufreq_get_max_state(struct thermal_cooling_device *cdev,
static int cpufreq_get_cur_state(struct thermal_cooling_device *cdev,
unsigned long *state)
{
struct cpufreq_cooling_device *cpufreq_device = cdev->devdata;
struct cpufreq_cooling_device *cpufreq_cdev = cdev->devdata;
*state = cpufreq_device->cpufreq_state;
*state = cpufreq_cdev->cpufreq_state;
return 0;
}
......@@ -485,23 +443,22 @@ static int cpufreq_get_cur_state(struct thermal_cooling_device *cdev,
static int cpufreq_set_cur_state(struct thermal_cooling_device *cdev,
unsigned long state)
{
struct cpufreq_cooling_device *cpufreq_device = cdev->devdata;
unsigned int cpu = cpumask_any(&cpufreq_device->allowed_cpus);
struct cpufreq_cooling_device *cpufreq_cdev = cdev->devdata;
unsigned int clip_freq;
/* Request state should be less than max_level */
if (WARN_ON(state > cpufreq_device->max_level))
if (WARN_ON(state > cpufreq_cdev->max_level))
return -EINVAL;
/* Check if the old cooling action is same as new cooling action */
if (cpufreq_device->cpufreq_state == state)
if (cpufreq_cdev->cpufreq_state == state)
return 0;
clip_freq = cpufreq_device->freq_table[state];
cpufreq_device->cpufreq_state = state;
cpufreq_device->clipped_freq = clip_freq;
clip_freq = cpufreq_cdev->freq_table[state].frequency;
cpufreq_cdev->cpufreq_state = state;
cpufreq_cdev->clipped_freq = clip_freq;
cpufreq_update_policy(cpu);
cpufreq_update_policy(cpufreq_cdev->policy->cpu);
return 0;
}
......@@ -536,33 +493,23 @@ static int cpufreq_get_requested_power(struct thermal_cooling_device *cdev,
unsigned long freq;
int i = 0, cpu, ret;
u32 static_power, dynamic_power, total_load = 0;
struct cpufreq_cooling_device *cpufreq_device = cdev->devdata;
struct cpufreq_cooling_device *cpufreq_cdev = cdev->devdata;
struct cpufreq_policy *policy = cpufreq_cdev->policy;
u32 *load_cpu = NULL;
cpu = cpumask_any_and(&cpufreq_device->allowed_cpus, cpu_online_mask);
/*
* All the CPUs are offline, thus the requested power by
* the cdev is 0
*/
if (cpu >= nr_cpu_ids) {
*power = 0;
return 0;
}
freq = cpufreq_quick_get(cpu);
freq = cpufreq_quick_get(policy->cpu);
if (trace_thermal_power_cpu_get_power_enabled()) {
u32 ncpus = cpumask_weight(&cpufreq_device->allowed_cpus);
u32 ncpus = cpumask_weight(policy->related_cpus);
load_cpu = kcalloc(ncpus, sizeof(*load_cpu), GFP_KERNEL);
}
for_each_cpu(cpu, &cpufreq_device->allowed_cpus) {
for_each_cpu(cpu, policy->related_cpus) {
u32 load;
if (cpu_online(cpu))
load = get_load(cpufreq_device, cpu, i);
load = get_load(cpufreq_cdev, cpu, i);
else
load = 0;
......@@ -573,19 +520,19 @@ static int cpufreq_get_requested_power(struct thermal_cooling_device *cdev,
i++;
}
cpufreq_device->last_load = total_load;
cpufreq_cdev->last_load = total_load;
dynamic_power = get_dynamic_power(cpufreq_device, freq);
ret = get_static_power(cpufreq_device, tz, freq, &static_power);
dynamic_power = get_dynamic_power(cpufreq_cdev, freq);
ret = get_static_power(cpufreq_cdev, tz, freq, &static_power);
if (ret) {
kfree(load_cpu);
return ret;
}
if (load_cpu) {
trace_thermal_power_cpu_get_power(
&cpufreq_device->allowed_cpus,
freq, load_cpu, i, dynamic_power, static_power);
trace_thermal_power_cpu_get_power(policy->related_cpus, freq,
load_cpu, i, dynamic_power,
static_power);
kfree(load_cpu);
}
......@@ -614,38 +561,23 @@ static int cpufreq_state2power(struct thermal_cooling_device *cdev,
unsigned long state, u32 *power)
{
unsigned int freq, num_cpus;
cpumask_var_t cpumask;
u32 static_power, dynamic_power;
int ret;
struct cpufreq_cooling_device *cpufreq_device = cdev->devdata;
if (!alloc_cpumask_var(&cpumask, GFP_KERNEL))
return -ENOMEM;
cpumask_and(cpumask, &cpufreq_device->allowed_cpus, cpu_online_mask);
num_cpus = cpumask_weight(cpumask);
struct cpufreq_cooling_device *cpufreq_cdev = cdev->devdata;
/* None of our cpus are online, so no power */
if (num_cpus == 0) {
*power = 0;
ret = 0;
goto out;
}
/* Request state should be less than max_level */
if (WARN_ON(state > cpufreq_cdev->max_level))
return -EINVAL;
freq = cpufreq_device->freq_table[state];
if (!freq) {
ret = -EINVAL;
goto out;
}
num_cpus = cpumask_weight(cpufreq_cdev->policy->cpus);
dynamic_power = cpu_freq_to_power(cpufreq_device, freq) * num_cpus;
ret = get_static_power(cpufreq_device, tz, freq, &static_power);
freq = cpufreq_cdev->freq_table[state].frequency;
dynamic_power = cpu_freq_to_power(cpufreq_cdev, freq) * num_cpus;
ret = get_static_power(cpufreq_cdev, tz, freq, &static_power);
if (ret)
goto out;
return ret;
*power = static_power + dynamic_power;
out:
free_cpumask_var(cpumask);
return ret;
}
......@@ -673,39 +605,27 @@ static int cpufreq_power2state(struct thermal_cooling_device *cdev,
struct thermal_zone_device *tz, u32 power,
unsigned long *state)
{
unsigned int cpu, cur_freq, target_freq;
unsigned int cur_freq, target_freq;
int ret;
s32 dyn_power;
u32 last_load, normalised_power, static_power;
struct cpufreq_cooling_device *cpufreq_device = cdev->devdata;
struct cpufreq_cooling_device *cpufreq_cdev = cdev->devdata;
struct cpufreq_policy *policy = cpufreq_cdev->policy;
cpu = cpumask_any_and(&cpufreq_device->allowed_cpus, cpu_online_mask);
/* None of our cpus are online */
if (cpu >= nr_cpu_ids)
return -ENODEV;
cur_freq = cpufreq_quick_get(cpu);
ret = get_static_power(cpufreq_device, tz, cur_freq, &static_power);
cur_freq = cpufreq_quick_get(policy->cpu);
ret = get_static_power(cpufreq_cdev, tz, cur_freq, &static_power);
if (ret)
return ret;
dyn_power = power - static_power;
dyn_power = dyn_power > 0 ? dyn_power : 0;
last_load = cpufreq_device->last_load ?: 1;
last_load = cpufreq_cdev->last_load ?: 1;
normalised_power = (dyn_power * 100) / last_load;
target_freq = cpu_power_to_freq(cpufreq_device, normalised_power);
target_freq = cpu_power_to_freq(cpufreq_cdev, normalised_power);
*state = cpufreq_cooling_get_level(cpu, target_freq);
if (*state == THERMAL_CSTATE_INVALID) {
dev_err_ratelimited(&cdev->device,
"Failed to convert %dKHz for cpu %d into a cdev state\n",
target_freq, cpu);
return -EINVAL;
}
trace_thermal_power_cpu_limit(&cpufreq_device->allowed_cpus,
target_freq, *state, power);
*state = get_level(cpufreq_cdev, target_freq);
trace_thermal_power_cpu_limit(policy->related_cpus, target_freq, *state,
power);
return 0;
}
......@@ -748,7 +668,7 @@ static unsigned int find_next_max(struct cpufreq_frequency_table *table,
/**
* __cpufreq_cooling_register - helper function to create cpufreq cooling device
* @np: a valid struct device_node to the cooling device device tree node
* @clip_cpus: cpumask of cpus where the frequency constraints will happen.
* @policy: cpufreq policy
* Normally this should be same as cpufreq policy->related_cpus.
* @capacitance: dynamic power coefficient for these cpus
* @plat_static_func: function to calculate the static power consumed by these
......@@ -764,102 +684,68 @@ static unsigned int find_next_max(struct cpufreq_frequency_table *table,
*/
static struct thermal_cooling_device *
__cpufreq_cooling_register(struct device_node *np,
const struct cpumask *clip_cpus, u32 capacitance,
struct cpufreq_policy *policy, u32 capacitance,
get_static_t plat_static_func)
{
struct cpufreq_policy *policy;
struct thermal_cooling_device *cool_dev;
struct cpufreq_cooling_device *cpufreq_dev;
struct thermal_cooling_device *cdev;
struct cpufreq_cooling_device *cpufreq_cdev;
char dev_name[THERMAL_NAME_LENGTH];
struct cpufreq_frequency_table *pos, *table;
cpumask_var_t temp_mask;
unsigned int freq, i, num_cpus;
int ret;
struct thermal_cooling_device_ops *cooling_ops;
bool first;
if (!alloc_cpumask_var(&temp_mask, GFP_KERNEL))
return ERR_PTR(-ENOMEM);
cpumask_and(temp_mask, clip_cpus, cpu_online_mask);
policy = cpufreq_cpu_get(cpumask_first(temp_mask));
if (!policy) {
pr_debug("%s: CPUFreq policy not found\n", __func__);
cool_dev = ERR_PTR(-EPROBE_DEFER);
goto free_cpumask;
if (IS_ERR_OR_NULL(policy)) {
pr_err("%s: cpufreq policy isn't valid: %p", __func__, policy);
return ERR_PTR(-EINVAL);
}
table = policy->freq_table;
if (!table) {
pr_debug("%s: CPUFreq table not found\n", __func__);
cool_dev = ERR_PTR(-ENODEV);
goto put_policy;
i = cpufreq_table_count_valid_entries(policy);
if (!i) {
pr_debug("%s: CPUFreq table not found or has no valid entries\n",
__func__);
return ERR_PTR(-ENODEV);
}
cpufreq_dev = kzalloc(sizeof(*cpufreq_dev), GFP_KERNEL);
if (!cpufreq_dev) {
cool_dev = ERR_PTR(-ENOMEM);
goto put_policy;
}
cpufreq_cdev = kzalloc(sizeof(*cpufreq_cdev), GFP_KERNEL);
if (!cpufreq_cdev)
return ERR_PTR(-ENOMEM);
num_cpus = cpumask_weight(clip_cpus);
cpufreq_dev->time_in_idle = kcalloc(num_cpus,
sizeof(*cpufreq_dev->time_in_idle),
GFP_KERNEL);
if (!cpufreq_dev->time_in_idle) {
cool_dev = ERR_PTR(-ENOMEM);
cpufreq_cdev->policy = policy;
num_cpus = cpumask_weight(policy->related_cpus);
cpufreq_cdev->idle_time = kcalloc(num_cpus,
sizeof(*cpufreq_cdev->idle_time),
GFP_KERNEL);
if (!cpufreq_cdev->idle_time) {
cdev = ERR_PTR(-ENOMEM);
goto free_cdev;
}
cpufreq_dev->time_in_idle_timestamp =
kcalloc(num_cpus, sizeof(*cpufreq_dev->time_in_idle_timestamp),
GFP_KERNEL);
if (!cpufreq_dev->time_in_idle_timestamp) {
cool_dev = ERR_PTR(-ENOMEM);
goto free_time_in_idle;
}
/* Find max levels */
cpufreq_for_each_valid_entry(pos, table)
cpufreq_dev->max_level++;
cpufreq_dev->freq_table = kmalloc(sizeof(*cpufreq_dev->freq_table) *
cpufreq_dev->max_level, GFP_KERNEL);
if (!cpufreq_dev->freq_table) {
cool_dev = ERR_PTR(-ENOMEM);
goto free_time_in_idle_timestamp;
}
/* max_level is an index, not a counter */
cpufreq_dev->max_level--;
cpumask_copy(&cpufreq_dev->allowed_cpus, clip_cpus);
if (capacitance) {
cpufreq_dev->plat_get_static_power = plat_static_func;
ret = build_dyn_power_table(cpufreq_dev, capacitance);
if (ret) {
cool_dev = ERR_PTR(ret);
goto free_table;
}
cooling_ops = &cpufreq_power_cooling_ops;
} else {
cooling_ops = &cpufreq_cooling_ops;
cpufreq_cdev->max_level = i - 1;
cpufreq_cdev->freq_table = kmalloc_array(i,
sizeof(*cpufreq_cdev->freq_table),
GFP_KERNEL);
if (!cpufreq_cdev->freq_table) {
cdev = ERR_PTR(-ENOMEM);
goto free_idle_time;
}
ret = ida_simple_get(&cpufreq_ida, 0, 0, GFP_KERNEL);
if (ret < 0) {
cool_dev = ERR_PTR(ret);
goto free_power_table;
cdev = ERR_PTR(ret);
goto free_table;
}
cpufreq_dev->id = ret;
cpufreq_cdev->id = ret;
snprintf(dev_name, sizeof(dev_name), "thermal-cpufreq-%d",
cpufreq_cdev->id);
/* Fill freq-table in descending order of frequencies */
for (i = 0, freq = -1; i <= cpufreq_dev->max_level; i++) {
freq = find_next_max(table, freq);
cpufreq_dev->freq_table[i] = freq;
for (i = 0, freq = -1; i <= cpufreq_cdev->max_level; i++) {
freq = find_next_max(policy->freq_table, freq);
cpufreq_cdev->freq_table[i].frequency = freq;
/* Warn for duplicate entries */
if (!freq)
......@@ -868,51 +754,54 @@ __cpufreq_cooling_register(struct device_node *np,
pr_debug("%s: freq:%u KHz\n", __func__, freq);
}
snprintf(dev_name, sizeof(dev_name), "thermal-cpufreq-%d",
cpufreq_dev->id);
if (capacitance) {
cpufreq_cdev->plat_get_static_power = plat_static_func;
ret = update_freq_table(cpufreq_cdev, capacitance);
if (ret) {
cdev = ERR_PTR(ret);
goto remove_ida;
}
cooling_ops = &cpufreq_power_cooling_ops;
} else {
cooling_ops = &cpufreq_cooling_ops;
}
cool_dev = thermal_of_cooling_device_register(np, dev_name, cpufreq_dev,
cooling_ops);
if (IS_ERR(cool_dev))
cdev = thermal_of_cooling_device_register(np, dev_name, cpufreq_cdev,
cooling_ops);
if (IS_ERR(cdev))
goto remove_ida;
cpufreq_dev->clipped_freq = cpufreq_dev->freq_table[0];
cpufreq_dev->cool_dev = cool_dev;
cpufreq_cdev->clipped_freq = cpufreq_cdev->freq_table[0].frequency;
cpufreq_cdev->cdev = cdev;
mutex_lock(&cooling_list_lock);
/* Register the notifier for first cpufreq cooling device */
first = list_empty(&cpufreq_dev_list);
list_add(&cpufreq_dev->node, &cpufreq_dev_list);
first = list_empty(&cpufreq_cdev_list);
list_add(&cpufreq_cdev->node, &cpufreq_cdev_list);
mutex_unlock(&cooling_list_lock);
if (first)
cpufreq_register_notifier(&thermal_cpufreq_notifier_block,
CPUFREQ_POLICY_NOTIFIER);
goto put_policy;
return cdev;
remove_ida:
ida_simple_remove(&cpufreq_ida, cpufreq_dev->id);
free_power_table:
kfree(cpufreq_dev->dyn_power_table);
ida_simple_remove(&cpufreq_ida, cpufreq_cdev->id);
free_table:
kfree(cpufreq_dev->freq_table);
free_time_in_idle_timestamp:
kfree(cpufreq_dev->time_in_idle_timestamp);
free_time_in_idle:
kfree(cpufreq_dev->time_in_idle);
kfree(cpufreq_cdev->freq_table);
free_idle_time:
kfree(cpufreq_cdev->idle_time);
free_cdev:
kfree(cpufreq_dev);
put_policy:
cpufreq_cpu_put(policy);
free_cpumask:
free_cpumask_var(temp_mask);
return cool_dev;
kfree(cpufreq_cdev);
return cdev;
}
/**
* cpufreq_cooling_register - function to create cpufreq cooling device.
* @clip_cpus: cpumask of cpus where the frequency constraints will happen.
* @policy: cpufreq policy
*
* This interface function registers the cpufreq cooling device with the name
* "thermal-cpufreq-%x". This api can support multiple instances of cpufreq
......@@ -922,16 +811,16 @@ __cpufreq_cooling_register(struct device_node *np,
* on failure, it returns a corresponding ERR_PTR().
*/
struct thermal_cooling_device *
cpufreq_cooling_register(const struct cpumask *clip_cpus)
cpufreq_cooling_register(struct cpufreq_policy *policy)
{
return __cpufreq_cooling_register(NULL, clip_cpus, 0, NULL);
return __cpufreq_cooling_register(NULL, policy, 0, NULL);
}
EXPORT_SYMBOL_GPL(cpufreq_cooling_register);
/**
* of_cpufreq_cooling_register - function to create cpufreq cooling device.
* @np: a valid struct device_node to the cooling device device tree node
* @clip_cpus: cpumask of cpus where the frequency constraints will happen.
* @policy: cpufreq policy
*
* This interface function registers the cpufreq cooling device with the name
* "thermal-cpufreq-%x". This api can support multiple instances of cpufreq
......@@ -943,18 +832,18 @@ EXPORT_SYMBOL_GPL(cpufreq_cooling_register);
*/
struct thermal_cooling_device *
of_cpufreq_cooling_register(struct device_node *np,
const struct cpumask *clip_cpus)
struct cpufreq_policy *policy)
{
if (!np)
return ERR_PTR(-EINVAL);
return __cpufreq_cooling_register(np, clip_cpus, 0, NULL);
return __cpufreq_cooling_register(np, policy, 0, NULL);
}
EXPORT_SYMBOL_GPL(of_cpufreq_cooling_register);
/**
* cpufreq_power_cooling_register() - create cpufreq cooling device with power extensions
* @clip_cpus: cpumask of cpus where the frequency constraints will happen
* @policy: cpufreq policy
* @capacitance: dynamic power coefficient for these cpus
* @plat_static_func: function to calculate the static power consumed by these
* cpus (optional)
......@@ -974,10 +863,10 @@ EXPORT_SYMBOL_GPL(of_cpufreq_cooling_register);
* on failure, it returns a corresponding ERR_PTR().
*/
struct thermal_cooling_device *
cpufreq_power_cooling_register(const struct cpumask *clip_cpus, u32 capacitance,
cpufreq_power_cooling_register(struct cpufreq_policy *policy, u32 capacitance,
get_static_t plat_static_func)
{
return __cpufreq_cooling_register(NULL, clip_cpus, capacitance,
return __cpufreq_cooling_register(NULL, policy, capacitance,
plat_static_func);
}
EXPORT_SYMBOL(cpufreq_power_cooling_register);
......@@ -985,7 +874,7 @@ EXPORT_SYMBOL(cpufreq_power_cooling_register);
/**
* of_cpufreq_power_cooling_register() - create cpufreq cooling device with power extensions
* @np: a valid struct device_node to the cooling device device tree node
* @clip_cpus: cpumask of cpus where the frequency constraints will happen
* @policy: cpufreq policy
* @capacitance: dynamic power coefficient for these cpus
* @plat_static_func: function to calculate the static power consumed by these
* cpus (optional)
......@@ -1007,14 +896,14 @@ EXPORT_SYMBOL(cpufreq_power_cooling_register);
*/
struct thermal_cooling_device *
of_cpufreq_power_cooling_register(struct device_node *np,
const struct cpumask *clip_cpus,
struct cpufreq_policy *policy,
u32 capacitance,
get_static_t plat_static_func)
{
if (!np)
return ERR_PTR(-EINVAL);
return __cpufreq_cooling_register(np, clip_cpus, capacitance,
return __cpufreq_cooling_register(np, policy, capacitance,
plat_static_func);
}
EXPORT_SYMBOL(of_cpufreq_power_cooling_register);
......@@ -1027,30 +916,28 @@ EXPORT_SYMBOL(of_cpufreq_power_cooling_register);
*/
void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
{
struct cpufreq_cooling_device *cpufreq_dev;
struct cpufreq_cooling_device *cpufreq_cdev;
bool last;
if (!cdev)
return;
cpufreq_dev = cdev->devdata;
cpufreq_cdev = cdev->devdata;
mutex_lock(&cooling_list_lock);
list_del(&cpufreq_dev->node);
list_del(&cpufreq_cdev->node);
/* Unregister the notifier for the last cpufreq cooling device */
last = list_empty(&cpufreq_dev_list);
last = list_empty(&cpufreq_cdev_list);
mutex_unlock(&cooling_list_lock);
if (last)
cpufreq_unregister_notifier(&thermal_cpufreq_notifier_block,
CPUFREQ_POLICY_NOTIFIER);
thermal_cooling_device_unregister(cpufreq_dev->cool_dev);
ida_simple_remove(&cpufreq_ida, cpufreq_dev->id);
kfree(cpufreq_dev->dyn_power_table);
kfree(cpufreq_dev->time_in_idle_timestamp);
kfree(cpufreq_dev->time_in_idle);
kfree(cpufreq_dev->freq_table);
kfree(cpufreq_dev);
thermal_cooling_device_unregister(cpufreq_cdev->cdev);
ida_simple_remove(&cpufreq_ida, cpufreq_cdev->id);
kfree(cpufreq_cdev->idle_time);
kfree(cpufreq_cdev->freq_table);
kfree(cpufreq_cdev);
}
EXPORT_SYMBOL_GPL(cpufreq_cooling_unregister);
......@@ -71,6 +71,7 @@ static long get_target_state(struct thermal_zone_device *tz,
/**
* fair_share_throttle - throttles devices associated with the given zone
* @tz - thermal_zone_device
* @trip - trip point index
*
* Throttling Logic: This uses three parameters to calculate the new
* throttle state of the cooling devices associated with the given zone.
......
......@@ -397,8 +397,11 @@ static int hisi_thermal_suspend(struct device *dev)
static int hisi_thermal_resume(struct device *dev)
{
struct hisi_thermal_data *data = dev_get_drvdata(dev);
int ret;
clk_prepare_enable(data->clk);
ret = clk_prepare_enable(data->clk);
if (ret)
return ret;
data->irq_enabled = true;
hisi_thermal_enable_bind_irq_sensor(data);
......
......@@ -8,6 +8,7 @@
*/
#include <linux/clk.h>
#include <linux/cpufreq.h>
#include <linux/cpu_cooling.h>
#include <linux/delay.h>
#include <linux/device.h>
......@@ -88,6 +89,7 @@ static struct thermal_soc_data thermal_imx6sx_data = {
};
struct imx_thermal_data {
struct cpufreq_policy *policy;
struct thermal_zone_device *tz;
struct thermal_cooling_device *cdev;
enum thermal_device_mode mode;
......@@ -525,13 +527,18 @@ static int imx_thermal_probe(struct platform_device *pdev)
regmap_write(map, MISC0 + REG_SET, MISC0_REFTOP_SELBIASOFF);
regmap_write(map, TEMPSENSE0 + REG_SET, TEMPSENSE0_POWER_DOWN);
data->cdev = cpufreq_cooling_register(cpu_present_mask);
data->policy = cpufreq_cpu_get(0);
if (!data->policy) {
pr_debug("%s: CPUFreq policy not found\n", __func__);
return -EPROBE_DEFER;
}
data->cdev = cpufreq_cooling_register(data->policy);
if (IS_ERR(data->cdev)) {
ret = PTR_ERR(data->cdev);
if (ret != -EPROBE_DEFER)
dev_err(&pdev->dev,
"failed to register cpufreq cooling device: %d\n",
ret);
dev_err(&pdev->dev,
"failed to register cpufreq cooling device: %d\n", ret);
cpufreq_cpu_put(data->policy);
return ret;
}
......@@ -542,6 +549,7 @@ static int imx_thermal_probe(struct platform_device *pdev)
dev_err(&pdev->dev,
"failed to get thermal clk: %d\n", ret);
cpufreq_cooling_unregister(data->cdev);
cpufreq_cpu_put(data->policy);
return ret;
}
......@@ -556,6 +564,7 @@ static int imx_thermal_probe(struct platform_device *pdev)
if (ret) {
dev_err(&pdev->dev, "failed to enable thermal clk: %d\n", ret);
cpufreq_cooling_unregister(data->cdev);
cpufreq_cpu_put(data->policy);
return ret;
}
......@@ -571,6 +580,7 @@ static int imx_thermal_probe(struct platform_device *pdev)
"failed to register thermal zone device %d\n", ret);
clk_disable_unprepare(data->thermal_clk);
cpufreq_cooling_unregister(data->cdev);
cpufreq_cpu_put(data->policy);
return ret;
}
......@@ -599,6 +609,7 @@ static int imx_thermal_probe(struct platform_device *pdev)
clk_disable_unprepare(data->thermal_clk);
thermal_zone_device_unregister(data->tz);
cpufreq_cooling_unregister(data->cdev);
cpufreq_cpu_put(data->policy);
return ret;
}
......@@ -620,6 +631,7 @@ static int imx_thermal_remove(struct platform_device *pdev)
thermal_zone_device_unregister(data->tz);
cpufreq_cooling_unregister(data->cdev);
cpufreq_cpu_put(data->policy);
return 0;
}
......@@ -648,8 +660,11 @@ static int imx_thermal_resume(struct device *dev)
{
struct imx_thermal_data *data = dev_get_drvdata(dev);
struct regmap *map = data->tempmon;
int ret;
clk_prepare_enable(data->thermal_clk);
ret = clk_prepare_enable(data->thermal_clk);
if (ret)
return ret;
/* Enabled thermal sensor after resume */
regmap_write(map, TEMPSENSE0 + REG_CLR, TEMPSENSE0_POWER_DOWN);
regmap_write(map, TEMPSENSE0 + REG_SET, TEMPSENSE0_MEASURE_TEMP);
......
......@@ -62,8 +62,8 @@ static int acpi_thermal_rel_release(struct inode *inode, struct file *file)
* acpi_parse_trt - Thermal Relationship Table _TRT for passive cooling
*
* @handle: ACPI handle of the device contains _TRT
* @art_count: the number of valid entries resulted from parsing _TRT
* @artp: pointer to pointer of array of art entries in parsing result
* @trt_count: the number of valid entries resulted from parsing _TRT
* @trtp: pointer to pointer of array of _TRT entries in parsing result
* @create_dev: whether to create platform devices for target and source
*
*/
......@@ -208,7 +208,7 @@ int acpi_parse_art(acpi_handle handle, int *art_count, struct art **artp,
if (art->target) {
result = acpi_bus_get_device(art->target, &adev);
if (result)
pr_warn("Failed to get source ACPI device\n");
pr_warn("Failed to get target ACPI device\n");
}
}
......
......@@ -238,8 +238,16 @@ static int int3403_add(struct platform_device *pdev)
status = acpi_evaluate_integer(priv->adev->handle, "PTYP",
NULL, &priv->type);
if (ACPI_FAILURE(status)) {
result = -EINVAL;
goto err;
unsigned long long tmp;
status = acpi_evaluate_integer(priv->adev->handle, "_TMP",
NULL, &tmp);
if (ACPI_FAILURE(status)) {
result = -EINVAL;
goto err;
} else {
priv->type = INT3403_TYPE_SENSOR;
}
}
platform_set_drvdata(pdev, priv);
......
......@@ -186,8 +186,7 @@ static void thermal_zone_trip_update(struct thermal_zone_device *tz, int trip)
/**
* step_wise_throttle - throttles devices associated with the given zone
* @tz - thermal_zone_device
* @trip - the trip point
* @trip_type - type of the trip point
* @trip - trip point index
*
* Throttling Logic: This uses the trend of the thermal zone to throttle.
* If the thermal zone is 'heating up' this throttles all the cooling
......
......@@ -28,6 +28,7 @@
#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/thermal.h>
#include <linux/cpufreq.h>
#include <linux/cpumask.h>
#include <linux/cpu_cooling.h>
#include <linux/of.h>
......@@ -37,6 +38,7 @@
/* common data structures */
struct ti_thermal_data {
struct cpufreq_policy *policy;
struct thermal_zone_device *ti_thermal;
struct thermal_zone_device *pcb_tz;
struct thermal_cooling_device *cool_dev;
......@@ -247,15 +249,19 @@ int ti_thermal_register_cpu_cooling(struct ti_bandgap *bgp, int id)
if (!data)
return -EINVAL;
data->policy = cpufreq_cpu_get(0);
if (!data->policy) {
pr_debug("%s: CPUFreq policy not found\n", __func__);
return -EPROBE_DEFER;
}
/* Register cooling device */
data->cool_dev = cpufreq_cooling_register(cpu_present_mask);
data->cool_dev = cpufreq_cooling_register(data->policy);
if (IS_ERR(data->cool_dev)) {
int ret = PTR_ERR(data->cool_dev);
if (ret != -EPROBE_DEFER)
dev_err(bgp->dev,
"Failed to register cpu cooling device %d\n",
ret);
dev_err(bgp->dev, "Failed to register cpu cooling device %d\n",
ret);
cpufreq_cpu_put(data->policy);
return ret;
}
......@@ -270,8 +276,10 @@ int ti_thermal_unregister_cpu_cooling(struct ti_bandgap *bgp, int id)
data = ti_bandgap_get_sensor_data(bgp, id);
if (data)
if (data) {
cpufreq_cooling_unregister(data->cool_dev);
cpufreq_cpu_put(data->policy);
}
return 0;
}
......@@ -24,12 +24,13 @@
#include <linux/thermal.h>
#include <linux/slab.h>
#include "thermal_core.h"
/**
* notify_user_space - Notifies user space about thermal events
* @tz - thermal_zone_device
* @trip - Trip point index
* @trip - trip point index
*
* This function notifies the user space through UEvents.
*/
......
......@@ -28,47 +28,49 @@
#include <linux/thermal.h>
#include <linux/cpumask.h>
struct cpufreq_policy;
typedef int (*get_static_t)(cpumask_t *cpumask, int interval,
unsigned long voltage, u32 *power);
#ifdef CONFIG_CPU_THERMAL
/**
* cpufreq_cooling_register - function to create cpufreq cooling device.
* @clip_cpus: cpumask of cpus where the frequency constraints will happen
* @policy: cpufreq policy.
*/
struct thermal_cooling_device *
cpufreq_cooling_register(const struct cpumask *clip_cpus);
cpufreq_cooling_register(struct cpufreq_policy *policy);
struct thermal_cooling_device *
cpufreq_power_cooling_register(const struct cpumask *clip_cpus,
cpufreq_power_cooling_register(struct cpufreq_policy *policy,
u32 capacitance, get_static_t plat_static_func);
/**
* of_cpufreq_cooling_register - create cpufreq cooling device based on DT.
* @np: a valid struct device_node to the cooling device device tree node.
* @clip_cpus: cpumask of cpus where the frequency constraints will happen
* @policy: cpufreq policy.
*/
#ifdef CONFIG_THERMAL_OF
struct thermal_cooling_device *
of_cpufreq_cooling_register(struct device_node *np,
const struct cpumask *clip_cpus);
struct cpufreq_policy *policy);
struct thermal_cooling_device *
of_cpufreq_power_cooling_register(struct device_node *np,
const struct cpumask *clip_cpus,
struct cpufreq_policy *policy,
u32 capacitance,
get_static_t plat_static_func);
#else
static inline struct thermal_cooling_device *
of_cpufreq_cooling_register(struct device_node *np,
const struct cpumask *clip_cpus)
struct cpufreq_policy *policy)
{
return ERR_PTR(-ENOSYS);
}
static inline struct thermal_cooling_device *
of_cpufreq_power_cooling_register(struct device_node *np,
const struct cpumask *clip_cpus,
struct cpufreq_policy *policy,
u32 capacitance,
get_static_t plat_static_func)
{
......@@ -82,15 +84,14 @@ of_cpufreq_power_cooling_register(struct device_node *np,
*/
void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev);
unsigned long cpufreq_cooling_get_level(unsigned int cpu, unsigned int freq);
#else /* !CONFIG_CPU_THERMAL */
static inline struct thermal_cooling_device *
cpufreq_cooling_register(const struct cpumask *clip_cpus)
cpufreq_cooling_register(struct cpufreq_policy *policy)
{
return ERR_PTR(-ENOSYS);
}
static inline struct thermal_cooling_device *
cpufreq_power_cooling_register(const struct cpumask *clip_cpus,
cpufreq_power_cooling_register(struct cpufreq_policy *policy,
u32 capacitance, get_static_t plat_static_func)
{
return NULL;
......@@ -98,14 +99,14 @@ cpufreq_power_cooling_register(const struct cpumask *clip_cpus,
static inline struct thermal_cooling_device *
of_cpufreq_cooling_register(struct device_node *np,
const struct cpumask *clip_cpus)
struct cpufreq_policy *policy)
{
return ERR_PTR(-ENOSYS);
}
static inline struct thermal_cooling_device *
of_cpufreq_power_cooling_register(struct device_node *np,
const struct cpumask *clip_cpus,
struct cpufreq_policy *policy,
u32 capacitance,
get_static_t plat_static_func)
{
......@@ -117,11 +118,6 @@ void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
{
return;
}
static inline
unsigned long cpufreq_cooling_get_level(unsigned int cpu, unsigned int freq)
{
return THERMAL_CSTATE_INVALID;
}
#endif /* CONFIG_CPU_THERMAL */
#endif /* __CPU_COOLING_H__ */
......@@ -862,6 +862,20 @@ static inline int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
return -EINVAL;
}
}
static inline int cpufreq_table_count_valid_entries(const struct cpufreq_policy *policy)
{
struct cpufreq_frequency_table *pos;
int count = 0;
if (unlikely(!policy->freq_table))
return 0;
cpufreq_for_each_valid_entry(pos, policy->freq_table)
count++;
return count;
}
#else
static inline int cpufreq_boost_trigger_state(int state)
{
......