Commit 4960821a authored by Linus Torvalds

Merge tag 'pm-5.11-rc1-2' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm

Pull more power management updates from Rafael Wysocki:
 "These update the CPPC cpufreq driver and intel_pstate (which involves
  updating the cpufreq core and the schedutil governor) and make
  janitorial changes in the ACPI code handling processor objects.

  Specifics:

   - Rework the passive-mode "fast switch" path in the intel_pstate
     driver to allow it to receive the minimum (required) and target
     (desired) performance information from the schedutil governor, so as
     to avoid running some workloads too fast (Rafael Wysocki); a sketch
     of the resulting governor-to-driver flow follows this list.

   - Make the intel_pstate driver allow the policy max limit to be
     increased after the guaranteed performance value for the given CPU
     has increased (Rafael Wysocki).

   - Clean up the handling of CPU coordination types in the CPPC cpufreq
     driver and make it export frequency domains information to user
     space via sysfs (Ionela Voinescu).

   - Fix the ACPI code handling processor objects to use a correct
     coordination type when it fails to map frequency domains and drop a
     redundant CPU map initialization from it (Ionela Voinescu, Punit
     Agrawal)"

* tag 'pm-5.11-rc1-2' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm:
  cpufreq: intel_pstate: Use most recent guaranteed performance values
  cpufreq: intel_pstate: Implement the ->adjust_perf() callback
  cpufreq: Add special-purpose fast-switching callback for drivers
  cpufreq: schedutil: Add util to struct sg_cpu
  cppc_cpufreq: replace per-cpu data array with a list
  cppc_cpufreq: expose information on frequency domains
  cppc_cpufreq: clarify support for coordination types
  cppc_cpufreq: use policy->cpu as driver of frequency setting
  ACPI: processor: fix NONE coordination for domain mapping failure
parents 2762db75 c3a74f8e
@@ -264,7 +264,8 @@ Description:	Discover CPUs in the same CPU frequency coordination domain
 		attribute is useful for user space DVFS controllers to get better
 		power/performance results for platforms using acpi-cpufreq.
 
-		This file is only present if the acpi-cpufreq driver is in use.
+		This file is only present if the acpi-cpufreq or the cppc-cpufreq
+		drivers are in use.
 
 What:		/sys/devices/system/cpu/cpu*/cache/index3/cache_disable_{0,1}
...
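With the ABI update above, a userspace DVFS controller can discover
frequency-coordination domains on CPPC systems the same way it already
could with acpi-cpufreq. A minimal reader, sketched for cpu0 (the path
comes from the ABI entry; the program itself is illustrative, not part of
this merge):

	#include <stdio.h>

	int main(void)
	{
		const char *path =
			"/sys/devices/system/cpu/cpu0/cpufreq/freqdomain_cpus";
		char buf[256];
		FILE *f = fopen(path, "r");

		if (!f) {
			perror(path);	/* driver absent or attribute missing */
			return 1;
		}
		if (fgets(buf, sizeof(buf), f))
			printf("CPUs coordinating with cpu0: %s", buf);
		fclose(f);
		return 0;
	}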
@@ -414,109 +414,88 @@ static int acpi_get_psd(struct cpc_desc *cpc_ptr, acpi_handle handle)
 	return result;
 }
 
+bool acpi_cpc_valid(void)
+{
+	struct cpc_desc *cpc_ptr;
+	int cpu;
+
+	for_each_possible_cpu(cpu) {
+		cpc_ptr = per_cpu(cpc_desc_ptr, cpu);
+		if (!cpc_ptr)
+			return false;
+	}
+
+	return true;
+}
+EXPORT_SYMBOL_GPL(acpi_cpc_valid);
+
 /**
- * acpi_get_psd_map - Map the CPUs in a common freq domain.
- * @all_cpu_data: Ptrs to CPU specific CPPC data including PSD info.
+ * acpi_get_psd_map - Map the CPUs in the freq domain of a given cpu
+ * @cpu: Find all CPUs that share a domain with cpu.
+ * @cpu_data: Pointer to CPU specific CPPC data including PSD info.
  *
  * Return: 0 for success or negative value for err.
  */
-int acpi_get_psd_map(struct cppc_cpudata **all_cpu_data)
+int acpi_get_psd_map(unsigned int cpu, struct cppc_cpudata *cpu_data)
 {
-	int count_target;
-	int retval = 0;
-	unsigned int i, j;
-	cpumask_var_t covered_cpus;
-	struct cppc_cpudata *pr, *match_pr;
-	struct acpi_psd_package *pdomain;
-	struct acpi_psd_package *match_pdomain;
 	struct cpc_desc *cpc_ptr, *match_cpc_ptr;
-
-	if (!zalloc_cpumask_var(&covered_cpus, GFP_KERNEL))
-		return -ENOMEM;
+	struct acpi_psd_package *match_pdomain;
+	struct acpi_psd_package *pdomain;
+	int count_target, i;
 
 	/*
 	 * Now that we have _PSD data from all CPUs, let's setup P-state
 	 * domain info.
 	 */
-	for_each_possible_cpu(i) {
-		if (cpumask_test_cpu(i, covered_cpus))
-			continue;
-
-		pr = all_cpu_data[i];
-		cpc_ptr = per_cpu(cpc_desc_ptr, i);
-		if (!cpc_ptr) {
-			retval = -EFAULT;
-			goto err_ret;
-		}
+	cpc_ptr = per_cpu(cpc_desc_ptr, cpu);
+	if (!cpc_ptr)
+		return -EFAULT;
 
-		pdomain = &(cpc_ptr->domain_info);
-		cpumask_set_cpu(i, pr->shared_cpu_map);
-		cpumask_set_cpu(i, covered_cpus);
-		if (pdomain->num_processors <= 1)
-			continue;
+	pdomain = &(cpc_ptr->domain_info);
+	cpumask_set_cpu(cpu, cpu_data->shared_cpu_map);
+	if (pdomain->num_processors <= 1)
+		return 0;
 
-		/* Validate the Domain info */
-		count_target = pdomain->num_processors;
-		if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ALL)
-			pr->shared_type = CPUFREQ_SHARED_TYPE_ALL;
-		else if (pdomain->coord_type == DOMAIN_COORD_TYPE_HW_ALL)
-			pr->shared_type = CPUFREQ_SHARED_TYPE_HW;
-		else if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ANY)
-			pr->shared_type = CPUFREQ_SHARED_TYPE_ANY;
+	/* Validate the Domain info */
+	count_target = pdomain->num_processors;
+	if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ALL)
+		cpu_data->shared_type = CPUFREQ_SHARED_TYPE_ALL;
+	else if (pdomain->coord_type == DOMAIN_COORD_TYPE_HW_ALL)
+		cpu_data->shared_type = CPUFREQ_SHARED_TYPE_HW;
+	else if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ANY)
+		cpu_data->shared_type = CPUFREQ_SHARED_TYPE_ANY;
 
-		for_each_possible_cpu(j) {
-			if (i == j)
-				continue;
-
-			match_cpc_ptr = per_cpu(cpc_desc_ptr, j);
-			if (!match_cpc_ptr) {
-				retval = -EFAULT;
-				goto err_ret;
-			}
+	for_each_possible_cpu(i) {
+		if (i == cpu)
+			continue;
 
-			match_pdomain = &(match_cpc_ptr->domain_info);
-			if (match_pdomain->domain != pdomain->domain)
-				continue;
+		match_cpc_ptr = per_cpu(cpc_desc_ptr, i);
+		if (!match_cpc_ptr)
+			goto err_fault;
 
-			/* Here i and j are in the same domain */
-			if (match_pdomain->num_processors != count_target) {
-				retval = -EFAULT;
-				goto err_ret;
-			}
+		match_pdomain = &(match_cpc_ptr->domain_info);
+		if (match_pdomain->domain != pdomain->domain)
+			continue;
 
-			if (pdomain->coord_type != match_pdomain->coord_type) {
-				retval = -EFAULT;
-				goto err_ret;
-			}
+		/* Here i and cpu are in the same domain */
+		if (match_pdomain->num_processors != count_target)
+			goto err_fault;
 
-			cpumask_set_cpu(j, covered_cpus);
-			cpumask_set_cpu(j, pr->shared_cpu_map);
-		}
+		if (pdomain->coord_type != match_pdomain->coord_type)
+			goto err_fault;
 
-		for_each_cpu(j, pr->shared_cpu_map) {
-			if (i == j)
-				continue;
-
-			match_pr = all_cpu_data[j];
-			match_pr->shared_type = pr->shared_type;
-			cpumask_copy(match_pr->shared_cpu_map,
-				     pr->shared_cpu_map);
-		}
+		cpumask_set_cpu(i, cpu_data->shared_cpu_map);
 	}
-	goto out;
 
-err_ret:
-	for_each_possible_cpu(i) {
-		pr = all_cpu_data[i];
+	return 0;
 
-		/* Assume no coordination on any error parsing domain info */
-		cpumask_clear(pr->shared_cpu_map);
-		cpumask_set_cpu(i, pr->shared_cpu_map);
-		pr->shared_type = CPUFREQ_SHARED_TYPE_ALL;
-	}
+err_fault:
+	/* Assume no coordination on any error parsing domain info */
+	cpumask_clear(cpu_data->shared_cpu_map);
+	cpumask_set_cpu(cpu, cpu_data->shared_cpu_map);
+	cpu_data->shared_type = CPUFREQ_SHARED_TYPE_NONE;
 
-out:
-	free_cpumask_var(covered_cpus);
-	return retval;
+	return -EFAULT;
 }
 EXPORT_SYMBOL_GPL(acpi_get_psd_map);
...
@@ -708,7 +708,7 @@ int acpi_processor_preregister_performance(
 		if (retval) {
 			cpumask_clear(pr->performance->shared_cpu_map);
 			cpumask_set_cpu(i, pr->performance->shared_cpu_map);
-			pr->performance->shared_type = CPUFREQ_SHARED_TYPE_ALL;
+			pr->performance->shared_type = CPUFREQ_SHARED_TYPE_NONE;
 		}
 
 		pr->performance = NULL; /* Will be set for real in register */
 	}
...
@@ -30,13 +30,13 @@
 
 #define DMI_PROCESSOR_MAX_SPEED		0x14
 
 /*
- * These structs contain information parsed from per CPU
- * ACPI _CPC structures.
- * e.g. For each CPU the highest, lowest supported
- * performance capabilities, desired performance level
- * requested etc.
+ * This list contains information parsed from per CPU ACPI _CPC and _PSD
+ * structures: e.g. the highest and lowest supported performance, capabilities,
+ * desired performance, level requested etc. Depending on the share_type, not
+ * all CPUs will have an entry in the list.
  */
-static struct cppc_cpudata **all_cpu_data;
+static LIST_HEAD(cpu_data_list);
+
 static bool boost_supported;
 
 struct cppc_workaround_oem_info {
@@ -148,8 +148,10 @@ static unsigned int cppc_cpufreq_khz_to_perf(struct cppc_cpudata *cpu_data,
 static int cppc_cpufreq_set_target(struct cpufreq_policy *policy,
 				   unsigned int target_freq,
 				   unsigned int relation)
 {
-	struct cppc_cpudata *cpu_data = all_cpu_data[policy->cpu];
+	struct cppc_cpudata *cpu_data = policy->driver_data;
+	unsigned int cpu = policy->cpu;
 	struct cpufreq_freqs freqs;
 	u32 desired_perf;
 	int ret = 0;
@@ -164,12 +166,12 @@ static int cppc_cpufreq_set_target(struct cpufreq_policy *policy,
 	freqs.new = target_freq;
 
 	cpufreq_freq_transition_begin(policy, &freqs);
-	ret = cppc_set_perf(cpu_data->cpu, &cpu_data->perf_ctrls);
+	ret = cppc_set_perf(cpu, &cpu_data->perf_ctrls);
 	cpufreq_freq_transition_end(policy, &freqs, ret != 0);
 
 	if (ret)
 		pr_debug("Failed to set target on CPU:%d. ret:%d\n",
-			 cpu_data->cpu, ret);
+			 cpu, ret);
 
 	return ret;
 }
@@ -182,7 +184,7 @@ static int cppc_verify_policy(struct cpufreq_policy_data *policy)
 
 static void cppc_cpufreq_stop_cpu(struct cpufreq_policy *policy)
 {
-	struct cppc_cpudata *cpu_data = all_cpu_data[policy->cpu];
+	struct cppc_cpudata *cpu_data = policy->driver_data;
 	struct cppc_perf_caps *caps = &cpu_data->perf_caps;
 	unsigned int cpu = policy->cpu;
 	int ret;
@@ -193,6 +195,12 @@ static void cppc_cpufreq_stop_cpu(struct cpufreq_policy *policy)
 	if (ret)
 		pr_debug("Err setting perf value:%d on CPU:%d. ret:%d\n",
 			 caps->lowest_perf, cpu, ret);
+
+	/* Remove CPU node from list and free driver data for policy */
+	free_cpumask_var(cpu_data->shared_cpu_map);
+	list_del(&cpu_data->node);
+	kfree(policy->driver_data);
+	policy->driver_data = NULL;
 }
 
 /*
@@ -238,25 +246,61 @@ static unsigned int cppc_cpufreq_get_transition_delay_us(unsigned int cpu)
 }
 #endif
 
-static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy)
+static struct cppc_cpudata *cppc_cpufreq_get_cpu_data(unsigned int cpu)
 {
-	struct cppc_cpudata *cpu_data = all_cpu_data[policy->cpu];
-	struct cppc_perf_caps *caps = &cpu_data->perf_caps;
-	unsigned int cpu = policy->cpu;
-	int ret = 0;
+	struct cppc_cpudata *cpu_data;
+	int ret;
 
-	cpu_data->cpu = cpu;
-	ret = cppc_get_perf_caps(cpu, caps);
+	cpu_data = kzalloc(sizeof(struct cppc_cpudata), GFP_KERNEL);
+	if (!cpu_data)
+		goto out;
 
+	if (!zalloc_cpumask_var(&cpu_data->shared_cpu_map, GFP_KERNEL))
+		goto free_cpu;
+
+	ret = acpi_get_psd_map(cpu, cpu_data);
 	if (ret) {
-		pr_debug("Err reading CPU%d perf capabilities. ret:%d\n",
-			 cpu, ret);
-		return ret;
+		pr_debug("Err parsing CPU%d PSD data: ret:%d\n", cpu, ret);
+		goto free_mask;
+	}
+
+	ret = cppc_get_perf_caps(cpu, &cpu_data->perf_caps);
+	if (ret) {
+		pr_debug("Err reading CPU%d perf caps: ret:%d\n", cpu, ret);
+		goto free_mask;
 	}
 
 	/* Convert the lowest and nominal freq from MHz to KHz */
-	caps->lowest_freq *= 1000;
-	caps->nominal_freq *= 1000;
+	cpu_data->perf_caps.lowest_freq *= 1000;
+	cpu_data->perf_caps.nominal_freq *= 1000;
+
+	list_add(&cpu_data->node, &cpu_data_list);
+
+	return cpu_data;
+
+free_mask:
+	free_cpumask_var(cpu_data->shared_cpu_map);
+free_cpu:
+	kfree(cpu_data);
+out:
+	return NULL;
+}
+
+static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy)
+{
+	unsigned int cpu = policy->cpu;
+	struct cppc_cpudata *cpu_data;
+	struct cppc_perf_caps *caps;
+	int ret;
+
+	cpu_data = cppc_cpufreq_get_cpu_data(cpu);
+	if (!cpu_data) {
+		pr_err("Error in acquiring _CPC/_PSD data for CPU%d.\n", cpu);
+		return -ENODEV;
+	}
+
+	caps = &cpu_data->perf_caps;
+	policy->driver_data = cpu_data;
 
 	/*
 	 * Set min to lowest nonlinear perf to avoid any efficiency penalty (see
@@ -280,26 +324,25 @@ static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy)
 	policy->transition_delay_us = cppc_cpufreq_get_transition_delay_us(cpu);
 	policy->shared_type = cpu_data->shared_type;
 
-	if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) {
-		int i;
-
+	switch (policy->shared_type) {
+	case CPUFREQ_SHARED_TYPE_HW:
+	case CPUFREQ_SHARED_TYPE_NONE:
+		/* Nothing to be done - we'll have a policy for each CPU */
+		break;
+	case CPUFREQ_SHARED_TYPE_ANY:
+		/*
+		 * All CPUs in the domain will share a policy and all cpufreq
+		 * operations will use a single cppc_cpudata structure stored
+		 * in policy->driver_data.
+		 */
 		cpumask_copy(policy->cpus, cpu_data->shared_cpu_map);
-
-		for_each_cpu(i, policy->cpus) {
-			if (unlikely(i == cpu))
-				continue;
-
-			memcpy(&all_cpu_data[i]->perf_caps, caps,
-			       sizeof(cpu_data->perf_caps));
-		}
-	} else if (policy->shared_type == CPUFREQ_SHARED_TYPE_ALL) {
-		/* Support only SW_ANY for now. */
-		pr_debug("Unsupported CPU co-ord type\n");
+		break;
+	default:
+		pr_debug("Unsupported CPU co-ord type: %d\n",
+			 policy->shared_type);
 		return -EFAULT;
 	}
 
-	cpu_data->cur_policy = policy;
-
 	/*
 	 * If 'highest_perf' is greater than 'nominal_perf', we assume CPU Boost
 	 * is supported.
@@ -354,9 +397,12 @@ static int cppc_get_rate_from_fbctrs(struct cppc_cpudata *cpu_data,
 static unsigned int cppc_cpufreq_get_rate(unsigned int cpu)
 {
 	struct cppc_perf_fb_ctrs fb_ctrs_t0 = {0}, fb_ctrs_t1 = {0};
-	struct cppc_cpudata *cpu_data = all_cpu_data[cpu];
+	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
+	struct cppc_cpudata *cpu_data = policy->driver_data;
 	int ret;
 
+	cpufreq_cpu_put(policy);
+
 	ret = cppc_get_perf_ctrs(cpu, &fb_ctrs_t0);
 	if (ret)
 		return ret;
@@ -372,7 +418,7 @@ static unsigned int cppc_cpufreq_get_rate(unsigned int cpu)
 
 static int cppc_cpufreq_set_boost(struct cpufreq_policy *policy, int state)
 {
-	struct cppc_cpudata *cpu_data = all_cpu_data[policy->cpu];
+	struct cppc_cpudata *cpu_data = policy->driver_data;
 	struct cppc_perf_caps *caps = &cpu_data->perf_caps;
 	int ret;
 
@@ -396,6 +442,19 @@ static int cppc_cpufreq_set_boost(struct cpufreq_policy *policy, int state)
 	return 0;
 }
 
+static ssize_t show_freqdomain_cpus(struct cpufreq_policy *policy, char *buf)
+{
+	struct cppc_cpudata *cpu_data = policy->driver_data;
+
+	return cpufreq_show_cpus(cpu_data->shared_cpu_map, buf);
+}
+cpufreq_freq_attr_ro(freqdomain_cpus);
+
+static struct freq_attr *cppc_cpufreq_attr[] = {
+	&freqdomain_cpus,
+	NULL,
+};
+
 static struct cpufreq_driver cppc_cpufreq_driver = {
 	.flags = CPUFREQ_CONST_LOOPS,
 	.verify = cppc_verify_policy,
@@ -404,6 +463,7 @@ static struct cpufreq_driver cppc_cpufreq_driver = {
 	.init = cppc_cpufreq_cpu_init,
 	.stop_cpu = cppc_cpufreq_stop_cpu,
 	.set_boost = cppc_cpufreq_set_boost,
+	.attr = cppc_cpufreq_attr,
 	.name = "cppc_cpufreq",
 };
@@ -415,10 +475,13 @@ static struct cpufreq_driver cppc_cpufreq_driver = {
  */
 static unsigned int hisi_cppc_cpufreq_get_rate(unsigned int cpu)
 {
-	struct cppc_cpudata *cpu_data = all_cpu_data[cpu];
+	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
+	struct cppc_cpudata *cpu_data = policy->driver_data;
 	u64 desired_perf;
 	int ret;
 
+	cpufreq_cpu_put(policy);
+
 	ret = cppc_get_desired_perf(cpu, &desired_perf);
 	if (ret < 0)
 		return -EIO;
@@ -451,68 +514,33 @@ static void cppc_check_hisi_workaround(void)
 
 static int __init cppc_cpufreq_init(void)
 {
-	struct cppc_cpudata *cpu_data;
-	int i, ret = 0;
-
-	if (acpi_disabled)
+	if ((acpi_disabled) || !acpi_cpc_valid())
 		return -ENODEV;
 
-	all_cpu_data = kcalloc(num_possible_cpus(), sizeof(void *),
-			       GFP_KERNEL);
-	if (!all_cpu_data)
-		return -ENOMEM;
-
-	for_each_possible_cpu(i) {
-		all_cpu_data[i] = kzalloc(sizeof(struct cppc_cpudata), GFP_KERNEL);
-		if (!all_cpu_data[i])
-			goto out;
-
-		cpu_data = all_cpu_data[i];
-		if (!zalloc_cpumask_var(&cpu_data->shared_cpu_map, GFP_KERNEL))
-			goto out;
-	}
-
-	ret = acpi_get_psd_map(all_cpu_data);
-	if (ret) {
-		pr_debug("Error parsing PSD data. Aborting cpufreq registration.\n");
-		goto out;
-	}
+	INIT_LIST_HEAD(&cpu_data_list);
 
 	cppc_check_hisi_workaround();
 
-	ret = cpufreq_register_driver(&cppc_cpufreq_driver);
-	if (ret)
-		goto out;
-
-	return ret;
+	return cpufreq_register_driver(&cppc_cpufreq_driver);
+}
 
-out:
-	for_each_possible_cpu(i) {
-		cpu_data = all_cpu_data[i];
-		if (!cpu_data)
-			break;
+static inline void free_cpu_data(void)
+{
+	struct cppc_cpudata *iter, *tmp;
 
-		free_cpumask_var(cpu_data->shared_cpu_map);
-		kfree(cpu_data);
+	list_for_each_entry_safe(iter, tmp, &cpu_data_list, node) {
+		free_cpumask_var(iter->shared_cpu_map);
+		list_del(&iter->node);
+		kfree(iter);
 	}
-
-	kfree(all_cpu_data);
-	return -ENODEV;
 }
 
 static void __exit cppc_cpufreq_exit(void)
 {
-	struct cppc_cpudata *cpu_data;
-	int i;
-
 	cpufreq_unregister_driver(&cppc_cpufreq_driver);
 
-	for_each_possible_cpu(i) {
-		cpu_data = all_cpu_data[i];
-		free_cpumask_var(cpu_data->shared_cpu_map);
-		kfree(cpu_data);
-	}
-
-	kfree(all_cpu_data);
+	free_cpu_data();
 }
 
 module_exit(cppc_cpufreq_exit);
...
@@ -2097,6 +2097,46 @@ unsigned int cpufreq_driver_fast_switch(struct cpufreq_policy *policy,
 }
 EXPORT_SYMBOL_GPL(cpufreq_driver_fast_switch);
 
+/**
+ * cpufreq_driver_adjust_perf - Adjust CPU performance level in one go.
+ * @cpu: Target CPU.
+ * @min_perf: Minimum (required) performance level (units of @capacity).
+ * @target_perf: Target (desired) performance level (units of @capacity).
+ * @capacity: Capacity of the target CPU.
+ *
+ * Carry out a fast performance level switch of @cpu without sleeping.
+ *
+ * The driver's ->adjust_perf() callback invoked by this function must be
+ * suitable for being called from within RCU-sched read-side critical sections
+ * and it is expected to select a suitable performance level equal to or above
+ * @min_perf and preferably equal to or below @target_perf.
+ *
+ * This function must not be called if policy->fast_switch_enabled is unset.
+ *
+ * Governors calling this function must guarantee that it will never be invoked
+ * twice in parallel for the same CPU and that it will never be called in
+ * parallel with either ->target() or ->target_index() or ->fast_switch() for
+ * the same CPU.
+ */
+void cpufreq_driver_adjust_perf(unsigned int cpu,
+				unsigned long min_perf,
+				unsigned long target_perf,
+				unsigned long capacity)
+{
+	cpufreq_driver->adjust_perf(cpu, min_perf, target_perf, capacity);
+}
+
+/**
+ * cpufreq_driver_has_adjust_perf - Check "direct fast switch" callback.
+ *
+ * Return 'true' if the ->adjust_perf callback is present for the
+ * current driver or 'false' otherwise.
+ */
+bool cpufreq_driver_has_adjust_perf(void)
+{
+	return !!cpufreq_driver->adjust_perf;
+}
+
 /* Must set freqs->new to intermediate frequency */
 static int __target_intermediate(struct cpufreq_policy *policy,
 				 struct cpufreq_freqs *freqs, int index)
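For driver authors, opting into this path amounts to populating the new
callback declared in struct cpufreq_driver (see the cpufreq.h hunk below).
A hypothetical skeleton, with invented names and no claim about the other
callbacks a real driver needs:

	/* Hypothetical driver skeleton showing where ->adjust_perf() plugs in. */
	static void foo_adjust_perf(unsigned int cpu, unsigned long min_perf,
				    unsigned long target_perf,
				    unsigned long capacity)
	{
		/*
		 * Must not sleep: this can run in an RCU-sched read-side
		 * critical section. Pick a performance level at or above
		 * min_perf, preferably at or below target_perf.
		 */
	}

	static struct cpufreq_driver foo_cpufreq_driver = {
		.name		= "foo_cpufreq",
		.adjust_perf	= foo_adjust_perf,
		/* ->init(), ->verify() and the usual callbacks go here. */
	};

Note that intel_pstate, the first user, clears ->fast_switch() when it
wires up ->adjust_perf() (see intel_pstate_init() below), and schedutil
only takes this path when policy->fast_switch_enabled is set and
cpufreq_driver_has_adjust_perf() returns true.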
...
@@ -2207,9 +2207,9 @@ static void intel_pstate_update_perf_limits(struct cpudata *cpu,
 					    unsigned int policy_min,
 					    unsigned int policy_max)
 {
-	int max_freq = intel_pstate_get_max_freq(cpu);
 	int32_t max_policy_perf, min_policy_perf;
 	int max_state, turbo_max;
+	int max_freq;
 
 	/*
 	 * HWP needs some special consideration, because on BDX the
@@ -2223,6 +2223,7 @@ static void intel_pstate_update_perf_limits(struct cpudata *cpu,
 			cpu->pstate.max_pstate : cpu->pstate.turbo_pstate;
 		turbo_max = cpu->pstate.turbo_pstate;
 	}
+	max_freq = max_state * cpu->pstate.scaling;
 
 	max_policy_perf = max_state * policy_max / max_freq;
 	if (policy_max == policy_min) {
@@ -2325,9 +2326,18 @@ static void intel_pstate_adjust_policy_max(struct cpudata *cpu,
 static void intel_pstate_verify_cpu_policy(struct cpudata *cpu,
 					   struct cpufreq_policy_data *policy)
 {
+	int max_freq;
+
 	update_turbo_state();
-	cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
-				     intel_pstate_get_max_freq(cpu));
+	if (hwp_active) {
+		int max_state, turbo_max;
+
+		intel_pstate_get_hwp_max(cpu->cpu, &turbo_max, &max_state);
+		max_freq = max_state * cpu->pstate.scaling;
+	} else {
+		max_freq = intel_pstate_get_max_freq(cpu);
+	}
+	cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq, max_freq);
 
 	intel_pstate_adjust_policy_max(cpu, policy);
 }
@@ -2526,20 +2536,19 @@ static void intel_cpufreq_trace(struct cpudata *cpu, unsigned int trace_type, int old_pstate)
 		fp_toint(cpu->iowait_boost * 100));
 }
 
-static void intel_cpufreq_adjust_hwp(struct cpudata *cpu, u32 target_pstate,
-				     bool strict, bool fast_switch)
+static void intel_cpufreq_adjust_hwp(struct cpudata *cpu, u32 min, u32 max,
+				     u32 desired, bool fast_switch)
 {
 	u64 prev = READ_ONCE(cpu->hwp_req_cached), value = prev;
 
 	value &= ~HWP_MIN_PERF(~0L);
-	value |= HWP_MIN_PERF(target_pstate);
+	value |= HWP_MIN_PERF(min);
 
-	/*
-	 * The entire MSR needs to be updated in order to update the HWP min
-	 * field in it, so opportunistically update the max too if needed.
-	 */
 	value &= ~HWP_MAX_PERF(~0L);
-	value |= HWP_MAX_PERF(strict ? target_pstate : cpu->max_perf_ratio);
+	value |= HWP_MAX_PERF(max);
+
+	value &= ~HWP_DESIRED_PERF(~0L);
+	value |= HWP_DESIRED_PERF(desired);
 
 	if (value == prev)
 		return;
@@ -2569,11 +2578,15 @@ static int intel_cpufreq_update_pstate(struct cpufreq_policy *policy,
 	int old_pstate = cpu->pstate.current_pstate;
 
 	target_pstate = intel_pstate_prepare_request(cpu, target_pstate);
-	if (hwp_active)
-		intel_cpufreq_adjust_hwp(cpu, target_pstate,
-					 policy->strict_target, fast_switch);
-	else if (target_pstate != old_pstate)
+	if (hwp_active) {
+		int max_pstate = policy->strict_target ?
+					target_pstate : cpu->max_perf_ratio;
+
+		intel_cpufreq_adjust_hwp(cpu, target_pstate, max_pstate, 0,
+					 fast_switch);
+	} else if (target_pstate != old_pstate) {
 		intel_cpufreq_adjust_perf_ctl(cpu, target_pstate, fast_switch);
+	}
 
 	cpu->pstate.current_pstate = target_pstate;
@@ -2634,6 +2647,47 @@ static unsigned int intel_cpufreq_fast_switch(struct cpufreq_policy *policy,
 	return target_pstate * cpu->pstate.scaling;
 }
 
+static void intel_cpufreq_adjust_perf(unsigned int cpunum,
+				      unsigned long min_perf,
+				      unsigned long target_perf,
+				      unsigned long capacity)
+{
+	struct cpudata *cpu = all_cpu_data[cpunum];
+	int old_pstate = cpu->pstate.current_pstate;
+	int cap_pstate, min_pstate, max_pstate, target_pstate;
+
+	update_turbo_state();
+	cap_pstate = global.turbo_disabled ? cpu->pstate.max_pstate :
+					     cpu->pstate.turbo_pstate;
+
+	/* Optimization: Avoid unnecessary divisions. */
+
+	target_pstate = cap_pstate;
+	if (target_perf < capacity)
+		target_pstate = DIV_ROUND_UP(cap_pstate * target_perf, capacity);
+
+	min_pstate = cap_pstate;
+	if (min_perf < capacity)
+		min_pstate = DIV_ROUND_UP(cap_pstate * min_perf, capacity);
+
+	if (min_pstate < cpu->pstate.min_pstate)
+		min_pstate = cpu->pstate.min_pstate;
+
+	if (min_pstate < cpu->min_perf_ratio)
+		min_pstate = cpu->min_perf_ratio;
+
+	max_pstate = min(cap_pstate, cpu->max_perf_ratio);
+	if (max_pstate < min_pstate)
+		max_pstate = min_pstate;
+
+	target_pstate = clamp_t(int, target_pstate, min_pstate, max_pstate);
+
+	intel_cpufreq_adjust_hwp(cpu, min_pstate, max_pstate, target_pstate, true);
+
+	cpu->pstate.current_pstate = target_pstate;
+	intel_cpufreq_trace(cpu, INTEL_PSTATE_TRACE_FAST_SWITCH, old_pstate);
+}
+
 static int intel_cpufreq_cpu_init(struct cpufreq_policy *policy)
 {
 	int max_state, turbo_max, min_freq, max_freq, ret;
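A quick sanity check of the scaling above, with invented numbers (not
taken from any real platform): a CPU with capacity 1024, a cap P-state of
40 and target_perf = 512 should be asked for P-state 20, DIV_ROUND_UP()
ensuring the mapping never undershoots the hint:

	#include <stdio.h>

	#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))	/* as in the kernel */

	int main(void)
	{
		unsigned long capacity = 1024;		/* arch_scale_cpu_capacity() */
		unsigned long target_perf = 512;	/* half of capacity */
		unsigned long cap_pstate = 40;		/* turbo P-state, invented */

		/* Same computation as intel_cpufreq_adjust_perf() above: */
		printf("target_pstate = %lu\n",
		       DIV_ROUND_UP(cap_pstate * target_perf, capacity));	/* 20 */
		return 0;
	}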
@@ -3032,6 +3086,8 @@ static int __init intel_pstate_init(void)
 			intel_pstate.attr = hwp_cpufreq_attrs;
 			intel_cpufreq.attr = hwp_cpufreq_attrs;
 			intel_cpufreq.flags |= CPUFREQ_NEED_UPDATE_LIMITS;
+			intel_cpufreq.fast_switch = NULL;
+			intel_cpufreq.adjust_perf = intel_cpufreq_adjust_perf;
 			if (!default_driver)
 				default_driver = &intel_pstate;
 
...
@@ -124,11 +124,10 @@ struct cppc_perf_fb_ctrs {
 
 /* Per CPU container for runtime CPPC management. */
 struct cppc_cpudata {
-	int cpu;
+	struct list_head node;
 	struct cppc_perf_caps perf_caps;
 	struct cppc_perf_ctrls perf_ctrls;
 	struct cppc_perf_fb_ctrs perf_fb_ctrs;
-	struct cpufreq_policy *cur_policy;
 	unsigned int shared_type;
 	cpumask_var_t shared_cpu_map;
 };
@@ -137,7 +136,8 @@ extern int cppc_get_desired_perf(int cpunum, u64 *desired_perf);
 extern int cppc_get_perf_ctrs(int cpu, struct cppc_perf_fb_ctrs *perf_fb_ctrs);
 extern int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls);
 extern int cppc_get_perf_caps(int cpu, struct cppc_perf_caps *caps);
-extern int acpi_get_psd_map(struct cppc_cpudata **);
+extern bool acpi_cpc_valid(void);
+extern int acpi_get_psd_map(unsigned int cpu, struct cppc_cpudata *cpu_data);
 extern unsigned int cppc_get_transition_latency(int cpu);
 extern bool cpc_ffh_supported(void);
 extern int cpc_read_ffh(int cpunum, struct cpc_reg *reg, u64 *val);
...
@@ -320,6 +320,15 @@ struct cpufreq_driver {
 					unsigned int index);
 	unsigned int	(*fast_switch)(struct cpufreq_policy *policy,
 				       unsigned int target_freq);
+	/*
+	 * ->fast_switch() replacement for drivers that use an internal
+	 * representation of performance levels and can pass hints other than
+	 * the target performance level to the hardware.
+	 */
+	void		(*adjust_perf)(unsigned int cpu,
+				       unsigned long min_perf,
+				       unsigned long target_perf,
+				       unsigned long capacity);
 
 	/*
 	 * Caches and returns the lowest driver-supported frequency greater than
@@ -588,6 +597,11 @@ struct cpufreq_governor {
 /* Pass a target to the cpufreq driver */
 unsigned int cpufreq_driver_fast_switch(struct cpufreq_policy *policy,
 					unsigned int target_freq);
+void cpufreq_driver_adjust_perf(unsigned int cpu,
+				unsigned long min_perf,
+				unsigned long target_perf,
+				unsigned long capacity);
+bool cpufreq_driver_has_adjust_perf(void);
 int cpufreq_driver_target(struct cpufreq_policy *policy,
 			  unsigned int target_freq,
 			  unsigned int relation);
...
@@ -28,6 +28,11 @@ static inline unsigned long map_util_freq(unsigned long util,
 {
 	return (freq + (freq >> 2)) * util / cap;
 }
+
+static inline unsigned long map_util_perf(unsigned long util)
+{
+	return util + (util >> 2);
+}
 #endif /* CONFIG_CPU_FREQ */
 
 #endif /* _LINUX_SCHED_CPUFREQ_H */
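For example, util = 800 maps to 800 + (800 >> 2) = 1000. This is the same
1.25x headroom that map_util_freq() applies on the frequency path,
expressed directly in performance units for the ->adjust_perf() hints:
schedutil scales both min_perf and target_perf this way (see the
sugov_update_single_perf() hunk below).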
@@ -53,6 +53,7 @@ struct sugov_cpu {
 	unsigned int		iowait_boost;
 	u64			last_update;
 
+	unsigned long		util;
 	unsigned long		bw_dl;
 	unsigned long		max;
 
@@ -276,16 +277,15 @@ unsigned long schedutil_cpu_util(int cpu, unsigned long util_cfs,
 	return min(max, util);
 }
 
-static unsigned long sugov_get_util(struct sugov_cpu *sg_cpu)
+static void sugov_get_util(struct sugov_cpu *sg_cpu)
 {
 	struct rq *rq = cpu_rq(sg_cpu->cpu);
-	unsigned long util = cpu_util_cfs(rq);
 	unsigned long max = arch_scale_cpu_capacity(sg_cpu->cpu);
 
 	sg_cpu->max = max;
 	sg_cpu->bw_dl = cpu_bw_dl(rq);
-
-	return schedutil_cpu_util(sg_cpu->cpu, util, max, FREQUENCY_UTIL, NULL);
+	sg_cpu->util = schedutil_cpu_util(sg_cpu->cpu, cpu_util_cfs(rq), max,
+					  FREQUENCY_UTIL, NULL);
 }
 
 /**
@@ -362,8 +362,6 @@ static void sugov_iowait_boost(struct sugov_cpu *sg_cpu, u64 time,
  * sugov_iowait_apply() - Apply the IO boost to a CPU.
  * @sg_cpu: the sugov data for the cpu to boost
  * @time: the update time from the caller
- * @util: the utilization to (eventually) boost
- * @max: the maximum value the utilization can be boosted to
  *
  * A CPU running a task which woken up after an IO operation can have its
  * utilization boosted to speed up the completion of those IO operations.
@@ -377,18 +375,17 @@ static void sugov_iowait_boost(struct sugov_cpu *sg_cpu, u64 time,
 * This mechanism is designed to boost high frequently IO waiting tasks, while
 * being more conservative on tasks which does sporadic IO operations.
 */
-static unsigned long sugov_iowait_apply(struct sugov_cpu *sg_cpu, u64 time,
-					unsigned long util, unsigned long max)
+static void sugov_iowait_apply(struct sugov_cpu *sg_cpu, u64 time)
 {
 	unsigned long boost;
 
 	/* No boost currently required */
 	if (!sg_cpu->iowait_boost)
-		return util;
+		return;
 
 	/* Reset boost if the CPU appears to have been idle enough */
 	if (sugov_iowait_reset(sg_cpu, time, false))
-		return util;
+		return;
 
 	if (!sg_cpu->iowait_boost_pending) {
 		/*
@@ -397,18 +394,19 @@ static void sugov_iowait_apply(struct sugov_cpu *sg_cpu, u64 time)
 		sg_cpu->iowait_boost >>= 1;
 		if (sg_cpu->iowait_boost < IOWAIT_BOOST_MIN) {
 			sg_cpu->iowait_boost = 0;
-			return util;
+			return;
 		}
 	}
 
 	sg_cpu->iowait_boost_pending = false;
 
 	/*
-	 * @util is already in capacity scale; convert iowait_boost
+	 * sg_cpu->util is already in capacity scale; convert iowait_boost
	 * into the same scale so we can compare.
	 */
-	boost = (sg_cpu->iowait_boost * max) >> SCHED_CAPACITY_SHIFT;
-	return max(boost, util);
+	boost = (sg_cpu->iowait_boost * sg_cpu->max) >> SCHED_CAPACITY_SHIFT;
+	if (sg_cpu->util < boost)
+		sg_cpu->util = boost;
 }
 
 #ifdef CONFIG_NO_HZ_COMMON
@@ -434,14 +432,10 @@ static inline void ignore_dl_rate_limit(struct sugov_cpu *sg_cpu, struct sugov_policy *sg_policy)
 	sg_policy->limits_changed = true;
 }
 
-static void sugov_update_single(struct update_util_data *hook, u64 time,
-				unsigned int flags)
+static inline bool sugov_update_single_common(struct sugov_cpu *sg_cpu,
+					      u64 time, unsigned int flags)
 {
-	struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
 	struct sugov_policy *sg_policy = sg_cpu->sg_policy;
-	unsigned long util, max;
-	unsigned int next_f;
-	unsigned int cached_freq = sg_policy->cached_raw_freq;
 
 	sugov_iowait_boost(sg_cpu, time, flags);
 	sg_cpu->last_update = time;
@@ -449,12 +443,26 @@ static void sugov_update_single(struct update_util_data *hook, u64 time,
 	ignore_dl_rate_limit(sg_cpu, sg_policy);
 
 	if (!sugov_should_update_freq(sg_policy, time))
+		return false;
+
+	sugov_get_util(sg_cpu);
+	sugov_iowait_apply(sg_cpu, time);
+
+	return true;
+}
+
+static void sugov_update_single_freq(struct update_util_data *hook, u64 time,
+				     unsigned int flags)
+{
+	struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
+	struct sugov_policy *sg_policy = sg_cpu->sg_policy;
+	unsigned int cached_freq = sg_policy->cached_raw_freq;
+	unsigned int next_f;
+
+	if (!sugov_update_single_common(sg_cpu, time, flags))
 		return;
 
-	util = sugov_get_util(sg_cpu);
-	max = sg_cpu->max;
-	util = sugov_iowait_apply(sg_cpu, time, util, max);
-	next_f = get_next_freq(sg_policy, util, max);
+	next_f = get_next_freq(sg_policy, sg_cpu->util, sg_cpu->max);
 	/*
 	 * Do not reduce the frequency if the CPU has not been idle
 	 * recently, as the reduction is likely to be premature then.
@@ -480,6 +488,38 @@ static void sugov_update_single_freq(struct update_util_data *hook, u64 time,
 	}
 }
 
+static void sugov_update_single_perf(struct update_util_data *hook, u64 time,
+				     unsigned int flags)
+{
+	struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
+	unsigned long prev_util = sg_cpu->util;
+
+	/*
+	 * Fall back to the "frequency" path if frequency invariance is not
+	 * supported, because the direct mapping between the utilization and
+	 * the performance levels depends on the frequency invariance.
+	 */
+	if (!arch_scale_freq_invariant()) {
+		sugov_update_single_freq(hook, time, flags);
+		return;
+	}
+
+	if (!sugov_update_single_common(sg_cpu, time, flags))
+		return;
+
+	/*
+	 * Do not reduce the target performance level if the CPU has not been
+	 * idle recently, as the reduction is likely to be premature then.
+	 */
+	if (sugov_cpu_is_busy(sg_cpu) && sg_cpu->util < prev_util)
+		sg_cpu->util = prev_util;
+
+	cpufreq_driver_adjust_perf(sg_cpu->cpu, map_util_perf(sg_cpu->bw_dl),
+				   map_util_perf(sg_cpu->util), sg_cpu->max);
+
+	sg_cpu->sg_policy->last_freq_update_time = time;
+}
+
 static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu, u64 time)
 {
 	struct sugov_policy *sg_policy = sg_cpu->sg_policy;
@@ -491,9 +531,10 @@ static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu, u64 time)
 		struct sugov_cpu *j_sg_cpu = &per_cpu(sugov_cpu, j);
 		unsigned long j_util, j_max;
 
-		j_util = sugov_get_util(j_sg_cpu);
+		sugov_get_util(j_sg_cpu);
+		sugov_iowait_apply(j_sg_cpu, time);
+		j_util = j_sg_cpu->util;
 		j_max = j_sg_cpu->max;
-		j_util = sugov_iowait_apply(j_sg_cpu, time, j_util, j_max);
 
 		if (j_util * max > j_max * util) {
 			util = j_util;
@@ -817,6 +858,7 @@ static void sugov_exit(struct cpufreq_policy *policy)
 static int sugov_start(struct cpufreq_policy *policy)
 {
 	struct sugov_policy *sg_policy = policy->governor_data;
+	void (*uu)(struct update_util_data *data, u64 time, unsigned int flags);
 	unsigned int cpu;
 
 	sg_policy->freq_update_delay_ns = sg_policy->tunables->rate_limit_us * NSEC_PER_USEC;
@@ -836,13 +878,17 @@ static int sugov_start(struct cpufreq_policy *policy)
 		sg_cpu->sg_policy		= sg_policy;
 	}
 
+	if (policy_is_shared(policy))
+		uu = sugov_update_shared;
+	else if (policy->fast_switch_enabled && cpufreq_driver_has_adjust_perf())
+		uu = sugov_update_single_perf;
+	else
+		uu = sugov_update_single_freq;
+
 	for_each_cpu(cpu, policy->cpus) {
 		struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu);
 
-		cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util,
-					     policy_is_shared(policy) ?
-							sugov_update_shared :
-							sugov_update_single);
+		cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util, uu);
 	}
 	return 0;
 }
...