Commit 0bed612b authored by Rafael J. Wysocki

cpufreq: sched: Helpers to add and remove update_util hooks

Replace the single helper for adding and removing cpufreq utilization
update hooks, cpufreq_set_update_util_data(), with a pair of helpers,
cpufreq_add_update_util_hook() and cpufreq_remove_update_util_hook(),
and modify the users of cpufreq_set_update_util_data() accordingly.

With the new helpers, the code using them doesn't need to worry about
the internals of struct update_util_data; in particular, it no longer
has to populate the func field in it upfront.
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Acked-by: Viresh Kumar <viresh.kumar@linaro.org>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
parent 9fa64d64
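For illustration, a minimal sketch of what a client of the new pair of helpers could look like after this change. The my_gov_* names and the per-CPU layout are hypothetical and not part of this patch; only cpufreq_add_update_util_hook(), struct update_util_data and the callback signature are taken from the hunks below.

/* Sketch only -- my_gov_* names are hypothetical, not part of this patch. */
#include <linux/sched.h>        /* struct update_util_data, cpufreq_add_update_util_hook() */
#include <linux/percpu-defs.h>  /* DEFINE_PER_CPU(), per_cpu() */
#include <linux/kernel.h>       /* container_of() */

struct my_gov_cpu {
        struct update_util_data update_util;    /* hook embedded in per-CPU state */
        /* ... other per-CPU governor fields ... */
};

static DEFINE_PER_CPU(struct my_gov_cpu, my_gov_cpu_data);

/* Called from an RCU-sched read-side critical section, so it must not sleep. */
static void my_gov_update_util(struct update_util_data *data, u64 time,
                               unsigned long util, unsigned long max)
{
        struct my_gov_cpu *gcpu = container_of(data, struct my_gov_cpu, update_util);

        /* ... use gcpu to react to the utilization update for this CPU ... */
        (void)gcpu;
}

static void my_gov_start_cpu(int cpu)
{
        /* No need to touch update_util.func here; the helper fills it in. */
        cpufreq_add_update_util_hook(cpu, &per_cpu(my_gov_cpu_data, cpu).update_util,
                                     my_gov_update_util);
}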
@@ -258,43 +258,6 @@ unsigned int dbs_update(struct cpufreq_policy *policy)
 }
 EXPORT_SYMBOL_GPL(dbs_update);
 
-static void gov_set_update_util(struct policy_dbs_info *policy_dbs,
-                                unsigned int delay_us)
-{
-        struct cpufreq_policy *policy = policy_dbs->policy;
-        int cpu;
-
-        gov_update_sample_delay(policy_dbs, delay_us);
-        policy_dbs->last_sample_time = 0;
-
-        for_each_cpu(cpu, policy->cpus) {
-                struct cpu_dbs_info *cdbs = &per_cpu(cpu_dbs, cpu);
-
-                cpufreq_set_update_util_data(cpu, &cdbs->update_util);
-        }
-}
-
-static inline void gov_clear_update_util(struct cpufreq_policy *policy)
-{
-        int i;
-
-        for_each_cpu(i, policy->cpus)
-                cpufreq_set_update_util_data(i, NULL);
-
-        synchronize_sched();
-}
-
-static void gov_cancel_work(struct cpufreq_policy *policy)
-{
-        struct policy_dbs_info *policy_dbs = policy->governor_data;
-
-        gov_clear_update_util(policy_dbs->policy);
-        irq_work_sync(&policy_dbs->irq_work);
-        cancel_work_sync(&policy_dbs->work);
-        atomic_set(&policy_dbs->work_count, 0);
-        policy_dbs->work_in_progress = false;
-}
-
 static void dbs_work_handler(struct work_struct *work)
 {
         struct policy_dbs_info *policy_dbs;
@@ -382,6 +345,44 @@ static void dbs_update_util_handler(struct update_util_data *data, u64 time,
         irq_work_queue(&policy_dbs->irq_work);
 }
 
+static void gov_set_update_util(struct policy_dbs_info *policy_dbs,
+                                unsigned int delay_us)
+{
+        struct cpufreq_policy *policy = policy_dbs->policy;
+        int cpu;
+
+        gov_update_sample_delay(policy_dbs, delay_us);
+        policy_dbs->last_sample_time = 0;
+
+        for_each_cpu(cpu, policy->cpus) {
+                struct cpu_dbs_info *cdbs = &per_cpu(cpu_dbs, cpu);
+
+                cpufreq_add_update_util_hook(cpu, &cdbs->update_util,
+                                             dbs_update_util_handler);
+        }
+}
+
+static inline void gov_clear_update_util(struct cpufreq_policy *policy)
+{
+        int i;
+
+        for_each_cpu(i, policy->cpus)
+                cpufreq_remove_update_util_hook(i);
+
+        synchronize_sched();
+}
+
+static void gov_cancel_work(struct cpufreq_policy *policy)
+{
+        struct policy_dbs_info *policy_dbs = policy->governor_data;
+
+        gov_clear_update_util(policy_dbs->policy);
+        irq_work_sync(&policy_dbs->irq_work);
+        cancel_work_sync(&policy_dbs->work);
+        atomic_set(&policy_dbs->work_count, 0);
+        policy_dbs->work_in_progress = false;
+}
+
 static struct policy_dbs_info *alloc_policy_dbs_info(struct cpufreq_policy *policy,
                                                      struct dbs_governor *gov)
 {
@@ -404,7 +405,6 @@ static struct policy_dbs_info *alloc_policy_dbs_info(struct cpufreq_policy *poli
                 struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);
 
                 j_cdbs->policy_dbs = policy_dbs;
-                j_cdbs->update_util.func = dbs_update_util_handler;
         }
         return policy_dbs;
 }
...
@@ -1107,8 +1107,6 @@ static int intel_pstate_init_cpu(unsigned int cpunum)
         intel_pstate_busy_pid_reset(cpu);
 
-        cpu->update_util.func = intel_pstate_update_util;
-
         pr_debug("intel_pstate: controlling: cpu %d\n", cpunum);
 
         return 0;
@@ -1132,12 +1130,13 @@ static void intel_pstate_set_update_util_hook(unsigned int cpu_num)
         /* Prevent intel_pstate_update_util() from using stale data. */
         cpu->sample.time = 0;
-        cpufreq_set_update_util_data(cpu_num, &cpu->update_util);
+        cpufreq_add_update_util_hook(cpu_num, &cpu->update_util,
+                                     intel_pstate_update_util);
 }
 
 static void intel_pstate_clear_update_util_hook(unsigned int cpu)
 {
-        cpufreq_set_update_util_data(cpu, NULL);
+        cpufreq_remove_update_util_hook(cpu);
         synchronize_sched();
 }
...
@@ -3240,7 +3240,10 @@ struct update_util_data {
                         u64 time, unsigned long util, unsigned long max);
 };
 
-void cpufreq_set_update_util_data(int cpu, struct update_util_data *data);
+void cpufreq_add_update_util_hook(int cpu, struct update_util_data *data,
+                       void (*func)(struct update_util_data *data, u64 time,
+                                    unsigned long util, unsigned long max));
+void cpufreq_remove_update_util_hook(int cpu);
 #endif /* CONFIG_CPU_FREQ */
 
 #endif
...
@@ -14,24 +14,50 @@
 DEFINE_PER_CPU(struct update_util_data *, cpufreq_update_util_data);
 
 /**
- * cpufreq_set_update_util_data - Populate the CPU's update_util_data pointer.
+ * cpufreq_add_update_util_hook - Populate the CPU's update_util_data pointer.
  * @cpu: The CPU to set the pointer for.
  * @data: New pointer value.
+ * @func: Callback function to set for the CPU.
  *
- * Set and publish the update_util_data pointer for the given CPU.  That pointer
- * points to a struct update_util_data object containing a callback function
- * to call from cpufreq_update_util().  That function will be called from an RCU
- * read-side critical section, so it must not sleep.
+ * Set and publish the update_util_data pointer for the given CPU.
  *
- * Callers must use RCU-sched callbacks to free any memory that might be
- * accessed via the old update_util_data pointer or invoke synchronize_sched()
- * right after this function to avoid use-after-free.
+ * The update_util_data pointer of @cpu is set to @data and the callback
+ * function pointer in the target struct update_util_data is set to @func.
+ * That function will be called by cpufreq_update_util() from RCU-sched
+ * read-side critical sections, so it must not sleep.  @data will always be
+ * passed to it as the first argument which allows the function to get to the
+ * target update_util_data structure and its container.
+ *
+ * The update_util_data pointer of @cpu must be NULL when this function is
+ * called or it will WARN() and return with no effect.
  */
-void cpufreq_set_update_util_data(int cpu, struct update_util_data *data)
+void cpufreq_add_update_util_hook(int cpu, struct update_util_data *data,
+                       void (*func)(struct update_util_data *data, u64 time,
+                                    unsigned long util, unsigned long max))
 {
-        if (WARN_ON(data && !data->func))
+        if (WARN_ON(!data || !func))
                 return;
 
+        if (WARN_ON(per_cpu(cpufreq_update_util_data, cpu)))
+                return;
+
+        data->func = func;
         rcu_assign_pointer(per_cpu(cpufreq_update_util_data, cpu), data);
 }
-EXPORT_SYMBOL_GPL(cpufreq_set_update_util_data);
+EXPORT_SYMBOL_GPL(cpufreq_add_update_util_hook);
+
+/**
+ * cpufreq_remove_update_util_hook - Clear the CPU's update_util_data pointer.
+ * @cpu: The CPU to clear the pointer for.
+ *
+ * Clear the update_util_data pointer for the given CPU.
+ *
+ * Callers must use RCU-sched callbacks to free any memory that might be
+ * accessed via the old update_util_data pointer or invoke synchronize_sched()
+ * right after this function to avoid use-after-free.
+ */
+void cpufreq_remove_update_util_hook(int cpu)
+{
+        rcu_assign_pointer(per_cpu(cpufreq_update_util_data, cpu), NULL);
+}
+EXPORT_SYMBOL_GPL(cpufreq_remove_update_util_hook);
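As the kernel-doc for cpufreq_remove_update_util_hook() above spells out, the helper only clears the per-CPU pointer; the caller still has to wait for in-flight callbacks before freeing or reusing the memory behind the hook. A hypothetical teardown sketch, continuing the illustrative my_gov_* naming from the sketch above (not part of this patch):

/* Sketch only; assumes <linux/cpufreq.h>, <linux/cpumask.h> and <linux/rcupdate.h>. */
static void my_gov_stop(struct cpufreq_policy *policy)
{
        int cpu;

        /* Unpublish the hooks so cpufreq_update_util() stops seeing them ... */
        for_each_cpu(cpu, policy->cpus)
                cpufreq_remove_update_util_hook(cpu);

        /*
         * ... then wait for callbacks that may already be running inside an
         * RCU-sched read-side critical section, as the governor and
         * intel_pstate code in this patch do, before any per-CPU state that
         * the old update_util_data pointers referenced is freed or reused.
         */
        synchronize_sched();
}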