Commit 714af069 authored by Linus Torvalds

Merge branch 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/davej/cpufreq

* 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/davej/cpufreq:
  [CPUFREQ] Fix NULL ptr regression in powernow-k8
  [CPUFREQ] Create a blacklist for processors that should not load the acpi-cpufreq module.
  [CPUFREQ] Powernow-k8: Enable more than 2 low P-states
  [CPUFREQ] remove rwsem lock from CPUFREQ_GOV_STOP call (second call site)
  [CPUFREQ] ondemand - Use global sysfs dir for tuning settings
  [CPUFREQ] Introduce global, not per core: /sys/devices/system/cpu/cpufreq
  [CPUFREQ] Bail out of cpufreq_add_dev if the link for a managed CPU got created
  [CPUFREQ] Factor out policy setting from cpufreq_add_dev
  [CPUFREQ] Factor out interface creation from cpufreq_add_dev
  [CPUFREQ] Factor out symlink creation from cpufreq_add_dev
  [CPUFREQ] clean up -ENOMEM handling in cpufreq_add_dev
  [CPUFREQ] Reduce scope of cpu_sys_dev in cpufreq_add_dev
  [CPUFREQ] update Doc for cpuinfo_cur_freq and scaling_cur_freq
parents a03fdb76 f0adb134
@@ -176,7 +176,9 @@ scaling_governor, and by "echoing" the name of another
work on some specific architectures or
processors.
-cpuinfo_cur_freq :		Current speed of the CPU, in KHz.
+cpuinfo_cur_freq :		Current frequency of the CPU as obtained from
+				the hardware, in KHz. This is the frequency
+				the CPU actually runs at.
scaling_available_frequencies : List of available frequencies, in KHz.
@@ -196,7 +198,10 @@ related_cpus : List of CPUs that need some sort of frequency
scaling_driver : Hardware driver for cpufreq.
-scaling_cur_freq :		Current frequency of the CPU, in KHz.
+scaling_cur_freq :		Current frequency of the CPU as determined by
+				the governor and cpufreq core, in KHz. This is
+				the frequency the kernel thinks the CPU runs
+				at.
If you have selected the "userspace" governor which allows you to
set the CPU operating frequency to a specific value, you can read out
...
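As a quick illustration of the two sysfs files documented above, here is a minimal userspace sketch (not part of this patch set; file name and error handling are my own) that reads both views of cpu0's frequency:

/* compare.c -- illustrative only; build with: cc -o compare compare.c */
#include <stdio.h>

static long read_khz(const char *path)
{
        long khz = -1;
        FILE *f = fopen(path, "r");

        if (f) {
                if (fscanf(f, "%ld", &khz) != 1)
                        khz = -1;
                fclose(f);
        }
        return khz;
}

int main(void)
{
        /* hardware-reported value (may be root-only on some systems) */
        long hw = read_khz("/sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_cur_freq");
        /* the cpufreq core's view of the same CPU */
        long sw = read_khz("/sys/devices/system/cpu/cpu0/cpufreq/scaling_cur_freq");

        printf("cpuinfo_cur_freq: %ld KHz\n", hw);
        printf("scaling_cur_freq: %ld KHz\n", sw);
        return 0;
}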
@@ -526,6 +526,21 @@ static const struct dmi_system_id sw_any_bug_dmi_table[] = {
},
{ }
};
+static int acpi_cpufreq_blacklist(struct cpuinfo_x86 *c)
+{
+	/* http://www.intel.com/Assets/PDF/specupdate/314554.pdf
+	 * AL30: A Machine Check Exception (MCE) Occurring during an
+	 * Enhanced Intel SpeedStep Technology Ratio Change May Cause
+	 * Both Processor Cores to Lock Up when HT is enabled */
+	if (c->x86_vendor == X86_VENDOR_INTEL) {
+		if ((c->x86 == 15) &&
+		    (c->x86_model == 6) &&
+		    (c->x86_mask == 8) && smt_capable())
+			return -ENODEV;
+	}
+	return 0;
+}
#endif
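For reference, the tuple checked above (Intel, family 15, model 6, stepping 8, with SMT/HyperThreading present) matches the Xeon 7100 series parts covered by the linked specification update. In /proc/cpuinfo terms, c->x86, c->x86_model and c->x86_mask map to the following fields (hypothetical excerpt from an affected machine):

vendor_id	: GenuineIntel
cpu family	: 15
model		: 6
stepping	: 8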
static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
@@ -540,6 +555,12 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
dprintk("acpi_cpufreq_cpu_init\n");
+#ifdef CONFIG_SMP
+	result = acpi_cpufreq_blacklist(c);
+	if (result)
+		return result;
+#endif
data = kzalloc(sizeof(struct acpi_cpufreq_data), GFP_KERNEL);
if (!data)
return -ENOMEM;
...
@@ -605,9 +605,10 @@ static int check_pst_table(struct powernow_k8_data *data, struct pst_s *pst,
return 0;
}
-static void invalidate_entry(struct powernow_k8_data *data, unsigned int entry)
+static void invalidate_entry(struct cpufreq_frequency_table *powernow_table,
+		unsigned int entry)
{
-	data->powernow_table[entry].frequency = CPUFREQ_ENTRY_INVALID;
+	powernow_table[entry].frequency = CPUFREQ_ENTRY_INVALID;
}
static void print_basics(struct powernow_k8_data *data)
@@ -854,6 +855,10 @@ static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data)
goto err_out;
}
+	/* fill in data */
+	data->numps = data->acpi_data.state_count;
+	powernow_k8_acpi_pst_values(data, 0);
if (cpu_family == CPU_HW_PSTATE)
ret_val = fill_powernow_table_pstate(data, powernow_table);
else
@@ -866,11 +871,8 @@ static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data)
powernow_table[data->acpi_data.state_count].index = 0;
data->powernow_table = powernow_table;
-	/* fill in data */
-	data->numps = data->acpi_data.state_count;
if (cpumask_first(cpu_core_mask(data->cpu)) == data->cpu)
print_basics(data);
-	powernow_k8_acpi_pst_values(data, 0);
/* notify BIOS that we exist */
acpi_processor_notify_smm(THIS_MODULE);
@@ -914,13 +916,13 @@ static int fill_powernow_table_pstate(struct powernow_k8_data *data,
"bad value %d.\n", i, index);
printk(KERN_ERR PFX "Please report to BIOS "
"manufacturer\n");
-			invalidate_entry(data, i);
+			invalidate_entry(powernow_table, i);
continue;
}
rdmsr(MSR_PSTATE_DEF_BASE + index, lo, hi);
if (!(hi & HW_PSTATE_VALID_MASK)) {
dprintk("invalid pstate %d, ignoring\n", index);
-			invalidate_entry(data, i);
+			invalidate_entry(powernow_table, i);
continue;
}
@@ -941,7 +943,6 @@ static int fill_powernow_table_fidvid(struct powernow_k8_data *data,
struct cpufreq_frequency_table *powernow_table)
{
int i;
-	int cntlofreq = 0;
for (i = 0; i < data->acpi_data.state_count; i++) {
u32 fid;
@@ -970,7 +971,7 @@ static int fill_powernow_table_fidvid(struct powernow_k8_data *data,
/* verify frequency is OK */
if ((freq > (MAX_FREQ * 1000)) || (freq < (MIN_FREQ * 1000))) {
dprintk("invalid freq %u kHz, ignoring\n", freq);
-			invalidate_entry(data, i);
+			invalidate_entry(powernow_table, i);
continue;
}
@@ -978,38 +979,17 @@ static int fill_powernow_table_fidvid(struct powernow_k8_data *data,
* BIOSs are using "off" to indicate invalid */
if (vid == VID_OFF) {
dprintk("invalid vid %u, ignoring\n", vid);
-			invalidate_entry(data, i);
+			invalidate_entry(powernow_table, i);
continue;
}
-		/* verify only 1 entry from the lo frequency table */
-		if (fid < HI_FID_TABLE_BOTTOM) {
-			if (cntlofreq) {
-				/* if both entries are the same,
-				 * ignore this one ... */
-				if ((freq != powernow_table[cntlofreq].frequency) ||
-				    (index != powernow_table[cntlofreq].index)) {
-					printk(KERN_ERR PFX
-						"Too many lo freq table "
-						"entries\n");
-					return 1;
-				}
-				dprintk("double low frequency table entry, "
-					"ignoring it.\n");
-				invalidate_entry(data, i);
-				continue;
-			} else
-				cntlofreq = i;
-		}
if (freq != (data->acpi_data.states[i].core_frequency * 1000)) {
printk(KERN_INFO PFX "invalid freq entries "
"%u kHz vs. %u kHz\n", freq,
(unsigned int)
(data->acpi_data.states[i].core_frequency
* 1000));
-			invalidate_entry(data, i);
+			invalidate_entry(powernow_table, i);
continue;
}
}
...
@@ -61,6 +61,8 @@ static DEFINE_SPINLOCK(cpufreq_driver_lock);
* are concerned with are online after they get the lock.
* - Governor routines that can be called in cpufreq hotplug path should not
* take this sem as top level hotplug notifier handler takes this.
+ * - Lock should not be held across
+ *     __cpufreq_governor(data, CPUFREQ_GOV_STOP);
*/
static DEFINE_PER_CPU(int, policy_cpu);
static DEFINE_PER_CPU(struct rw_semaphore, cpu_policy_rwsem);
@@ -686,6 +688,9 @@ static struct attribute *default_attrs[] = {
NULL
};
+struct kobject *cpufreq_global_kobject;
+EXPORT_SYMBOL(cpufreq_global_kobject);
#define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
#define to_attr(a) container_of(a, struct freq_attr, attr)
@@ -756,92 +761,20 @@ static struct kobj_type ktype_cpufreq = {
.release = cpufreq_sysfs_release,
};
-/**
- * cpufreq_add_dev - add a CPU device
- *
- * Adds the cpufreq interface for a CPU device.
- *
- * The Oracle says: try running cpufreq registration/unregistration concurrently
- * with cpu hotplugging and all hell will break loose. Tried to clean this
- * mess up, but more thorough testing is needed. - Mathieu
- */
+/*
+ * Returns:
+ *   Negative: Failure
+ *   0:        Success
+ *   Positive: When we have a managed CPU and the sysfs got symlinked
+ */
-static int cpufreq_add_dev(struct sys_device *sys_dev)
+int cpufreq_add_dev_policy(unsigned int cpu, struct cpufreq_policy *policy,
+		struct sys_device *sys_dev)
{
-	unsigned int cpu = sys_dev->id;
int ret = 0;
-	struct cpufreq_policy new_policy;
-	struct cpufreq_policy *policy;
-	struct freq_attr **drv_attr;
-	struct sys_device *cpu_sys_dev;
+#ifdef CONFIG_SMP
unsigned long flags;
unsigned int j;
-	if (cpu_is_offline(cpu))
-		return 0;
-	cpufreq_debug_disable_ratelimit();
-	dprintk("adding CPU %u\n", cpu);
-#ifdef CONFIG_SMP
-	/* check whether a different CPU already registered this
-	 * CPU because it is in the same boat. */
-	policy = cpufreq_cpu_get(cpu);
-	if (unlikely(policy)) {
-		cpufreq_cpu_put(policy);
-		cpufreq_debug_enable_ratelimit();
-		return 0;
-	}
-#endif
-	if (!try_module_get(cpufreq_driver->owner)) {
-		ret = -EINVAL;
-		goto module_out;
-	}
-	policy = kzalloc(sizeof(struct cpufreq_policy), GFP_KERNEL);
-	if (!policy) {
-		ret = -ENOMEM;
-		goto nomem_out;
-	}
-	if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL)) {
-		ret = -ENOMEM;
-		goto err_free_policy;
-	}
-	if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL)) {
-		ret = -ENOMEM;
-		goto err_free_cpumask;
-	}
-	policy->cpu = cpu;
-	cpumask_copy(policy->cpus, cpumask_of(cpu));
-	/* Initially set CPU itself as the policy_cpu */
-	per_cpu(policy_cpu, cpu) = cpu;
-	ret = (lock_policy_rwsem_write(cpu) < 0);
-	WARN_ON(ret);
-	init_completion(&policy->kobj_unregister);
-	INIT_WORK(&policy->update, handle_update);
-	/* Set governor before ->init, so that driver could check it */
-	policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
-	/* call driver. From then on the cpufreq must be able
-	 * to accept all calls to ->verify and ->setpolicy for this CPU
-	 */
-	ret = cpufreq_driver->init(policy);
-	if (ret) {
-		dprintk("initialization failed\n");
-		goto err_unlock_policy;
-	}
-	policy->user_policy.min = policy->min;
-	policy->user_policy.max = policy->max;
-	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
-			CPUFREQ_START, policy);
-#ifdef CONFIG_SMP
#ifdef CONFIG_HOTPLUG_CPU
if (per_cpu(cpufreq_cpu_governor, cpu)) {
policy->governor = per_cpu(cpufreq_cpu_governor, cpu);
@@ -872,9 +805,8 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
/* Should not go through policy unlock path */
if (cpufreq_driver->exit)
cpufreq_driver->exit(policy);
-			ret = -EBUSY;
			cpufreq_cpu_put(managed_policy);
-			goto err_free_cpumask;
+			return -EBUSY;
}
spin_lock_irqsave(&cpufreq_driver_lock, flags);
@@ -893,17 +825,62 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
* Call driver->exit() because only the cpu parent of
* the kobj needed to call init().
*/
-			goto out_driver_exit; /* call driver->exit() */
+			if (cpufreq_driver->exit)
+				cpufreq_driver->exit(policy);
+			if (!ret)
+				return 1;
+			else
+				return ret;
}
}
#endif
-	memcpy(&new_policy, policy, sizeof(struct cpufreq_policy));
+	return ret;
+}
-	/* prepare interface data */
-	ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq, &sys_dev->kobj,
+/* symlink affected CPUs */
+int cpufreq_add_dev_symlink(unsigned int cpu, struct cpufreq_policy *policy)
+{
+	unsigned int j;
+	int ret = 0;
+	for_each_cpu(j, policy->cpus) {
+		struct cpufreq_policy *managed_policy;
+		struct sys_device *cpu_sys_dev;
+		if (j == cpu)
+			continue;
+		if (!cpu_online(j))
+			continue;
+		dprintk("CPU %u already managed, adding link\n", j);
+		managed_policy = cpufreq_cpu_get(cpu);
+		cpu_sys_dev = get_cpu_sysdev(j);
+		ret = sysfs_create_link(&cpu_sys_dev->kobj, &policy->kobj,
+					"cpufreq");
+		if (ret) {
+			cpufreq_cpu_put(managed_policy);
+			return ret;
+		}
+	}
+	return ret;
+}
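Illustratively, once cpufreq_add_dev_symlink() has run for a policy owned by cpu0 that also covers cpu1, the sysfs tree looks roughly like this (a topology-dependent sketch, not output captured from the patch):

/sys/devices/system/cpu/cpu0/cpufreq/                      <- real policy kobject
/sys/devices/system/cpu/cpu1/cpufreq -> ../../cpu0/cpufreq <- symlink added above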
+int cpufreq_add_dev_interface(unsigned int cpu, struct cpufreq_policy *policy,
+		struct sys_device *sys_dev)
+{
+	struct cpufreq_policy new_policy;
+	struct freq_attr **drv_attr;
+	unsigned long flags;
+	int ret = 0;
+	unsigned int j;
+	/* prepare interface data */
+	ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
+				   &sys_dev->kobj, "cpufreq");
if (ret)
-		goto out_driver_exit;
+		return ret;
/* set up files for this cpu device */
drv_attr = cpufreq_driver->attr;
@@ -933,28 +910,13 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
}
spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
-	/* symlink affected CPUs */
-	for_each_cpu(j, policy->cpus) {
-		struct cpufreq_policy *managed_policy;
-		if (j == cpu)
-			continue;
-		if (!cpu_online(j))
-			continue;
-		dprintk("CPU %u already managed, adding link\n", j);
-		managed_policy = cpufreq_cpu_get(cpu);
-		cpu_sys_dev = get_cpu_sysdev(j);
-		ret = sysfs_create_link(&cpu_sys_dev->kobj, &policy->kobj,
-					"cpufreq");
-		if (ret) {
-			cpufreq_cpu_put(managed_policy);
-			goto err_out_unregister;
-		}
-	}
+	ret = cpufreq_add_dev_symlink(cpu, policy);
+	if (ret)
+		goto err_out_kobj_put;
-	policy->governor = NULL; /* to assure that the starting sequence is
-				  * run in cpufreq_set_policy */
+	memcpy(&new_policy, policy, sizeof(struct cpufreq_policy));
+	/* assure that the starting sequence is run in __cpufreq_set_policy */
+	policy->governor = NULL;
/* set default policy */
ret = __cpufreq_set_policy(policy, &new_policy);
@@ -963,8 +925,107 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
if (ret) {
dprintk("setting policy failed\n");
-		goto err_out_unregister;
+		if (cpufreq_driver->exit)
+			cpufreq_driver->exit(policy);
}
+	return ret;
+err_out_kobj_put:
+	kobject_put(&policy->kobj);
+	wait_for_completion(&policy->kobj_unregister);
+	return ret;
+}
+/**
+ * cpufreq_add_dev - add a CPU device
+ *
+ * Adds the cpufreq interface for a CPU device.
+ *
+ * The Oracle says: try running cpufreq registration/unregistration concurrently
+ * with cpu hotplugging and all hell will break loose. Tried to clean this
+ * mess up, but more thorough testing is needed. - Mathieu
+ */
+static int cpufreq_add_dev(struct sys_device *sys_dev)
+{
+	unsigned int cpu = sys_dev->id;
+	int ret = 0;
+	struct cpufreq_policy *policy;
+	unsigned long flags;
+	unsigned int j;
+	if (cpu_is_offline(cpu))
+		return 0;
+	cpufreq_debug_disable_ratelimit();
+	dprintk("adding CPU %u\n", cpu);
+#ifdef CONFIG_SMP
+	/* check whether a different CPU already registered this
+	 * CPU because it is in the same boat. */
+	policy = cpufreq_cpu_get(cpu);
+	if (unlikely(policy)) {
+		cpufreq_cpu_put(policy);
+		cpufreq_debug_enable_ratelimit();
+		return 0;
+	}
+#endif
+	if (!try_module_get(cpufreq_driver->owner)) {
+		ret = -EINVAL;
+		goto module_out;
+	}
+	ret = -ENOMEM;
+	policy = kzalloc(sizeof(struct cpufreq_policy), GFP_KERNEL);
+	if (!policy)
+		goto nomem_out;
+	if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
+		goto err_free_policy;
+	if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
+		goto err_free_cpumask;
+	policy->cpu = cpu;
+	cpumask_copy(policy->cpus, cpumask_of(cpu));
+	/* Initially set CPU itself as the policy_cpu */
+	per_cpu(policy_cpu, cpu) = cpu;
+	ret = (lock_policy_rwsem_write(cpu) < 0);
+	WARN_ON(ret);
+	init_completion(&policy->kobj_unregister);
+	INIT_WORK(&policy->update, handle_update);
+	/* Set governor before ->init, so that driver could check it */
+	policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
+	/* call driver. From then on the cpufreq must be able
+	 * to accept all calls to ->verify and ->setpolicy for this CPU
+	 */
+	ret = cpufreq_driver->init(policy);
+	if (ret) {
+		dprintk("initialization failed\n");
+		goto err_unlock_policy;
+	}
+	policy->user_policy.min = policy->min;
+	policy->user_policy.max = policy->max;
+	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
+			CPUFREQ_START, policy);
+	ret = cpufreq_add_dev_policy(cpu, policy, sys_dev);
+	if (ret) {
+		if (ret > 0)
+			/* This is a managed cpu, symlink created,
+			   exit with 0 */
+			ret = 0;
+		goto err_unlock_policy;
+	}
+	ret = cpufreq_add_dev_interface(cpu, policy, sys_dev);
+	if (ret)
+		goto err_out_unregister;
unlock_policy_rwsem_write(cpu);
@@ -982,14 +1043,9 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
per_cpu(cpufreq_cpu_data, j) = NULL;
spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
-err_out_kobj_put:
kobject_put(&policy->kobj);
wait_for_completion(&policy->kobj_unregister);
-out_driver_exit:
-	if (cpufreq_driver->exit)
-		cpufreq_driver->exit(policy);
err_unlock_policy:
unlock_policy_rwsem_write(cpu);
err_free_cpumask:
@@ -1653,8 +1709,17 @@ static int __cpufreq_set_policy(struct cpufreq_policy *data,
dprintk("governor switch\n");
/* end old governor */
-	if (data->governor)
+	if (data->governor) {
+		/*
+		 * Need to release the rwsem around governor
+		 * stop due to lock dependency between
+		 * cancel_delayed_work_sync and the read lock
+		 * taken in the delayed work handler.
+		 */
+		unlock_policy_rwsem_write(data->cpu);
		__cpufreq_governor(data, CPUFREQ_GOV_STOP);
+		lock_policy_rwsem_write(data->cpu);
+	}
/* start new governor */
data->governor = policy->governor;
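A sketch of the lock inversion the comment above describes, based only on the commit's own description of the dependency (illustrative comment, not kernel code):

/*
 * CPU A: __cpufreq_set_policy()        CPU B: governor's delayed work
 *   holds policy rwsem for write         takes policy rwsem for read
 *   __cpufreq_governor(GOV_STOP)           -> blocks waiting on CPU A
 *     -> cancel_delayed_work_sync()
 *          -> waits for CPU B's work  ==> deadlock, unless the write
 *                                         lock is dropped around GOV_STOP
 */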
@@ -1884,7 +1949,11 @@ static int __init cpufreq_core_init(void)
per_cpu(policy_cpu, cpu) = -1;
init_rwsem(&per_cpu(cpu_policy_rwsem, cpu));
}
+	cpufreq_global_kobject = kobject_create_and_add("cpufreq",
+						&cpu_sysdev_class.kset.kobj);
+	BUG_ON(!cpufreq_global_kobject);
return 0;
}
core_initcall(cpufreq_core_init);
@@ -55,6 +55,18 @@ static unsigned int min_sampling_rate;
#define TRANSITION_LATENCY_LIMIT (10 * 1000 * 1000)
static void do_dbs_timer(struct work_struct *work);
+static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
+				unsigned int event);
+#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND
+static
+#endif
+struct cpufreq_governor cpufreq_gov_ondemand = {
+	.name			= "ondemand",
+	.governor		= cpufreq_governor_dbs,
+	.max_transition_latency	= TRANSITION_LATENCY_LIMIT,
+	.owner			= THIS_MODULE,
+};
/* Sampling types */
enum {DBS_NORMAL_SAMPLE, DBS_SUB_SAMPLE};
@@ -207,20 +219,23 @@ static void ondemand_powersave_bias_init(void)
}
/************************** sysfs interface ************************/
-static ssize_t show_sampling_rate_max(struct cpufreq_policy *policy, char *buf)
+static ssize_t show_sampling_rate_max(struct kobject *kobj,
+				      struct attribute *attr, char *buf)
{
printk_once(KERN_INFO "CPUFREQ: ondemand sampling_rate_max "
"sysfs file is deprecated - used by: %s\n", current->comm);
return sprintf(buf, "%u\n", -1U);
}
-static ssize_t show_sampling_rate_min(struct cpufreq_policy *policy, char *buf)
+static ssize_t show_sampling_rate_min(struct kobject *kobj,
+				      struct attribute *attr, char *buf)
{
return sprintf(buf, "%u\n", min_sampling_rate);
}
#define define_one_ro(_name) \
-static struct freq_attr _name = \
+static struct global_attr _name = \
__ATTR(_name, 0444, show_##_name, NULL)
define_one_ro(sampling_rate_max);
@@ -229,7 +244,7 @@ define_one_ro(sampling_rate_min);
/* cpufreq_ondemand Governor Tunables */
#define show_one(file_name, object) \
static ssize_t show_##file_name \
-(struct cpufreq_policy *unused, char *buf) \
+(struct kobject *kobj, struct attribute *attr, char *buf) \
{ \
return sprintf(buf, "%u\n", dbs_tuners_ins.object); \
}
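For clarity, expanding show_one(sampling_rate, sampling_rate) by hand gives the following kobject-based handler (a hand expansion of the macro above, for illustration only):

static ssize_t show_sampling_rate
(struct kobject *kobj, struct attribute *attr, char *buf)
{
	return sprintf(buf, "%u\n", dbs_tuners_ins.sampling_rate);
}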
@@ -238,7 +253,37 @@ show_one(up_threshold, up_threshold);
show_one(ignore_nice_load, ignore_nice);
show_one(powersave_bias, powersave_bias);
-static ssize_t store_sampling_rate(struct cpufreq_policy *unused,
+/*** delete after deprecation time ***/
+#define DEPRECATION_MSG(file_name)					\
+	printk_once(KERN_INFO "CPUFREQ: Per core ondemand sysfs "	\
+		    "interface is deprecated - " #file_name "\n");
+#define show_one_old(file_name)						\
+static ssize_t show_##file_name##_old					\
+(struct cpufreq_policy *unused, char *buf)				\
+{									\
+	printk_once(KERN_INFO "CPUFREQ: Per core ondemand sysfs "	\
+		    "interface is deprecated - " #file_name "\n");	\
+	return show_##file_name(NULL, NULL, buf);			\
+}
+show_one_old(sampling_rate);
+show_one_old(up_threshold);
+show_one_old(ignore_nice_load);
+show_one_old(powersave_bias);
+show_one_old(sampling_rate_min);
+show_one_old(sampling_rate_max);
+#define define_one_ro_old(object, _name)	\
+static struct freq_attr object =		\
+__ATTR(_name, 0444, show_##_name##_old, NULL)
+define_one_ro_old(sampling_rate_min_old, sampling_rate_min);
+define_one_ro_old(sampling_rate_max_old, sampling_rate_max);
+/*** delete after deprecation time ***/
+static ssize_t store_sampling_rate(struct kobject *a, struct attribute *b,
const char *buf, size_t count)
{
unsigned int input;
@@ -254,7 +299,7 @@ static ssize_t store_sampling_rate(struct cpufreq_policy *unused,
return count;
}
-static ssize_t store_up_threshold(struct cpufreq_policy *unused,
+static ssize_t store_up_threshold(struct kobject *a, struct attribute *b,
const char *buf, size_t count)
{
unsigned int input;
@@ -273,7 +318,7 @@ static ssize_t store_up_threshold(struct cpufreq_policy *unused,
return count;
}
-static ssize_t store_ignore_nice_load(struct cpufreq_policy *policy,
+static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b,
const char *buf, size_t count)
{
unsigned int input;
@@ -310,7 +355,7 @@ static ssize_t store_ignore_nice_load(struct cpufreq_policy *policy,
return count;
}
-static ssize_t store_powersave_bias(struct cpufreq_policy *unused,
+static ssize_t store_powersave_bias(struct kobject *a, struct attribute *b,
const char *buf, size_t count)
{
unsigned int input;
@@ -332,7 +377,7 @@ static ssize_t store_powersave_bias(struct cpufreq_policy *unused,
}
#define define_one_rw(_name) \
-static struct freq_attr _name = \
+static struct global_attr _name = \
__ATTR(_name, 0644, show_##_name, store_##_name)
define_one_rw(sampling_rate);
@@ -355,6 +400,47 @@ static struct attribute_group dbs_attr_group = {
.name = "ondemand",
};
+/*** delete after deprecation time ***/
+#define write_one_old(file_name)					\
+static ssize_t store_##file_name##_old					\
+(struct cpufreq_policy *unused, const char *buf, size_t count)		\
+{									\
+	printk_once(KERN_INFO "CPUFREQ: Per core ondemand sysfs "	\
+		    "interface is deprecated - " #file_name "\n");	\
+	return store_##file_name(NULL, NULL, buf, count);		\
+}
+write_one_old(sampling_rate);
+write_one_old(up_threshold);
+write_one_old(ignore_nice_load);
+write_one_old(powersave_bias);
+#define define_one_rw_old(object, _name)	\
+static struct freq_attr object =		\
+__ATTR(_name, 0644, show_##_name##_old, store_##_name##_old)
+define_one_rw_old(sampling_rate_old, sampling_rate);
+define_one_rw_old(up_threshold_old, up_threshold);
+define_one_rw_old(ignore_nice_load_old, ignore_nice_load);
+define_one_rw_old(powersave_bias_old, powersave_bias);
+static struct attribute *dbs_attributes_old[] = {
+	&sampling_rate_max_old.attr,
+	&sampling_rate_min_old.attr,
+	&sampling_rate_old.attr,
+	&up_threshold_old.attr,
+	&ignore_nice_load_old.attr,
+	&powersave_bias_old.attr,
+	NULL
+};
+static struct attribute_group dbs_attr_group_old = {
+	.attrs = dbs_attributes_old,
+	.name = "ondemand",
+};
+/*** delete after deprecation time ***/
/************************** sysfs end ************************/
static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
@@ -545,7 +631,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
mutex_lock(&dbs_mutex);
-		rc = sysfs_create_group(&policy->kobj, &dbs_attr_group);
+		rc = sysfs_create_group(&policy->kobj, &dbs_attr_group_old);
if (rc) {
mutex_unlock(&dbs_mutex);
return rc;
@@ -566,13 +652,20 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
}
this_dbs_info->cpu = cpu;
ondemand_powersave_bias_init_cpu(cpu);
-		mutex_init(&this_dbs_info->timer_mutex);
/*
* Start the timerschedule work, when this governor
* is used for first time
*/
if (dbs_enable == 1) {
unsigned int latency;
+			rc = sysfs_create_group(cpufreq_global_kobject,
+						&dbs_attr_group);
+			if (rc) {
+				mutex_unlock(&dbs_mutex);
+				return rc;
+			}
/* policy latency is in nS. Convert it to uS first */
latency = policy->cpuinfo.transition_latency / 1000;
if (latency == 0)
@@ -586,6 +679,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
}
mutex_unlock(&dbs_mutex);
+		mutex_init(&this_dbs_info->timer_mutex);
dbs_timer_init(this_dbs_info);
break;
@@ -593,10 +687,13 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
dbs_timer_exit(this_dbs_info);
mutex_lock(&dbs_mutex);
-		sysfs_remove_group(&policy->kobj, &dbs_attr_group);
+		sysfs_remove_group(&policy->kobj, &dbs_attr_group_old);
mutex_destroy(&this_dbs_info->timer_mutex);
dbs_enable--;
mutex_unlock(&dbs_mutex);
+		if (!dbs_enable)
+			sysfs_remove_group(cpufreq_global_kobject,
+					   &dbs_attr_group);
break;
@@ -614,16 +711,6 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
return 0;
}
-#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND
-static
-#endif
-struct cpufreq_governor cpufreq_gov_ondemand = {
-	.name			= "ondemand",
-	.governor		= cpufreq_governor_dbs,
-	.max_transition_latency	= TRANSITION_LATENCY_LIMIT,
-	.owner			= THIS_MODULE,
-};
static int __init cpufreq_gov_dbs_init(void)
{
int err;
...
@@ -65,6 +65,9 @@ static inline int cpufreq_unregister_notifier(struct notifier_block *nb,
struct cpufreq_governor;
+/* /sys/devices/system/cpu/cpufreq: entry point for global variables */
+extern struct kobject *cpufreq_global_kobject;
#define CPUFREQ_ETERNAL (-1)
struct cpufreq_cpuinfo {
unsigned int max_freq;
@@ -274,6 +277,13 @@ struct freq_attr {
ssize_t (*store)(struct cpufreq_policy *, const char *, size_t count);
};
+struct global_attr {
+	struct attribute attr;
+	ssize_t (*show)(struct kobject *kobj,
+			struct attribute *attr, char *buf);
+	ssize_t (*store)(struct kobject *a, struct attribute *b,
+			 const char *c, size_t count);
+};
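A minimal sketch of how a global_attr can be defined and hooked up to cpufreq_global_kobject, following the define_one_ro pattern from the ondemand changes above ("example_setting" and its value are hypothetical, not part of this merge):

/* exposes /sys/devices/system/cpu/cpufreq/example_setting (sketch) */
static unsigned int example_setting = 1;

static ssize_t show_example_setting(struct kobject *kobj,
				    struct attribute *attr, char *buf)
{
	return sprintf(buf, "%u\n", example_setting);
}

static struct global_attr example_setting_attr =
	__ATTR(example_setting, 0444, show_example_setting, NULL);

/* registration, e.g. from a module init function:
 *	sysfs_create_file(cpufreq_global_kobject,
 *			  &example_setting_attr.attr);
 */

With the ondemand changes above, attributes registered this way land in the new global /sys/devices/system/cpu/cpufreq directory rather than in a per-policy directory.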
/*********************************************************************
* CPUFREQ 2.6. INTERFACE *
...