Commit 00f0340a authored by Linus Torvalds

Merge bk://bk.arm.linux.org.uk/linux-2.6-rmk

into ppc970.osdl.org:/home/torvalds/v2.6/linux
parents 45ea3843 ca6c5d1f
@@ -38,6 +38,17 @@
#define PFX "longhaul: "
#define TYPE_LONGHAUL_V1 1
#define TYPE_LONGHAUL_V2 2
#define TYPE_POWERSAVER 3
#define CPU_SAMUEL 1
#define CPU_SAMUEL2 2
#define CPU_EZRA 3
#define CPU_EZRA_T 4
#define CPU_NEHEMIAH 5
static int cpu_model;
static unsigned int numscales=16, numvscales;
static unsigned int fsb;
static int minvid, maxvid;
@@ -73,9 +84,23 @@ static int voltage_table[32];
static unsigned int highest_speed, lowest_speed; /* kHz */
static int longhaul_version;
static struct cpufreq_frequency_table *longhaul_table;
static char speedbuffer[8];
static char *print_speed(int speed)
{
if (speed > 1000) {
if (speed%1000 == 0)
sprintf (speedbuffer, "%dGHz", speed/1000);
else
sprintf (speedbuffer, "%d.%dGHz", speed/1000, (speed%1000)/100);
} else
sprintf (speedbuffer, "%dMHz", speed);
return speedbuffer;
}
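/*
* Editor's note (illustrative, not part of the original patch): with the
* formatting above, print_speed(1466) returns "1.4GHz", print_speed(2000)
* returns "2GHz" and print_speed(800) returns "800MHz". The string lives
* in the static speedbuffer, so each call overwrites the previous result.
*/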
static unsigned int calc_speed(int mult, int fsb)
static unsigned int calc_speed(int mult)
{
int khz;
khz = (mult/10)*fsb;
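/*
* Editor's note (worked example): mult is the multiplier scaled by ten,
* so mult == 45 means 4.5x and mult == 120 means 12.0x. With fsb == 133
* the line above contributes (45/10) * 133 = 4 * 133 = 532 from the
* integer part of the multiplier; the rest of this function is elided by
* the diff hunk.
*/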
@@ -92,7 +117,7 @@ static int longhaul_get_cpu_mult(void)
rdmsr (MSR_IA32_EBL_CR_POWERON, lo, hi);
invalue = (lo & (1<<22|1<<23|1<<24|1<<25)) >>22;
if (longhaul_version==2 || longhaul_version==3) {
if (longhaul_version==TYPE_LONGHAUL_V2 || longhaul_version==TYPE_POWERSAVER) {
if (lo & (1<<27))
invalue+=16;
}
@@ -101,8 +126,21 @@ static int longhaul_get_cpu_mult(void)
static void do_powersaver(union msr_longhaul *longhaul,
unsigned int clock_ratio_index, int version)
unsigned int clock_ratio_index)
{
int version;
switch (cpu_model) {
case CPU_EZRA_T:
version = 3;
break;
case CPU_NEHEMIAH:
version = 0xf;
break;
default:
return;
}
rdmsrl(MSR_VIA_LONGHAUL, longhaul->val);
longhaul->bits.SoftBusRatio = clock_ratio_index & 0xf;
longhaul->bits.SoftBusRatio4 = (clock_ratio_index & 0x10) >> 4;
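/*
* Editor's note (worked example): the 5-bit ratio index is split across
* two MSR fields, e.g. clock_ratio_index == 0x13 gives
* SoftBusRatio = 0x13 & 0xf = 0x3 and SoftBusRatio4 = (0x13 & 0x10) >> 4 = 1.
*/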
@@ -125,7 +163,7 @@ static void do_powersaver(union msr_longhaul *longhaul,
* longhaul_set_cpu_frequency()
* @clock_ratio_index : bitpattern of the new multiplier.
*
* Sets a new clock ratio, and -if applicable- a new Front Side Bus
* Sets a new clock ratio.
*/
static void longhaul_setstate(unsigned int clock_ratio_index)
@@ -134,22 +172,28 @@ static void longhaul_setstate(unsigned int clock_ratio_index)
struct cpufreq_freqs freqs;
union msr_longhaul longhaul;
union msr_bcr2 bcr2;
static unsigned int old_ratio=-1;
if (old_ratio == clock_ratio_index)
return;
old_ratio = clock_ratio_index;
mult = clock_ratio[clock_ratio_index];
if (mult == -1)
return;
speed = calc_speed (mult, fsb);
speed = calc_speed(mult);
if ((speed > highest_speed) || (speed < lowest_speed))
return;
freqs.old = calc_speed (longhaul_get_cpu_mult(), fsb);
freqs.old = calc_speed(longhaul_get_cpu_mult());
freqs.new = speed;
freqs.cpu = 0; /* longhaul.c is UP only driver */
cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
dprintk (KERN_INFO PFX "FSB:%d Mult:%d.%dx\n", fsb, mult/10, mult%10);
dprintk (KERN_INFO PFX "Setting to FSB:%dMHz Mult:%d.%dx (%s)\n",
fsb, mult/10, mult%10, print_speed(speed/1000));
switch (longhaul_version) {
@@ -160,7 +204,8 @@ static void longhaul_setstate(unsigned int clock_ratio_index)
* *NB* Until we get voltage scaling working, v1 & v2 are the same code.
* Longhaul v2 appears in Samuel2 Steppings 1->7 [C5b] and Ezra [C5C]
*/
case 1:
case TYPE_LONGHAUL_V1:
case TYPE_LONGHAUL_V2:
rdmsrl (MSR_VIA_BCR2, bcr2.val);
/* Enable software clock multiplier */
bcr2.bits.ESOFTBF = 1;
@@ -180,26 +225,18 @@ static void longhaul_setstate(unsigned int clock_ratio_index)
break;
/*
* Longhaul v3 (aka Powersaver). (Ezra-T [C5M])
* Longhaul v3 (aka Powersaver). (Ezra-T [C5M] & Nehemiah [C5N])
* We can scale voltage with this too, but that's currently
* disabled until we come up with a decent 'match freq to voltage'
* algorithm.
* When we add voltage scaling, we will also need to do the
* voltage/freq setting in an order that depends on the direction
* of scaling (like we do in powernow-k7.c)
*/
case 2:
do_powersaver(&longhaul, clock_ratio_index, 3);
break;
/*
* Powersaver. (Nehemiah [C5N])
* As for Ezra-T, we don't do voltage yet.
* This can do FSB scaling too, but it has never been proven
* Nehemiah can do FSB scaling too, but this has never been proven
* to work in practice.
*/
case 3:
do_powersaver(&longhaul, clock_ratio_index, 0xf);
case TYPE_POWERSAVER:
do_powersaver(&longhaul, clock_ratio_index);
break;
}
@@ -249,7 +286,6 @@ static int guess_fsb(void)
static int __init longhaul_get_ranges(void)
{
struct cpuinfo_x86 *c = cpu_data;
unsigned long invalue;
unsigned int multipliers[32]= {
50,30,40,100,55,35,45,95,90,70,80,60,120,75,85,65,
@@ -261,62 +297,66 @@ static int __init longhaul_get_ranges(void)
unsigned int eblcr_fsb_table_v2[] = { 133, 100, -1, 66 };
switch (longhaul_version) {
case 1:
case TYPE_LONGHAUL_V1:
case TYPE_LONGHAUL_V2:
/* Ugh, Longhaul v1 didn't have the min/max MSRs.
Assume min=3.0x & max = whatever we booted at. */
minmult = 30;
maxmult = longhaul_get_cpu_mult();
rdmsr (MSR_IA32_EBL_CR_POWERON, lo, hi);
invalue = (lo & (1<<18|1<<19)) >>18;
if (c->x86_model==6)
if (cpu_model==CPU_SAMUEL || cpu_model==CPU_SAMUEL2)
fsb = eblcr_fsb_table_v1[invalue];
else
fsb = guess_fsb();
break;
case 2:
rdmsrl (MSR_VIA_LONGHAUL, longhaul.val);
invalue = longhaul.bits.MaxMHzBR;
if (longhaul.bits.MaxMHzBR4)
invalue += 16;
maxmult=multipliers[invalue];
invalue = longhaul.bits.MinMHzBR;
if (longhaul.bits.MinMHzBR4 == 1)
minmult = 30;
else
minmult = multipliers[invalue];
fsb = eblcr_fsb_table_v2[longhaul.bits.MaxMHzFSB];
break;
case 3:
rdmsrl (MSR_VIA_LONGHAUL, longhaul.val);
/*
* TODO: This code works, but raises a lot of questions.
* - Some Nehemiahs seem to have broken Min/MaxMHzBRs.
* We get around this by using a hardcoded multiplier of 5.0x
* for the minimum speed, and the speed we booted up at for the max.
* This is done in longhaul_get_cpu_mult() by reading the EBLCR register.
* - According to some VIA documentation, EBLCR is only present
* in pre-Nehemiah C3s. How this still works is a mystery.
* We're possibly using something undocumented and unsupported,
* but it works, so we don't grumble.
*/
minmult=50;
maxmult=longhaul_get_cpu_mult();
/* Starting with the 1.2GHz parts, there's a 200MHz bus. */
if ((cpu_khz/1000) > 1200)
fsb = 200;
else
case TYPE_POWERSAVER:
/* Ezra-T */
if (cpu_model==CPU_EZRA_T) {
rdmsrl (MSR_VIA_LONGHAUL, longhaul.val);
invalue = longhaul.bits.MaxMHzBR;
if (longhaul.bits.MaxMHzBR4)
invalue += 16;
maxmult=multipliers[invalue];
invalue = longhaul.bits.MinMHzBR;
if (longhaul.bits.MinMHzBR4 == 1)
minmult = 30;
else
minmult = multipliers[invalue];
fsb = eblcr_fsb_table_v2[longhaul.bits.MaxMHzFSB];
break;
break;
}
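/*
* Editor's note (worked decode, using the multipliers[] table above):
* MaxMHzBR == 0xA with MaxMHzBR4 == 0 selects multipliers[10] == 80,
* i.e. a maximum of 8.0x, and MaxMHzFSB == 0 selects a 133MHz FSB from
* eblcr_fsb_table_v2[].
*/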
/* Nehemiah */
if (cpu_model==CPU_NEHEMIAH) {
rdmsrl (MSR_VIA_LONGHAUL, longhaul.val);
/*
* TODO: This code works, but raises a lot of questions.
* - Some Nehemiahs seem to have broken Min/MaxMHzBRs.
* We get around this by using a hardcoded multiplier of 4.0x
* for the minimum speed, and the speed we booted up at for the max.
* This is done in longhaul_get_cpu_mult() by reading the EBLCR register.
* - According to some VIA documentation, EBLCR is only present
* in pre-Nehemiah C3s. How this still works is a mystery.
* We're possibly using something undocumented and unsupported,
* but it works, so we don't grumble.
*/
minmult=40;
maxmult=longhaul_get_cpu_mult();
/* Starting with the 1.2GHz parts, there's a 200MHz bus. */
if ((cpu_khz/1000) > 1200)
fsb = 200;
else
fsb = eblcr_fsb_table_v2[longhaul.bits.MaxMHzFSB];
break;
}
}
dprintk (KERN_INFO PFX "MinMult=%d.%dx MaxMult=%d.%dx\n",
dprintk (KERN_INFO PFX "MinMult:%d.%dx MaxMult:%d.%dx\n",
minmult/10, minmult%10, maxmult/10, maxmult%10);
if (fsb == -1) {
@@ -324,10 +364,11 @@ static int __init longhaul_get_ranges(void)
return -EINVAL;
}
highest_speed = calc_speed (maxmult, fsb);
lowest_speed = calc_speed (minmult,fsb);
dprintk (KERN_INFO PFX "FSB: %dMHz Lowestspeed=%dMHz Highestspeed=%dMHz\n",
fsb, lowest_speed/1000, highest_speed/1000);
highest_speed = calc_speed(maxmult);
lowest_speed = calc_speed(minmult);
dprintk (KERN_INFO PFX "FSB:%dMHz ", fsb);
dprintk ("Lowest speed:%s ", print_speed(lowest_speed/1000));
dprintk ("Highest speed:%s\n", print_speed(highest_speed/1000));
if (lowest_speed == highest_speed) {
printk (KERN_INFO PFX "highestspeed == lowest, aborting.\n");
@@ -350,7 +391,7 @@ static int __init longhaul_get_ranges(void)
continue;
if (ratio > maxmult || ratio < minmult)
continue;
longhaul_table[k].frequency = calc_speed (ratio, fsb);
longhaul_table[k].frequency = calc_speed(ratio);
longhaul_table[k].index = j;
k++;
}
@@ -426,8 +467,7 @@ static int longhaul_verify(struct cpufreq_policy *policy)
static int longhaul_target(struct cpufreq_policy *policy,
unsigned int target_freq,
unsigned int relation)
unsigned int target_freq, unsigned int relation)
{
unsigned int table_index = 0;
unsigned int new_clock_ratio = 0;
@@ -442,13 +482,15 @@ static int longhaul_target(struct cpufreq_policy *policy,
return 0;
}
static unsigned int longhaul_get(unsigned int cpu)
{
if (cpu)
return 0;
return (calc_speed (longhaul_get_cpu_mult(), fsb));
return calc_speed(longhaul_get_cpu_mult());
}
static int __init longhaul_cpu_init(struct cpufreq_policy *policy)
{
struct cpuinfo_x86 *c = cpu_data;
@@ -457,26 +499,31 @@ static int __init longhaul_cpu_init(struct cpufreq_policy *policy)
switch (c->x86_model) {
case 6:
cpu_model = CPU_SAMUEL;
cpuname = "C3 'Samuel' [C5A]";
longhaul_version=1;
longhaul_version = TYPE_LONGHAUL_V1;
memcpy (clock_ratio, samuel1_clock_ratio, sizeof(samuel1_clock_ratio));
memcpy (eblcr_table, samuel1_eblcr, sizeof(samuel1_eblcr));
break;
case 7: /* C5B / C5C */
longhaul_version=1;
case 7:
longhaul_version = TYPE_LONGHAUL_V1;
switch (c->x86_mask) {
case 0:
cpu_model = CPU_SAMUEL2;
cpuname = "C3 'Samuel 2' [C5B]";
/* Note: this is not a typo; early Samuel2s had Samuel1 ratios. */
memcpy (clock_ratio, samuel1_clock_ratio, sizeof(samuel1_clock_ratio));
memcpy (eblcr_table, samuel2_eblcr, sizeof(samuel2_eblcr));
break;
case 1 ... 15:
if (c->x86_mask < 8)
if (c->x86_mask < 8) {
cpu_model = CPU_SAMUEL2;
cpuname = "C3 'Samuel 2' [C5B]";
else
} else {
cpu_model = CPU_EZRA;
cpuname = "C3 'Ezra' [C5C]";
}
memcpy (clock_ratio, ezra_clock_ratio, sizeof(ezra_clock_ratio));
memcpy (eblcr_table, ezra_eblcr, sizeof(ezra_eblcr));
break;
@@ -484,15 +531,17 @@ static int __init longhaul_cpu_init(struct cpufreq_policy *policy)
break;
case 8:
cpu_model = CPU_EZRA_T;
cpuname = "C3 'Ezra-T' [C5M]";
longhaul_version=2;
longhaul_version = TYPE_POWERSAVER;
numscales=32;
memcpy (clock_ratio, ezrat_clock_ratio, sizeof(ezrat_clock_ratio));
memcpy (eblcr_table, ezrat_eblcr, sizeof(ezrat_eblcr));
break;
case 9:
longhaul_version=3;
cpu_model = CPU_NEHEMIAH;
longhaul_version = TYPE_POWERSAVER;
numscales=32;
switch (c->x86_mask) {
case 0 ... 1:
@@ -518,19 +567,28 @@ static int __init longhaul_cpu_init(struct cpufreq_policy *policy)
break;
}
printk (KERN_INFO PFX "VIA %s CPU detected. Longhaul v%d supported.\n",
cpuname, longhaul_version);
printk (KERN_INFO PFX "VIA %s CPU detected. ", cpuname);
switch (longhaul_version) {
case TYPE_LONGHAUL_V1:
case TYPE_LONGHAUL_V2:
printk ("Longhaul v%d supported.\n", longhaul_version);
break;
case TYPE_POWERSAVER:
printk ("Powersaver supported.\n");
break;
};
ret = longhaul_get_ranges();
if (ret != 0)
return ret;
if ((longhaul_version==2) && (dont_scale_voltage==0))
if ((longhaul_version==TYPE_LONGHAUL_V2 || longhaul_version==TYPE_POWERSAVER) &&
(dont_scale_voltage==0))
longhaul_setup_voltagescaling();
policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
policy->cur = calc_speed (longhaul_get_cpu_mult(), fsb);
policy->cur = calc_speed(longhaul_get_cpu_mult());
ret = cpufreq_frequency_table_cpuinfo(policy, longhaul_table);
if (ret)
@@ -563,6 +621,7 @@ static struct cpufreq_driver longhaul_driver = {
.attr = longhaul_attr,
};
static int __init longhaul_init(void)
{
struct cpuinfo_x86 *c = cpu_data;
@@ -580,16 +639,17 @@ static int __init longhaul_init(void)
return -ENODEV;
}
static void __exit longhaul_exit(void)
{
int i=0;
unsigned int new_clock_ratio;
while (clock_ratio[i] != maxmult)
i++;
new_clock_ratio = longhaul_table[i].index & 0xFF;
longhaul_setstate(new_clock_ratio);
for (i=0; i < numscales; i++) {
if (clock_ratio[i] == maxmult) {
longhaul_setstate(i);
break;
}
}
cpufreq_unregister_driver(&longhaul_driver);
kfree(longhaul_table);
......
@@ -69,6 +69,21 @@ config CPU_FREQ_GOV_USERSPACE
If in doubt, say Y.
config CPU_FREQ_GOV_ONDEMAND
tristate "'ondemand' cpufreq policy governor"
depends on CPU_FREQ
help
'ondemand' - This driver adds a dynamic cpufreq policy governor.
The governor polls the CPU periodically and changes the
frequency based on CPU utilization.
Support for this governor requires a CPU capable of fast
frequency switching (i.e., very low latency frequency
transitions).
For details, take a look at linux/Documentation/cpu-freq.
If in doubt, say N.
config CPU_FREQ_24_API
bool "/proc/sys/cpu/ interface (2.4. / OLD)"
depends on CPU_FREQ && SYSCTL && CPU_FREQ_GOV_USERSPACE
......
@@ -5,6 +5,7 @@ obj-$(CONFIG_CPU_FREQ) += cpufreq.o
obj-$(CONFIG_CPU_FREQ_GOV_PERFORMANCE) += cpufreq_performance.o
obj-$(CONFIG_CPU_FREQ_GOV_POWERSAVE) += cpufreq_powersave.o
obj-$(CONFIG_CPU_FREQ_GOV_USERSPACE) += cpufreq_userspace.o
obj-$(CONFIG_CPU_FREQ_GOV_ONDEMAND) += cpufreq_ondemand.o
# CPUfreq cross-arch helpers
obj-$(CONFIG_CPU_FREQ_TABLE) += freq_table.o
......
/*
* drivers/cpufreq/cpufreq_ondemand.c
*
* Copyright (C) 2001 Russell King
* (C) 2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
* Jun Nakajima <jun.nakajima@intel.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ctype.h>
#include <linux/cpufreq.h>
#include <linux/sysctl.h>
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/sysfs.h>
#include <linux/sched.h>
#include <linux/kmod.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/kernel_stat.h>
#include <linux/percpu.h>
/*
* dbs is used in this file as a short form for demand-based switching.
* It helps to keep the variable names smaller and simpler.
*/
#define DEF_FREQUENCY_UP_THRESHOLD (80)
#define MIN_FREQUENCY_UP_THRESHOLD (0)
#define MAX_FREQUENCY_UP_THRESHOLD (100)
#define DEF_FREQUENCY_DOWN_THRESHOLD (20)
#define MIN_FREQUENCY_DOWN_THRESHOLD (0)
#define MAX_FREQUENCY_DOWN_THRESHOLD (100)
/*
* The polling frequency of this governor depends on the capability of
* the processor. Default polling frequency is 1000 times the transition
* latency of the processor. The governor will work on any processor with
* transition latency <= 10 ms, using an appropriate sampling
* rate.
* For CPUs with transition latency > 10 ms (mostly drivers with CPUFREQ_ETERNAL)
* this governor will not work.
* All times here are in uS.
*/
static unsigned int def_sampling_rate;
#define MIN_SAMPLING_RATE (def_sampling_rate / 2)
#define MAX_SAMPLING_RATE (500 * def_sampling_rate)
#define DEF_SAMPLING_RATE_LATENCY_MULTIPLIER (1000)
#define DEF_SAMPLING_DOWN_FACTOR (10)
#define TRANSITION_LATENCY_LIMIT (10 * 1000)
#define sampling_rate_in_HZ(x) ((x * HZ) / (1000 * 1000))
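/*
* Editor's note (worked example, assuming HZ == 100): for a sampling rate
* of 500000 uS, sampling_rate_in_HZ(500000) = (500000 * 100) / (1000 * 1000)
* = 50 jiffies, i.e. half a second between polls. With def_sampling_rate
* == 500000 the tunable range would be MIN_SAMPLING_RATE == 250000 uS and
* MAX_SAMPLING_RATE == 250000000 uS.
*/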
static void do_dbs_timer(void *data);
struct cpu_dbs_info_s {
struct cpufreq_policy *cur_policy;
unsigned int prev_cpu_idle_up;
unsigned int prev_cpu_idle_down;
unsigned int enable;
};
static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info);
static unsigned int dbs_enable; /* number of CPUs using this policy */
static DECLARE_MUTEX (dbs_sem);
static DECLARE_WORK (dbs_work, do_dbs_timer, NULL);
struct dbs_tuners {
unsigned int sampling_rate;
unsigned int sampling_down_factor;
unsigned int up_threshold;
unsigned int down_threshold;
};
struct dbs_tuners dbs_tuners_ins = {
.up_threshold = DEF_FREQUENCY_UP_THRESHOLD,
.down_threshold = DEF_FREQUENCY_DOWN_THRESHOLD,
.sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR,
};
/************************** sysfs interface ************************/
static ssize_t show_current_freq(struct cpufreq_policy *policy, char *buf)
{
return sprintf (buf, "%u\n", policy->cur);
}
static ssize_t show_sampling_rate_max(struct cpufreq_policy *policy, char *buf)
{
return sprintf (buf, "%u\n", MAX_SAMPLING_RATE);
}
static ssize_t show_sampling_rate_min(struct cpufreq_policy *policy, char *buf)
{
return sprintf (buf, "%u\n", MIN_SAMPLING_RATE);
}
#define define_one_ro(_name) \
static struct freq_attr _name = { \
.attr = { .name = __stringify(_name), .mode = 0444 }, \
.show = show_##_name, \
}
define_one_ro(current_freq);
define_one_ro(sampling_rate_max);
define_one_ro(sampling_rate_min);
/* cpufreq_ondemand Governor Tunables */
#define show_one(file_name, object) \
static ssize_t show_##file_name \
(struct cpufreq_policy *unused, char *buf) \
{ \
return sprintf(buf, "%u\n", dbs_tuners_ins.object); \
}
show_one(sampling_rate, sampling_rate);
show_one(sampling_down_factor, sampling_down_factor);
show_one(up_threshold, up_threshold);
show_one(down_threshold, down_threshold);
static ssize_t store_sampling_down_factor(struct cpufreq_policy *unused,
const char *buf, size_t count)
{
unsigned int input;
int ret;
ret = sscanf (buf, "%u", &input);
down(&dbs_sem);
if (ret != 1 )
goto out;
dbs_tuners_ins.sampling_down_factor = input;
out:
up(&dbs_sem);
return count;
}
static ssize_t store_sampling_rate(struct cpufreq_policy *unused,
const char *buf, size_t count)
{
unsigned int input;
int ret;
ret = sscanf (buf, "%u", &input);
down(&dbs_sem);
if (ret != 1 || input > MAX_SAMPLING_RATE || input < MIN_SAMPLING_RATE)
goto out;
dbs_tuners_ins.sampling_rate = input;
out:
up(&dbs_sem);
return count;
}
static ssize_t store_up_threshold(struct cpufreq_policy *unused,
const char *buf, size_t count)
{
unsigned int input;
int ret;
ret = sscanf (buf, "%u", &input);
down(&dbs_sem);
if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD ||
input < MIN_FREQUENCY_UP_THRESHOLD ||
input <= dbs_tuners_ins.down_threshold)
goto out;
dbs_tuners_ins.up_threshold = input;
out:
up(&dbs_sem);
return count;
}
static ssize_t store_down_threshold(struct cpufreq_policy *unused,
const char *buf, size_t count)
{
unsigned int input;
int ret;
ret = sscanf (buf, "%u", &input);
down(&dbs_sem);
if (ret != 1 || input > MAX_FREQUENCY_DOWN_THRESHOLD ||
input < MIN_FREQUENCY_DOWN_THRESHOLD ||
input >= dbs_tuners_ins.up_threshold)
goto out;
dbs_tuners_ins.down_threshold = input;
out:
up(&dbs_sem);
return count;
}
#define define_one_rw(_name) \
static struct freq_attr _name = { \
.attr = { .name = __stringify(_name), .mode = 0644 }, \
.show = show_##_name, \
.store = store_##_name, \
}
define_one_rw(sampling_rate);
define_one_rw(sampling_down_factor);
define_one_rw(up_threshold);
define_one_rw(down_threshold);
static struct attribute * dbs_attributes[] = {
&current_freq.attr,
&sampling_rate_max.attr,
&sampling_rate_min.attr,
&sampling_rate.attr,
&sampling_down_factor.attr,
&up_threshold.attr,
&down_threshold.attr,
NULL
};
static struct attribute_group dbs_attr_group = {
.attrs = dbs_attributes,
.name = "ondemand",
};
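/*
* Editor's note (assumed sysfs layout, not stated in this patch): since the
* group is attached to policy->kobj under the name "ondemand", these
* tunables would typically appear as
* /sys/devices/system/cpu/cpuN/cpufreq/ondemand/sampling_rate and friends
* while the governor is active.
*/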
/************************** sysfs end ************************/
static void dbs_check_cpu(int cpu)
{
unsigned int idle_ticks, up_idle_ticks, down_idle_ticks;
unsigned int freq_down_step;
unsigned int freq_down_sampling_rate;
static int down_skip[NR_CPUS];
struct cpu_dbs_info_s *this_dbs_info;
this_dbs_info = &per_cpu(cpu_dbs_info, cpu);
if (!this_dbs_info->enable)
return;
/*
* The default safe range is 20% to 80%
* Every sampling_rate, we check
* - If current idle time is less than 20%, then we try to
* increase frequency
* Every sampling_rate*sampling_down_factor, we check
* - If current idle time is more than 80%, then we try to
* decrease frequency
*
* Any frequency increase takes it to the maximum frequency.
* Frequency reduction happens at minimum steps of
* 5% of max_frequency
*/
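/*
* Editor's note (worked example, assuming HZ == 100 and the default
* tunables): with sampling_rate == 1000000 uS the up window is
* sampling_rate_in_HZ(1000000) == 100 ticks, so up_idle_ticks =
* (100 - 80) * 100 / 100 = 20; fewer than 20 idle ticks forces policy->max.
* The down check runs every 10 windows, so down_idle_ticks =
* (100 - 20) * 1000 / 100 = 800; more than 800 idle ticks in those 1000
* ticks drops the frequency by 5% of policy->max.
*/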
/* Check for frequency increase */
idle_ticks = kstat_cpu(cpu).cpustat.idle -
this_dbs_info->prev_cpu_idle_up;
this_dbs_info->prev_cpu_idle_up = kstat_cpu(cpu).cpustat.idle;
up_idle_ticks = (100 - dbs_tuners_ins.up_threshold) *
sampling_rate_in_HZ(dbs_tuners_ins.sampling_rate) / 100;
if (idle_ticks < up_idle_ticks) {
__cpufreq_driver_target(this_dbs_info->cur_policy,
this_dbs_info->cur_policy->max,
CPUFREQ_RELATION_H);
down_skip[cpu] = 0;
this_dbs_info->prev_cpu_idle_down = kstat_cpu(cpu).cpustat.idle;
return;
}
/* Check for frequency decrease */
down_skip[cpu]++;
if (down_skip[cpu] < dbs_tuners_ins.sampling_down_factor)
return;
idle_ticks = kstat_cpu(cpu).cpustat.idle -
this_dbs_info->prev_cpu_idle_down;
down_skip[cpu] = 0;
this_dbs_info->prev_cpu_idle_down = kstat_cpu(cpu).cpustat.idle;
freq_down_sampling_rate = dbs_tuners_ins.sampling_rate *
dbs_tuners_ins.sampling_down_factor;
down_idle_ticks = (100 - dbs_tuners_ins.down_threshold) *
sampling_rate_in_HZ(freq_down_sampling_rate) / 100;
if (idle_ticks > down_idle_ticks ) {
freq_down_step = (5 * this_dbs_info->cur_policy->max) / 100;
__cpufreq_driver_target(this_dbs_info->cur_policy,
this_dbs_info->cur_policy->cur - freq_down_step,
CPUFREQ_RELATION_H);
return;
}
}
static void do_dbs_timer(void *data)
{
int i;
down(&dbs_sem);
for (i = 0; i < NR_CPUS; i++)
if (cpu_online(i))
dbs_check_cpu(i);
schedule_delayed_work(&dbs_work,
sampling_rate_in_HZ(dbs_tuners_ins.sampling_rate));
up(&dbs_sem);
}
static inline void dbs_timer_init(void)
{
INIT_WORK(&dbs_work, do_dbs_timer, NULL);
schedule_work(&dbs_work);
return;
}
static inline void dbs_timer_exit(void)
{
cancel_delayed_work(&dbs_work);
return;
}
static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
unsigned int event)
{
unsigned int cpu = policy->cpu;
struct cpu_dbs_info_s *this_dbs_info;
this_dbs_info = &per_cpu(cpu_dbs_info, cpu);
switch (event) {
case CPUFREQ_GOV_START:
if ((!cpu_online(cpu)) ||
(!policy->cur))
return -EINVAL;
if (policy->cpuinfo.transition_latency >
(TRANSITION_LATENCY_LIMIT * 1000))
return -EINVAL;
if (this_dbs_info->enable) /* Already enabled */
break;
down(&dbs_sem);
this_dbs_info->cur_policy = policy;
this_dbs_info->prev_cpu_idle_up =
kstat_cpu(cpu).cpustat.idle;
this_dbs_info->prev_cpu_idle_down =
kstat_cpu(cpu).cpustat.idle;
this_dbs_info->enable = 1;
sysfs_create_group(&policy->kobj, &dbs_attr_group);
dbs_enable++;
/*
* Start the timer schedule work when this governor
* is used for the first time
*/
if (dbs_enable == 1) {
/* policy latency is in nS. Convert it to uS first */
def_sampling_rate = (policy->cpuinfo.transition_latency / 1000) *
DEF_SAMPLING_RATE_LATENCY_MULTIPLIER;
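/*
* Editor's note (worked example): for a transition latency of
* 1000000 nS (1 ms) this gives (1000000 / 1000) * 1000 = 1000000 uS,
* i.e. the governor samples once per second by default.
*/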
dbs_tuners_ins.sampling_rate = def_sampling_rate;
dbs_timer_init();
}
up(&dbs_sem);
break;
case CPUFREQ_GOV_STOP:
down(&dbs_sem);
this_dbs_info->enable = 0;
sysfs_remove_group(&policy->kobj, &dbs_attr_group);
dbs_enable--;
/*
* Stop the timer schedule work when no CPU is
* using this governor any more
*/
if (dbs_enable == 0)
dbs_timer_exit();
up(&dbs_sem);
break;
case CPUFREQ_GOV_LIMITS:
down(&dbs_sem);
if (policy->max < this_dbs_info->cur_policy->cur)
__cpufreq_driver_target(
this_dbs_info->cur_policy,
policy->max, CPUFREQ_RELATION_H);
else if (policy->min > this_dbs_info->cur_policy->cur)
__cpufreq_driver_target(
this_dbs_info->cur_policy,
policy->min, CPUFREQ_RELATION_L);
up(&dbs_sem);
break;
}
return 0;
}
struct cpufreq_governor cpufreq_gov_dbs = {
.name = "ondemand",
.governor = cpufreq_governor_dbs,
.owner = THIS_MODULE,
};
EXPORT_SYMBOL(cpufreq_gov_dbs);
static int __init cpufreq_gov_dbs_init(void)
{
return cpufreq_register_governor(&cpufreq_gov_dbs);
}
static void __exit cpufreq_gov_dbs_exit(void)
{
/* Make sure that the scheduled work is indeed not running */
flush_scheduled_work();
cpufreq_unregister_governor(&cpufreq_gov_dbs);
}
MODULE_AUTHOR ("Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>");
MODULE_DESCRIPTION ("'cpufreq_ondemand' - A dynamic cpufreq governor for "
"Low Latency Frequency Transition capable processors");
MODULE_LICENSE ("GPL");
module_init(cpufreq_gov_dbs_init);
module_exit(cpufreq_gov_dbs_exit);
@@ -29,6 +29,7 @@
#define MANUFACTURER_AMD 0x0001
#define MANUFACTURER_ATMEL 0x001f
#define MANUFACTURER_FUJITSU 0x0004
#define MANUFACTURER_HYUNDAI 0x00AD
#define MANUFACTURER_INTEL 0x0089
#define MANUFACTURER_MACRONIX 0x00C2
#define MANUFACTURER_PMC 0x009D
@@ -56,6 +57,7 @@
#define AM29F040 0x00A4
#define AM29LV040B 0x004F
#define AM29F032B 0x0041
#define AM29F002T 0x00B0
/* Atmel */
#define AT49BV512 0x0003
@@ -77,6 +79,8 @@
#define MBM29LV400TC 0x22B9
#define MBM29LV400BC 0x22BA
/* Hyundai */
#define HY29F002T 0x00B0
/* Intel */
#define I28F004B3T 0x00d4
@@ -106,6 +110,7 @@
#define MX29LV160T 0x22C4
#define MX29LV160B 0x2249
#define MX29F016 0x00AD
#define MX29F002T 0x00B0
#define MX29F004T 0x0045
#define MX29F004B 0x0046
@@ -506,6 +511,17 @@ static const struct amd_flash_info jedec_table[] = {
.regions = {
ERASEINFO(0x10000,8),
}
}, {
.mfr_id = MANUFACTURER_AMD,
.dev_id = AM29F002T,
.name = "AMD AM29F002T",
.DevSize = SIZE_256KiB,
.NumEraseRegions = 4,
.regions = {
ERASEINFO(0x10000,3),
ERASEINFO(0x08000,1),
ERASEINFO(0x02000,2),
ERASEINFO(0x04000,1)
}
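/*
* Editor's note (sanity check, not part of the original patch):
* 3 * 64KiB + 32KiB + 2 * 8KiB + 16KiB = 256KiB, matching SIZE_256KiB;
* the HY29F002T and MX29F002T entries below reuse the same geometry.
*/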
}, {
.mfr_id = MANUFACTURER_ATMEL,
.dev_id = AT49BV512,
@@ -751,6 +767,17 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x02000,2),
ERASEINFO(0x04000,1)
}
}, {
.mfr_id = MANUFACTURER_HYUNDAI,
.dev_id = HY29F002T,
.name = "Hyundai HY29F002T",
.DevSize = SIZE_256KiB,
.NumEraseRegions = 4,
.regions = {
ERASEINFO(0x10000,3),
ERASEINFO(0x08000,1),
ERASEINFO(0x02000,2),
ERASEINFO(0x04000,1)
}
}, {
.mfr_id = MANUFACTURER_INTEL,
.dev_id = I28F004B3B,
@@ -1134,6 +1161,17 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x08000,1),
ERASEINFO(0x10000,7),
}
}, {
.mfr_id = MANUFACTURER_MACRONIX,
.dev_id = MX29F002T,
.name = "Macronix MX29F002T",
.DevSize = SIZE_256KiB,
.NumEraseRegions = 4,
.regions = {
ERASEINFO(0x10000,3),
ERASEINFO(0x08000,1),
ERASEINFO(0x02000,2),
ERASEINFO(0x04000,1)
}
}, {
.mfr_id = MANUFACTURER_PMC,
.dev_id = PM49FL002,
@@ -1570,7 +1608,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x02000, 2),
ERASEINFO(0x04000, 1),
}
}
}
};
......