Commit dfd64ec8 authored by Dave Jones

Merge

parents 81fd00e2 f9dfb7b5
@@ -88,6 +88,11 @@ config X86_POWERNOW_K7
	  If in doubt, say N.

+config X86_POWERNOW_K7_ACPI
+	bool
+	depends on ((X86_POWERNOW_K7 = "m" && ACPI_PROCESSOR) || (X86_POWERNOW_K7 = "y" && ACPI_PROCESSOR = "y"))
+	default y
+
 config X86_POWERNOW_K8
	tristate "AMD Opteron/Athlon64 PowerNow!"
	depends on CPU_FREQ && EXPERIMENTAL

@@ -98,6 +103,11 @@ config X86_POWERNOW_K8
	  If in doubt, say N.

+config X86_POWERNOW_K8_ACPI
+	bool
+	depends on ((X86_POWERNOW_K8 = "m" && ACPI_PROCESSOR) || (X86_POWERNOW_K8 = "y" && ACPI_PROCESSOR = "y"))
+	default y
+
 config X86_GX_SUSPMOD
	tristate "Cyrix MediaGX/NatSemi Geode Suspend Modulation"
	depends on CPU_FREQ
......
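Editor's note: the new bool symbols exist so the drivers can collapse the old compound ACPI_PROCESSOR checks into a single guard. For instance, the powernow-k7.c hunk further down becomes simply:

    #ifdef CONFIG_X86_POWERNOW_K7_ACPI
    #include <linux/acpi.h>
    #include <acpi/processor.h>
    #endif

The options default to y and are not user-visible; they only track whether the ACPI processor driver is usable from the (possibly modular) cpufreq driver.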
@@ -5,14 +5,19 @@
  * Licensed under the terms of the GNU GPL License version 2.
  * Based upon datasheets & sample CPUs kindly provided by VIA.
  *
- * VIA have currently 2 different versions of Longhaul.
+ * VIA have currently 3 different versions of Longhaul.
  * Version 1 (Longhaul) uses the BCR2 MSR at 0x1147.
- * It is present only in Samuel 1, Samuel 2 and Ezra.
- * Version 2 (Powersaver) uses the POWERSAVER MSR at 0x110a.
- * It is present in Ezra-T, Nehemiah and above.
- * In addition to scaling multiplier, it can also scale voltage.
- * There is provision for scaling FSB too, but this doesn't work
- * too well in practice.
+ * It is present only in Samuel 1 (C5A), Samuel 2 (C5B) stepping 0.
+ * Version 2 of longhaul is the same as v1, but adds voltage scaling.
+ * Present in Samuel 2 (steppings 1-7 only) (C5B), and Ezra (C5C)
+ * voltage scaling support has currently been disabled in this driver
+ * until we have code that gets it right.
+ * Version 3 of longhaul got renamed to Powersaver and redesigned
+ * to use the POWERSAVER MSR at 0x110a.
+ * It is present in Ezra-T (C5M), Nehemiah (C5X) and above.
+ * It's pretty much the same feature wise to longhaul v2, though
+ * there is provision for scaling FSB too, but this doesn't work
+ * too well in practice so we don't even try to use this.
  *
  * BIG FAT DISCLAIMER: Work in progress code. Possibly *dangerous*
  */
@@ -95,6 +100,27 @@ static int longhaul_get_cpu_mult(void)
 }
static void do_powersaver(union msr_longhaul *longhaul,
unsigned int clock_ratio_index, int version)
{
rdmsrl(MSR_VIA_LONGHAUL, longhaul->val);
longhaul->bits.SoftBusRatio = clock_ratio_index & 0xf;
longhaul->bits.SoftBusRatio4 = (clock_ratio_index & 0x10) >> 4;
longhaul->bits.EnableSoftBusRatio = 1;
longhaul->bits.RevisionKey = 0;
local_irq_disable();
wrmsrl(MSR_VIA_LONGHAUL, longhaul->val);
local_irq_enable();
__hlt();
rdmsrl(MSR_VIA_LONGHAUL, longhaul->val);
longhaul->bits.EnableSoftBusRatio = 0;
longhaul->bits.RevisionKey = version;
local_irq_disable();
wrmsrl(MSR_VIA_LONGHAUL, longhaul->val);
local_irq_enable();
}
 /**
  * longhaul_set_cpu_frequency()
  * @clock_ratio_index : bitpattern of the new multiplier.
@@ -126,61 +152,54 @@ static void longhaul_setstate(unsigned int clock_ratio_index)
	dprintk (KERN_INFO PFX "FSB:%d Mult:%d.%dx\n", fsb, mult/10, mult%10);

	switch (longhaul_version) {

+	/*
+	 * Longhaul v1. (Samuel[C5A] and Samuel2 stepping 0[C5B])
+	 * Software controlled multipliers only.
+	 *
+	 * *NB* Until we get voltage scaling working v1 & v2 are the same code.
+	 * Longhaul v2 appears in Samuel2 Steppings 1->7 [C5b] and Ezra [C5C]
+	 */
	case 1:
		rdmsrl (MSR_VIA_BCR2, bcr2.val);
		/* Enable software clock multiplier */
		bcr2.bits.ESOFTBF = 1;
		bcr2.bits.CLOCKMUL = clock_ratio_index;
+		local_irq_disable();
		wrmsrl (MSR_VIA_BCR2, bcr2.val);
+		local_irq_enable();
		__hlt();

		/* Disable software clock multiplier */
		rdmsrl (MSR_VIA_BCR2, bcr2.val);
		bcr2.bits.ESOFTBF = 0;
+		local_irq_disable();
		wrmsrl (MSR_VIA_BCR2, bcr2.val);
+		local_irq_enable();
		break;

	/*
-	 * Powersaver. (Ezra-T [C5M], Nehemiah [C5N])
+	 * Longhaul v3 (aka Powersaver). (Ezra-T [C5M])
	 * We can scale voltage with this too, but that's currently
	 * disabled until we come up with a decent 'match freq to voltage'
	 * algorithm.
-	 * We also need to do the voltage/freq setting in order depending
-	 * on the direction of scaling (like we do in powernow-k7.c)
-	 * Ezra-T was alleged to do FSB scaling too, but it never worked in practice.
+	 * When we add voltage scaling, we will also need to do the
+	 * voltage/freq setting in order depending on the direction
+	 * of scaling (like we do in powernow-k7.c)
	 */
	case 2:
-		rdmsrl (MSR_VIA_LONGHAUL, longhaul.val);
-		longhaul.bits.SoftBusRatio = clock_ratio_index & 0xf;
-		longhaul.bits.SoftBusRatio4 = (clock_ratio_index & 0x10) >> 4;
-		longhaul.bits.EnableSoftBusRatio = 1;
-		/* We must program the revision key only with values we
-		 * know about, not blindly copy it from 0:3 */
-		longhaul.bits.RevisionKey = 3;	/* SoftVID & SoftBSEL */
-		wrmsrl(MSR_VIA_LONGHAUL, longhaul.val);
-		__hlt();
-		rdmsrl (MSR_VIA_LONGHAUL, longhaul.val);
-		longhaul.bits.EnableSoftBusRatio = 0;
-		longhaul.bits.RevisionKey = 3;
-		wrmsrl (MSR_VIA_LONGHAUL, longhaul.val);
+		do_powersaver(&longhaul, clock_ratio_index, 3);
		break;

-	case 3:
-		rdmsrl (MSR_VIA_LONGHAUL, longhaul.val);
-		longhaul.bits.SoftBusRatio = clock_ratio_index & 0xf;
-		longhaul.bits.SoftBusRatio4 = (clock_ratio_index & 0x10) >> 4;
-		longhaul.bits.EnableSoftBusRatio = 1;
-		longhaul.bits.RevisionKey = 0x0;
-		wrmsrl(MSR_VIA_LONGHAUL, longhaul.val);
-		__hlt();
-		rdmsrl (MSR_VIA_LONGHAUL, longhaul.val);
-		longhaul.bits.EnableSoftBusRatio = 0;
-		longhaul.bits.RevisionKey = 0xf;
-		wrmsrl (MSR_VIA_LONGHAUL, longhaul.val);
+	/*
+	 * Powersaver. (Nehemiah [C5N])
+	 * As for Ezra-T, we don't do voltage yet.
+	 * This can do FSB scaling too, but it has never been proven
+	 * to work in practice.
+	 */
+	case 3:
+		do_powersaver(&longhaul, clock_ratio_index, 0xf);
		break;
	}
......
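Editor's note: for orientation, the switch in longhaul_setstate() now covers its cases roughly as follows. This is a summary drawn from the comments in this patch, not code taken from it:

    /*
     * case 1: Longhaul v1/v2 - Samuel (C5A), Samuel2 (C5B), Ezra (C5C);
     *         BCR2 MSR, software multiplier scaling only.
     * case 2: Longhaul v3 (aka Powersaver) - Ezra-T (C5M);
     *         POWERSAVER MSR, via do_powersaver(..., 3).
     * case 3: Powersaver - Nehemiah (C5N);
     *         POWERSAVER MSR, via do_powersaver(..., 0xf).
     */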
@@ -6,8 +6,6 @@
  * Licensed under the terms of the GNU GPL License version 2.
  * Based upon datasheets & sample CPUs kindly provided by AMD.
  *
- * BIG FAT DISCLAIMER: Work in progress code. Possibly *dangerous*
- *
  * Errata 5: Processor may fail to execute a FID/VID change in presence of interrupt.
  * - We cli/sti on stepping A0 CPUs around the FID/VID transition.
  * Errata 15: Processors with half frequency multipliers may hang upon wakeup from disconnect.
@@ -29,21 +27,13 @@
 #include <asm/io.h>
 #include <asm/system.h>

-#if defined(CONFIG_ACPI_PROCESSOR) || defined(CONFIG_ACPI_PROCESSOR_MODULE)
+#ifdef CONFIG_X86_POWERNOW_K7_ACPI
 #include <linux/acpi.h>
 #include <acpi/processor.h>
 #endif

 #include "powernow-k7.h"

-#define DEBUG
-#ifdef DEBUG
-#define dprintk(msg...) printk(msg)
-#else
-#define dprintk(msg...) do { } while(0)
-#endif

 #define PFX "powernow: "

@@ -64,7 +54,7 @@ struct pst_s {
	u8 numpstates;
 };

-#if defined(CONFIG_ACPI_PROCESSOR) || defined(CONFIG_ACPI_PROCESSOR_MODULE)
+#ifdef CONFIG_X86_POWERNOW_K7_ACPI
 union powernow_acpi_control_t {
	struct {
		unsigned long fid:5,

@@ -97,6 +87,7 @@ static int fid_codes[32] = {
  */

 static int acpi_force;
+static int debug;

 static struct cpufreq_frequency_table *powernow_table;

@@ -109,6 +100,21 @@ static unsigned int fsb;
 static unsigned int latency;
 static char have_a0;
static void dprintk(const char *fmt, ...)
{
char s[256];
va_list args;
if (debug==0)
return;
va_start(args,fmt);
vsprintf(s, fmt, args);
printk(s);
va_end(args);
}
 static int check_fsb(unsigned int fsbspeed)
 {
	int delta;
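Editor's note on the new dprintk() above: vsprintf() into a fixed 256-byte buffer has no bounds check. A bounds-checked variant, shown only as a sketch of the same idea (not what this patch does), would be:

    static void dprintk(const char *fmt, ...)
    {
            char s[256];
            va_list args;

            if (debug == 0)
                    return;

            va_start(args, fmt);
            vsnprintf(s, sizeof(s), fmt, args);     /* clamps output to the buffer */
            va_end(args);
            printk("%s", s);
    }

The run-time "debug" module parameter replaces the old compile-time DEBUG macro, so the messages can be enabled without rebuilding the module.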
@@ -190,13 +196,13 @@ static int get_ranges (unsigned char *pst)
		speed = powernow_table[j].frequency;

		if ((fid_codes[fid] % 10)==5) {
-#if defined(CONFIG_ACPI_PROCESSOR) || defined(CONFIG_ACPI_PROCESSOR_MODULE)
+#ifdef CONFIG_X86_POWERNOW_K7_ACPI
			if (have_a0 == 1)
				powernow_table[j].frequency = CPUFREQ_ENTRY_INVALID;
 #endif
		}

-		dprintk (KERN_INFO PFX " FID: 0x%x (%d.%dx [%dMHz])\t", fid,
+		dprintk (KERN_INFO PFX " FID: 0x%x (%d.%dx [%dMHz]) ", fid,
			fid_codes[fid] / 10, fid_codes[fid] % 10, speed/1000);

		if (speed < minimum_speed)

@@ -294,7 +300,7 @@ static void change_speed (unsigned int index)
 }

-#if defined(CONFIG_ACPI_PROCESSOR) || defined(CONFIG_ACPI_PROCESSOR_MODULE)
+#ifdef CONFIG_X86_POWERNOW_K7_ACPI
 struct acpi_processor_performance *acpi_processor_perf;

@@ -377,7 +383,7 @@ static int powernow_acpi_init(void)
			powernow_table[i].frequency = CPUFREQ_ENTRY_INVALID;
		}

-		dprintk (KERN_INFO PFX " FID: 0x%x (%d.%dx [%dMHz])\t", fid,
+		dprintk (KERN_INFO PFX " FID: 0x%x (%d.%dx [%dMHz]) ", fid,
			fid_codes[fid] / 10, fid_codes[fid] % 10, speed/1000);
		dprintk ("VID: 0x%x (%d.%03dV)\n", vid, mobile_vid_table[vid]/1000,
			mobile_vid_table[vid]%1000);

@@ -467,9 +473,9 @@ static int powernow_decode_bios (int maxfid, int startvid)
				(maxfid==pst->maxfid) && (startvid==pst->startvid))
			{
				dprintk (KERN_INFO PFX "PST:%d (@%p)\n", i, pst);
-				dprintk (KERN_INFO PFX " cpuid: 0x%x\t", pst->cpuid);
-				dprintk ("fsb: %d\t", pst->fsbspeed);
-				dprintk ("maxFID: 0x%x\t", pst->maxfid);
+				dprintk (KERN_INFO PFX " cpuid: 0x%x ", pst->cpuid);
+				dprintk ("fsb: %d ", pst->fsbspeed);
+				dprintk ("maxFID: 0x%x ", pst->maxfid);
				dprintk ("startvid: 0x%x\n", pst->startvid);

				ret = get_ranges ((char *) pst + sizeof (struct pst_s));

@@ -591,7 +597,7 @@ static int __init powernow_cpu_init (struct cpufreq_policy *policy)
	rdmsrl (MSR_K7_FID_VID_STATUS, fidvidstatus.val);

	/* A K7 with powernow technology is set to max frequency by BIOS */
-	fsb = (10 * cpu_khz) / fid_codes[fidvidstatus.bits.CFID];
+	fsb = (10 * cpu_khz) / fid_codes[fidvidstatus.bits.MFID];
	if (!fsb) {
		printk(KERN_WARNING PFX "can not determine bus frequency\n");
		return -EINVAL;

@@ -668,7 +674,7 @@ static int __init powernow_init (void)
 static void __exit powernow_exit (void)
 {
-#if defined(CONFIG_ACPI_PROCESSOR) || defined(CONFIG_ACPI_PROCESSOR_MODULE)
+#ifdef CONFIG_X86_POWERNOW_K7_ACPI
	if (acpi_processor_perf) {
		acpi_processor_unregister_performance(acpi_processor_perf, 0);
		kfree(acpi_processor_perf);

@@ -679,8 +685,10 @@ static void __exit powernow_exit (void)
	kfree(powernow_table);
 }

+module_param(debug, int, 0444);
+MODULE_PARM_DESC(debug, "enable debug output.");
 module_param(acpi_force, int, 0444);
-MODULE_PARM_DESC(acpi_force, "Force ACPI to be used");
+MODULE_PARM_DESC(acpi_force, "Force ACPI to be used.");

 MODULE_AUTHOR ("Dave Jones <davej@codemonkey.org.uk>");
 MODULE_DESCRIPTION ("Powernow driver for AMD K7 processors.");
......
@@ -32,7 +32,7 @@
 #include <asm/io.h>
 #include <asm/delay.h>

-#if defined(CONFIG_ACPI_PROCESSOR) || defined(CONFIG_ACPI_PROCESSOR_MODULE)
+#ifdef CONFIG_X86_POWERNOW_K8_ACPI
 #include <linux/acpi.h>
 #include <acpi/processor.h>
 #endif

@@ -664,7 +664,7 @@ static int find_psb_table(struct powernow_k8_data *data)
	return -ENODEV;
 }

-#if defined(CONFIG_ACPI_PROCESSOR) || defined(CONFIG_ACPI_PROCESSOR_MODULE)
+#ifdef CONFIG_X86_POWERNOW_K8_ACPI
 static void powernow_k8_acpi_pst_values(struct powernow_k8_data *data, unsigned int index)
 {
	if (!data->acpi_data.state_count)

@@ -1024,7 +1024,7 @@ static int __init powernowk8_cpu_init(struct cpufreq_policy *pol)
	return -ENODEV;
 }

-static int __exit powernowk8_cpu_exit (struct cpufreq_policy *pol)
+static int __devexit powernowk8_cpu_exit (struct cpufreq_policy *pol)
 {
	struct powernow_k8_data *data = powernow_data[pol->cpu];

@@ -1076,7 +1076,7 @@ static struct cpufreq_driver cpufreq_amd64_driver = {
	.verify = powernowk8_verify,
	.target = powernowk8_target,
	.init = powernowk8_cpu_init,
-	.exit = powernowk8_cpu_exit,
+	.exit = __devexit_p(powernowk8_cpu_exit),
	.get = powernowk8_get,
	.name = "powernow-k8",
	.owner = THIS_MODULE,
......
@@ -40,25 +40,24 @@
 struct cpu_id
 {
-	__u8	x86;		/* CPU family */
	__u8	x86_vendor;	/* CPU vendor */
+	__u8	x86;		/* CPU family */
	__u8	x86_model;	/* model */
	__u8	x86_mask;	/* stepping */
 };

-static const struct cpu_id cpu_id_banias = {
-	.x86_vendor = X86_VENDOR_INTEL,
-	.x86 = 6,
-	.x86_model = 9,
-	.x86_mask = 5,
+enum {
+	CPU_BANIAS,
+	CPU_DOTHAN_A1,
+	CPU_DOTHAN_B0,
 };

-static const struct cpu_id cpu_id_dothan_a1 = {
-	.x86_vendor = X86_VENDOR_INTEL,
-	.x86 = 6,
-	.x86_model = 13,
-	.x86_mask = 1,
+static const struct cpu_id cpu_ids[] = {
+	[CPU_BANIAS]	= { X86_VENDOR_INTEL, 6,  9, 5 },
+	[CPU_DOTHAN_A1]	= { X86_VENDOR_INTEL, 6, 13, 1 },
+	[CPU_DOTHAN_B0]	= { X86_VENDOR_INTEL, 6, 13, 6 },
 };
+#define N_IDS	(sizeof(cpu_ids)/sizeof(cpu_ids[0]))

 struct cpu_model
 {
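Editor's note: the open-coded N_IDS macro is the usual sizeof-array idiom. If the tree's ARRAY_SIZE() helper from <linux/kernel.h> were preferred, an equivalent definition (illustrative only, not what the patch uses) would be:

    #define N_IDS	ARRAY_SIZE(cpu_ids)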
@@ -68,7 +67,7 @@ struct cpu_model
	struct cpufreq_frequency_table *op_points; /* clock/voltage pairs */
 };

-static int centrino_verify_cpu_id(struct cpuinfo_x86 *c, const struct cpu_id *x);
+static int centrino_verify_cpu_id(const struct cpuinfo_x86 *c, const struct cpu_id *x);

 /* Operating points for current CPU */
 static struct cpu_model *centrino_model;

@@ -199,13 +198,13 @@ static struct cpufreq_frequency_table banias_1700[] =
	.max_freq	= (max)*1000,	\
	.op_points	= banias_##max,	\
 }
-#define BANIAS(max)	_BANIAS(&cpu_id_banias, max, #max)
+#define BANIAS(max)	_BANIAS(&cpu_ids[CPU_BANIAS], max, #max)

 /* CPU models, their operating frequency range, and freq/voltage
    operating points */
 static struct cpu_model models[] =
 {
-	_BANIAS(&cpu_id_banias, 900, " 900"),
+	_BANIAS(&cpu_ids[CPU_BANIAS], 900, " 900"),
	BANIAS(1000),
	BANIAS(1100),
	BANIAS(1200),

@@ -214,6 +213,11 @@ static struct cpu_model models[] =
	BANIAS(1500),
	BANIAS(1600),
	BANIAS(1700),
+
+	/* NULL model_name is a wildcard */
+	{ &cpu_ids[CPU_DOTHAN_A1], NULL, 0, NULL },
+	{ &cpu_ids[CPU_DOTHAN_B0], NULL, 0, NULL },
+
	{ NULL, }
 };
 #undef _BANIAS

@@ -224,17 +228,28 @@ static int centrino_cpu_init_table(struct cpufreq_policy *policy)
	struct cpuinfo_x86 *cpu = &cpu_data[policy->cpu];
	struct cpu_model *model;

-	for(model = models; model->model_name != NULL; model++)
-		if ((strcmp(cpu->x86_model_id, model->model_name) == 0) &&
-		    (!centrino_verify_cpu_id(cpu, model->cpu_id)))
+	for(model = models; model->cpu_id != NULL; model++)
+		if (centrino_verify_cpu_id(cpu, model->cpu_id) &&
+		    (model->model_name == NULL ||
+		     strcmp(cpu->x86_model_id, model->model_name) == 0))
			break;
-	if (model->model_name == NULL) {
+
+	if (model->cpu_id == NULL) {
+		/* No match at all */
		printk(KERN_INFO PFX "no support for CPU model \"%s\": "
		       "send /proc/cpuinfo to " MAINTAINER "\n",
		       cpu->x86_model_id);
		return -ENOENT;
	}
+
+	if (model->op_points == NULL) {
+		/* Matched a non-match */
+		printk(KERN_INFO PFX "no table support for CPU model \"%s\": \n",
+		       cpu->x86_model_id);
+		printk(KERN_INFO PFX "try compiling with CONFIG_X86_SPEEDSTEP_CENTRINO_ACPI enabled\n");
+		return -ENOENT;
+	}

	centrino_model = model;

	printk(KERN_INFO PFX "found \"%s\": max frequency: %dkHz\n",

@@ -247,14 +262,14 @@ static int centrino_cpu_init_table(struct cpufreq_policy *policy)
 static inline int centrino_cpu_init_table(struct cpufreq_policy *policy) { return -ENODEV; }
 #endif /* CONFIG_X86_SPEEDSTEP_CENTRINO_TABLE */

-static int centrino_verify_cpu_id(struct cpuinfo_x86 *c, const struct cpu_id *x)
+static int centrino_verify_cpu_id(const struct cpuinfo_x86 *c, const struct cpu_id *x)
 {
	if ((c->x86 == x->x86) &&
	    (c->x86_vendor == x->x86_vendor) &&
	    (c->x86_model == x->x86_model) &&
	    (c->x86_mask == x->x86_mask))
+		return 1;
	return 0;
-	return -ENODEV;
 }

 /* Extract clock in kHz from PERF_CTL value */

@@ -340,6 +355,12 @@ static int centrino_cpu_init_acpi(struct cpufreq_policy *policy)
			goto err_unreg;
		}

+		if (p.states[i].core_frequency > p.states[0].core_frequency) {
+			printk(KERN_DEBUG "P%u has larger frequency than P0, skipping\n", i);
+			p.states[i].core_frequency = 0;
+			continue;
+		}
+
		if (extract_clock(p.states[i].control) !=
		    (p.states[i].core_frequency * 1000)) {
			printk(KERN_DEBUG "Invalid encoded frequency\n");

@@ -371,6 +392,8 @@ static int centrino_cpu_init_acpi(struct cpufreq_policy *policy)
		centrino_model->op_points[i].frequency = p.states[i].core_frequency * 1000;
		if (cur_freq == centrino_model->op_points[i].frequency)
			p.state = i;
+		if (!p.states[i].core_frequency)
+			centrino_model->op_points[i].frequency = CPUFREQ_ENTRY_INVALID;
	}
	centrino_model->op_points[p.state_count].frequency = CPUFREQ_TABLE_END;
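Editor's note: marking an op_point as CPUFREQ_ENTRY_INVALID relies on table consumers skipping such entries. The expected walking pattern looks roughly like this (a generic sketch of the frequency-table convention, not code taken from this patch):

    unsigned int i;

    for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
            if (table[i].frequency == CPUFREQ_ENTRY_INVALID)
                    continue;
            /* ... use table[i].frequency ... */
    }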
@@ -392,15 +415,20 @@ static int centrino_cpu_init(struct cpufreq_policy *policy)
	unsigned freq;
	unsigned l, h;
	int ret;
+	int i;

	if (policy->cpu != 0)
		return -ENODEV;

-	if (!cpu_has(cpu, X86_FEATURE_EST))
+	/* Only Intel makes Enhanced Speedstep-capable CPUs */
+	if (cpu->x86_vendor != X86_VENDOR_INTEL || !cpu_has(cpu, X86_FEATURE_EST))
		return -ENODEV;

-	if ((centrino_verify_cpu_id(cpu, &cpu_id_banias)) &&
-	    (centrino_verify_cpu_id(cpu, &cpu_id_dothan_a1))) {
+	for (i = 0; i < N_IDS; i++)
+		if (centrino_verify_cpu_id(cpu, &cpu_ids[i]))
+			break;
+
+	if (i == N_IDS) {
		printk(KERN_INFO PFX "found unsupported CPU with Enhanced SpeedStep: "
		       "send /proc/cpuinfo to " MAINTAINER "\n");
		return -ENODEV;
......
@@ -247,14 +247,14 @@ static int speedstep_target (struct cpufreq_policy *policy,
	if (cpufreq_frequency_table_target(policy, &speedstep_freqs[0], target_freq, relation, &newstate))
		return -EINVAL;

+	freqs.old = speedstep_get(policy->cpu);
+	freqs.new = speedstep_freqs[newstate].frequency;
+	freqs.cpu = policy->cpu;
+
	/* no transition necessary */
	if (freqs.old == freqs.new)
		return 0;

-	freqs.old = speedstep_get_processor_frequency(speedstep_processor);
-	freqs.new = speedstep_freqs[newstate].frequency;
-	freqs.cpu = policy->cpu;
-
	cpus_allowed = current->cpus_allowed;

	/* only run on CPU to be set, or on its sibling */

@@ -324,14 +324,13 @@ static int speedstep_cpu_init(struct cpufreq_policy *policy)
					&speedstep_freqs[SPEEDSTEP_LOW].frequency,
					&speedstep_freqs[SPEEDSTEP_HIGH].frequency,
					&speedstep_set_state);
-	if (result) {
	set_cpus_allowed(current, cpus_allowed);
+	if (result) {
		return result;
	}

	/* get current speed setting */
-	speed = speedstep_get_processor_frequency(speedstep_processor);
-	set_cpus_allowed(current, cpus_allowed);
+	speed = speedstep_get(policy->cpu);
	if (!speed)
		return -EIO;

@@ -362,7 +361,20 @@ static int speedstep_cpu_exit(struct cpufreq_policy *policy)
 static unsigned int speedstep_get(unsigned int cpu)
 {
-	return speedstep_get_processor_frequency(speedstep_processor);
+	unsigned int speed;
+	cpumask_t cpus_allowed,affected_cpu_map;
+
+	/* only run on CPU to be set, or on its sibling */
+	cpus_allowed = current->cpus_allowed;
+#ifdef CONFIG_SMP
+	affected_cpu_map = cpu_sibling_map[cpu];
+#else
+	affected_cpu_map = cpumask_of_cpu(cpu);
+#endif
+	set_cpus_allowed(current, affected_cpu_map);
+	speed=speedstep_get_processor_frequency(speedstep_processor);
+	set_cpus_allowed(current, cpus_allowed);
+	return speed;
 }

 static struct freq_attr* speedstep_attr[] = {
......
@@ -115,6 +115,11 @@ static int speedstep_smi_get_freqs (unsigned int *low, unsigned int *high)
		: "=a" (result), "=b" (high_mhz), "=c" (low_mhz), "=d" (state), "=D" (edi)
		: "a" (command), "b" (function), "c" (state), "d" (smi_port), "S" (0)
	);

+	/* abort if results are obviously incorrect... */
+	if ((high_mhz + low_mhz) < 600)
+		return -EINVAL;
+
	*high = high_mhz * 1000;
	*low = low_mhz * 1000;

@@ -180,7 +185,7 @@ static void speedstep_set_state (unsigned int state)
	local_irq_restore(flags);

	if (new_state == state) {
-		dprintk(KERN_INFO "cpufreq: change to %u MHz succeeded after %u tries with result %u\n", (freqs.new / 1000), retry, result);
+		dprintk(KERN_INFO "cpufreq: change to %u MHz succeeded after %u tries with result %u\n", (speedstep_freqs[new_state].frequency / 1000), retry, result);
	} else {
		printk(KERN_ERR "cpufreq: change failed with new_state %u and result %u\n", new_state, result);
	}
......
@@ -41,4 +41,9 @@ config X86_POWERNOW_K8
	  If in doubt, say N.

+config X86_POWERNOW_K8_ACPI
+	bool
+	depends on ((X86_POWERNOW_K8 = "m" && ACPI_PROCESSOR) || (X86_POWERNOW_K8 = "y" && ACPI_PROCESSOR = "y"))
+	default y
+
 endmenu
@@ -99,6 +99,86 @@ static void cpufreq_cpu_put(struct cpufreq_policy *data)
	module_put(cpufreq_driver->owner);
 }
/*********************************************************************
* EXTERNALLY AFFECTING FREQUENCY CHANGES *
*********************************************************************/
/**
* adjust_jiffies - adjust the system "loops_per_jiffy"
*
* This function alters the system "loops_per_jiffy" for the clock
* speed change. Note that loops_per_jiffy cannot be updated on SMP
* systems as each CPU might be scaled differently. So, use the arch
* per-CPU loops_per_jiffy value wherever possible.
*/
#ifndef CONFIG_SMP
static unsigned long l_p_j_ref;
static unsigned int l_p_j_ref_freq;
static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
{
if (ci->flags & CPUFREQ_CONST_LOOPS)
return;
if (!l_p_j_ref_freq) {
l_p_j_ref = loops_per_jiffy;
l_p_j_ref_freq = ci->old;
}
if ((val == CPUFREQ_PRECHANGE && ci->old < ci->new) ||
(val == CPUFREQ_POSTCHANGE && ci->old > ci->new) ||
(val == CPUFREQ_RESUMECHANGE))
loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq, ci->new);
}
#else
static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci) { return; }
#endif
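Editor's note: adjust_jiffies() rescales the UP delay-loop calibration in proportion to the frequency change via cpufreq_scale(). The arithmetic it relies on amounts to the following sketch (hypothetical helper name; in-kernel code would use do_div() for the 64-bit division):

    static unsigned long scale_lpj(unsigned long ref_lpj,
                                   unsigned int ref_khz, unsigned int new_khz)
    {
            /* e.g. 4,000,000 loops calibrated at 1,000,000 kHz
             * become 2,400,000 loops at 600,000 kHz */
            return (unsigned long)(((unsigned long long)ref_lpj * new_khz) / ref_khz);
    }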
/**
* cpufreq_notify_transition - call notifier chain and adjust_jiffies on frequency transition
*
* This function calls the transition notifiers and the "adjust_jiffies" function. It is called
* twice on all CPU frequency changes that have external effects.
*/
void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state)
{
BUG_ON(irqs_disabled());
freqs->flags = cpufreq_driver->flags;
down_read(&cpufreq_notifier_rwsem);
switch (state) {
case CPUFREQ_PRECHANGE:
/* detect if the driver reported a value as "old frequency" which
* is not equal to what the cpufreq core thinks is "old frequency".
*/
if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
if ((likely(cpufreq_cpu_data[freqs->cpu]->cur)) &&
(unlikely(freqs->old != cpufreq_cpu_data[freqs->cpu]->cur)))
{
if (cpufreq_driver->flags & CPUFREQ_PANIC_OUTOFSYNC)
panic("CPU Frequency is out of sync.");
printk(KERN_WARNING "Warning: CPU frequency is %u, "
"cpufreq assumed %u kHz.\n", freqs->old, cpufreq_cpu_data[freqs->cpu]->cur);
freqs->old = cpufreq_cpu_data[freqs->cpu]->cur;
}
}
notifier_call_chain(&cpufreq_transition_notifier_list, CPUFREQ_PRECHANGE, freqs);
adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
break;
case CPUFREQ_POSTCHANGE:
adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
notifier_call_chain(&cpufreq_transition_notifier_list, CPUFREQ_POSTCHANGE, freqs);
cpufreq_cpu_data[freqs->cpu]->cur = freqs->new;
break;
}
up_read(&cpufreq_notifier_rwsem);
}
EXPORT_SYMBOL_GPL(cpufreq_notify_transition);
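Editor's note: a scaling driver is expected to bracket the actual hardware reprogramming with these two notifications. A minimal sketch of a driver's set-speed path (names are illustrative, not from this patch):

    static void example_set_speed(unsigned int cpu, unsigned int old_khz, unsigned int new_khz)
    {
            struct cpufreq_freqs freqs = {
                    .cpu = cpu,
                    .old = old_khz,
                    .new = new_khz,
            };

            cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
            /* ... program the hardware to new_khz here ... */
            cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
    }

Moving this code above the sysfs interface (and removing the copy further down) is a pure relocation; the only textual change is the reworded out-of-sync warning.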
 /*********************************************************************
  *                          SYSFS INTERFACE                          *
  *********************************************************************/
@@ -617,8 +697,8 @@ static int cpufreq_resume(struct sys_device * sysdev)
			if (cpufreq_driver->flags & CPUFREQ_PANIC_RESUME_OUTOFSYNC)
				panic("CPU Frequency is out of sync.");

-			printk(KERN_WARNING "Warning: CPU frequency out of sync: cpufreq and timing"
-			       "core thinks of %u, is %u kHz.\n", cpu_policy->cur, cur_freq);
+			printk(KERN_WARNING "Warning: CPU frequency is %u, "
+			       "cpufreq assumed %u kHz.\n", cur_freq, cpu_policy->cur);

			freqs.cpu = cpu;
			freqs.old = cpu_policy->cur;

@@ -626,6 +706,8 @@ static int cpufreq_resume(struct sys_device * sysdev)
			notifier_call_chain(&cpufreq_transition_notifier_list, CPUFREQ_RESUMECHANGE, &freqs);
			adjust_jiffies(CPUFREQ_RESUMECHANGE, &freqs);
+
+			cpu_policy->cur = cur_freq;
		}
	}
@@ -1005,87 +1087,6 @@ int cpufreq_update_policy(unsigned int cpu)
 EXPORT_SYMBOL(cpufreq_update_policy);
/*********************************************************************
* EXTERNALLY AFFECTING FREQUENCY CHANGES *
*********************************************************************/
/**
* adjust_jiffies - adjust the system "loops_per_jiffy"
*
* This function alters the system "loops_per_jiffy" for the clock
* speed change. Note that loops_per_jiffy cannot be updated on SMP
* systems as each CPU might be scaled differently. So, use the arch
* per-CPU loops_per_jiffy value wherever possible.
*/
#ifndef CONFIG_SMP
static unsigned long l_p_j_ref;
static unsigned int l_p_j_ref_freq;
static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
{
if (ci->flags & CPUFREQ_CONST_LOOPS)
return;
if (!l_p_j_ref_freq) {
l_p_j_ref = loops_per_jiffy;
l_p_j_ref_freq = ci->old;
}
if ((val == CPUFREQ_PRECHANGE && ci->old < ci->new) ||
(val == CPUFREQ_POSTCHANGE && ci->old > ci->new) ||
(val == CPUFREQ_RESUMECHANGE))
loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq, ci->new);
}
#else
static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci) { return; }
#endif
/**
* cpufreq_notify_transition - call notifier chain and adjust_jiffies on frequency transition
*
* This function calls the transition notifiers and the "adjust_jiffies" function. It is called
* twice on all CPU frequency changes that have external effects.
*/
void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state)
{
BUG_ON(irqs_disabled());
freqs->flags = cpufreq_driver->flags;
down_read(&cpufreq_notifier_rwsem);
switch (state) {
case CPUFREQ_PRECHANGE:
/* detect if the driver reported a value as "old frequency" which
* is not equal to what the cpufreq core thinks is "old frequency".
*/
if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
if ((likely(cpufreq_cpu_data[freqs->cpu]->cur)) &&
(unlikely(freqs->old != cpufreq_cpu_data[freqs->cpu]->cur)))
{
if (cpufreq_driver->flags & CPUFREQ_PANIC_OUTOFSYNC)
panic("CPU Frequency is out of sync.");
printk(KERN_WARNING "Warning: CPU frequency out of sync: "
"cpufreq and timing core thinks of %u, is %u kHz.\n",
cpufreq_cpu_data[freqs->cpu]->cur, freqs->old);
freqs->old = cpufreq_cpu_data[freqs->cpu]->cur;
}
}
notifier_call_chain(&cpufreq_transition_notifier_list, CPUFREQ_PRECHANGE, freqs);
adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
break;
case CPUFREQ_POSTCHANGE:
adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
notifier_call_chain(&cpufreq_transition_notifier_list, CPUFREQ_POSTCHANGE, freqs);
cpufreq_cpu_data[freqs->cpu]->cur = freqs->new;
break;
}
up_read(&cpufreq_notifier_rwsem);
}
EXPORT_SYMBOL_GPL(cpufreq_notify_transition);
 /*********************************************************************
  *              REGISTER / UNREGISTER CPUFREQ DRIVER                 *
  *********************************************************************/
......
@@ -82,6 +82,13 @@ userspace_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
 {
	struct cpufreq_freqs *freq = data;

+	/* Don't update cur_freq if CPU is managed and we're
+	 * waking up: else we won't remember what frequency
+	 * we need to set the CPU to.
+	 */
+	if (cpu_is_managed[freq->cpu] && (val == CPUFREQ_RESUMECHANGE))
+		return 0;
+
	cpu_cur_freq[freq->cpu] = freq->new;

	return 0;

@@ -522,6 +529,9 @@ static int cpufreq_governor_userspace(struct cpufreq_policy *policy,
		else if (policy->min > cpu_cur_freq[cpu])
			__cpufreq_driver_target(&current_policy[cpu], policy->min,
						CPUFREQ_RELATION_L);
+		else
+			__cpufreq_driver_target(&current_policy[cpu], cpu_cur_freq[cpu],
+						CPUFREQ_RELATION_L);
		memcpy (&current_policy[cpu], policy, sizeof(struct cpufreq_policy));
		up(&userspace_sem);
		break;
......