Commit bdbda1c4 authored by Dave Jones

Merge delerium.codemonkey.org.uk:/mnt/data/src/bk/bk-linus
into delerium.codemonkey.org.uk:/mnt/data/src/bk/cpufreq
parents afb7238d d883263c
@@ -7,7 +7,7 @@
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/cpufreq.h>
@@ -19,7 +19,7 @@
static struct cpufreq_driver longrun_driver;
/**
* longrun_{low,high}_freq is needed for the conversion of cpufreq kHz
* values into per cent values. In TMTA microcode, the following is valid:
* performance_pctg = (current_freq - low_freq)/(high_freq - low_freq)
*/
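The percentage/kHz conversion described above is used in both directions by longrun_get_policy() and longrun_set_policy() below. A minimal user-space sketch of that arithmetic follows; the 300000/1600000 kHz limits and the helper names are hypothetical, chosen only to illustrate the same integer math:

#include <stdio.h>

/* Hypothetical LongRun limits in kHz -- illustrative values only. */
static const unsigned int low_freq  = 300000;
static const unsigned int high_freq = 1600000;

/* percent -> kHz, mirroring the expression in longrun_get_policy() */
static unsigned int pctg_to_khz(unsigned int pctg)
{
	return low_freq + pctg * ((high_freq - low_freq) / 100);
}

/* kHz -> percent, mirroring the expression in longrun_set_policy() */
static unsigned int khz_to_pctg(unsigned int khz)
{
	return (khz - low_freq) / ((high_freq - low_freq) / 100);
}

int main(void)
{
	unsigned int khz = pctg_to_khz(50);

	/* 50% of the 300 MHz..1.6 GHz range is 950000 kHz, and converts back to 50%. */
	printf("50%% -> %u kHz -> %u%%\n", khz, khz_to_pctg(khz));
	return 0;
}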
@@ -42,18 +42,18 @@ static void __init longrun_get_policy(struct cpufreq_policy *policy)
policy->policy = CPUFREQ_POLICY_PERFORMANCE;
else
policy->policy = CPUFREQ_POLICY_POWERSAVE;
rdmsr(MSR_TMTA_LONGRUN_CTRL, msr_lo, msr_hi);
msr_lo &= 0x0000007F;
msr_hi &= 0x0000007F;
if ( longrun_high_freq <= longrun_low_freq ) {
/* Assume degenerate Longrun table */
policy->min = policy->max = longrun_high_freq;
} else {
policy->min = longrun_low_freq + msr_lo *
((longrun_high_freq - longrun_low_freq) / 100);
policy->max = longrun_low_freq + msr_hi *
((longrun_high_freq - longrun_low_freq) / 100);
}
policy->cpu = 0;
@@ -79,9 +79,9 @@ static int longrun_set_policy(struct cpufreq_policy *policy)
/* Assume degenerate Longrun table */
pctg_lo = pctg_hi = 100;
} else {
pctg_lo = (policy->min - longrun_low_freq) /
((longrun_high_freq - longrun_low_freq) / 100);
pctg_hi = (policy->max - longrun_low_freq) /
((longrun_high_freq - longrun_low_freq) / 100);
}
@@ -118,7 +118,7 @@ static int longrun_set_policy(struct cpufreq_policy *policy)
* longrun_verify_policy - verifies a new CPUFreq policy
* @policy: the policy to verify
*
* Validates a new CPUFreq policy. This function has to be called with
* cpufreq_driver locked.
*/
static int longrun_verify_policy(struct cpufreq_policy *policy)
@@ -127,8 +127,8 @@ static int longrun_verify_policy(struct cpufreq_policy *policy)
return -EINVAL;
policy->cpu = 0;
cpufreq_verify_within_limits(policy,
policy->cpuinfo.min_freq,
policy->cpuinfo.max_freq);
if ((policy->policy != CPUFREQ_POLICY_POWERSAVE) &&
@@ -160,7 +160,7 @@ static unsigned int longrun_get(unsigned int cpu)
* TMTA rules:
* performance_pctg = (target_freq - low_freq)/(high_freq - low_freq)
*/
static unsigned int __init longrun_determine_freqs(unsigned int *low_freq,
unsigned int *high_freq)
{
u32 msr_lo, msr_hi;
@@ -174,9 +174,9 @@ static unsigned int __init longrun_determine_freqs(unsigned int *low_freq,
if (cpu_has(c, X86_FEATURE_LRTI)) {
/* if the LongRun Table Interface is present, the
* detection is a bit easier:
* For minimum frequency, read out the maximum
* level (msr_hi), write that into "currently
* selected level", and read out the frequency.
* For maximum frequency, read out level zero.
*/
@@ -223,7 +223,7 @@ static unsigned int __init longrun_determine_freqs(unsigned int *low_freq,
cpuid(0x80860007, &eax, &ebx, &ecx, &edx);
/* restore values */
wrmsr(MSR_TMTA_LONGRUN_CTRL, save_lo, save_hi);
}
/* performance_pctg = (current_freq - low_freq)/(high_freq - low_freq)
@@ -237,7 +237,7 @@ static unsigned int __init longrun_determine_freqs(unsigned int *low_freq,
if ((ecx > 95) || (ecx == 0) || (eax < ebx))
return -EIO;
edx = (eax - ebx) / (100 - ecx);
*low_freq = edx * 1000; /* back to kHz */
if (*low_freq > *high_freq)
@@ -249,7 +249,7 @@ static unsigned int __init longrun_determine_freqs(unsigned int *low_freq,
static int __init longrun_cpu_init(struct cpufreq_policy *policy)
{
int result = 0;
/* capability check */
if (policy->cpu != 0)
@@ -265,15 +265,15 @@ static int __init longrun_cpu_init(struct cpufreq_policy *policy)
policy->cpuinfo.max_freq = longrun_high_freq;
policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
longrun_get_policy(policy);
return 0;
}
static struct cpufreq_driver longrun_driver = {
.flags = CPUFREQ_CONST_LOOPS,
.verify = longrun_verify_policy,
.setpolicy = longrun_set_policy,
.get = longrun_get,
.init = longrun_cpu_init,
.name = "longrun",
@@ -290,7 +290,7 @@ static int __init longrun_init(void)
{
struct cpuinfo_x86 *c = cpu_data;
if (c->x86_vendor != X86_VENDOR_TRANSMETA ||
!cpu_has(c, X86_FEATURE_LONGRUN))
return -ENODEV;
......
@@ -6,8 +6,6 @@
* Licensed under the terms of the GNU GPL License version 2.
* Based upon datasheets & sample CPUs kindly provided by AMD.
*
* BIG FAT DISCLAIMER: Work in progress code. Possibly *dangerous*
*
* Errata 5: Processor may fail to execute a FID/VID change in presence of interrupt.
* - We cli/sti on stepping A0 CPUs around the FID/VID transition.
* Errata 15: Processors with half frequency multipliers may hang upon wakeup from disconnect.
@@ -36,14 +34,6 @@
#include "powernow-k7.h"
#define DEBUG
#ifdef DEBUG
#define dprintk(msg...) printk(msg)
#else
#define dprintk(msg...) do { } while(0)
#endif
#define PFX "powernow: "
@@ -97,6 +87,7 @@ static int fid_codes[32] = {
*/
static int acpi_force;
static int debug;
static struct cpufreq_frequency_table *powernow_table;
@@ -109,6 +100,21 @@ static unsigned int fsb;
static unsigned int latency;
static char have_a0;
static void dprintk(const char *fmt, ...)
{
char s[256];
va_list args;
if (debug==0)
return;
va_start(args,fmt);
vsprintf(s, fmt, args);
printk(s);
va_end(args);
}
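The dprintk() added above replaces the compile-time DEBUG macro removed earlier in this diff with a runtime check of the new debug module parameter. For context, the same printf-style wrapper pattern can be sketched in plain user-space C as follows; this is an illustration only, and it substitutes vsnprintf() to bound the buffer, which the kernel code above does not do:

#include <stdarg.h>
#include <stdio.h>

static int debug;	/* non-zero enables output, like the module parameter */

/* User-space analogue of the dprintk() above.  vsnprintf() bounds the
 * 256-byte buffer here; the kernel version in this commit uses vsprintf(). */
static void debug_printf(const char *fmt, ...)
{
	char s[256];
	va_list args;

	if (debug == 0)
		return;

	va_start(args, fmt);
	vsnprintf(s, sizeof(s), fmt, args);
	va_end(args);

	fputs(s, stdout);
}

int main(void)
{
	debug_printf("suppressed: debug is still 0\n");
	debug = 1;
	debug_printf("FID: 0x%x\n", 0x10);	/* printed only when debug != 0 */
	return 0;
}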
static int check_fsb(unsigned int fsbspeed)
{
int delta;
@@ -196,7 +202,7 @@ static int get_ranges (unsigned char *pst)
#endif
}
dprintk (KERN_INFO PFX " FID: 0x%x (%d.%dx [%dMHz])\t", fid,
dprintk (KERN_INFO PFX " FID: 0x%x (%d.%dx [%dMHz]) ", fid,
fid_codes[fid] / 10, fid_codes[fid] % 10, speed/1000);
if (speed < minimum_speed)
@@ -285,7 +291,7 @@ static void change_speed (unsigned int index)
change_VID(vid);
change_FID(fid);
}
if (have_a0 == 1)
local_irq_enable();
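This is the tail of the Errata 5 workaround mentioned in the file header: on stepping-A0 parts, interrupts are kept off around the FID/VID transition. Only the local_irq_enable() side is visible in this hunk, so the sketch below is an illustration of the guard pattern rather than the driver's actual change_speed(); the disable side, the fixed FID/VID ordering, and the stub functions are assumptions:

#include <stdio.h>

/* Stand-ins for the kernel primitives -- purely illustrative stubs. */
static void local_irq_disable(void)	{ puts("irqs off"); }
static void local_irq_enable(void)	{ puts("irqs on"); }
static void change_FID(unsigned int fid) { printf("FID -> 0x%x\n", fid); }
static void change_VID(unsigned int vid) { printf("VID -> 0x%x\n", vid); }

static char have_a0 = 1;	/* pretend this is a stepping-A0 part */

/* Shape of the Errata 5 guard: keep interrupts off across the transition
 * on A0 steppings.  The real driver orders the FID/VID changes depending
 * on whether the speed goes up or down. */
static void change_speed_sketch(unsigned int fid, unsigned int vid)
{
	if (have_a0 == 1)
		local_irq_disable();

	change_VID(vid);
	change_FID(fid);

	if (have_a0 == 1)
		local_irq_enable();
}

int main(void)
{
	change_speed_sketch(0x10, 0x12);
	return 0;
}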
@@ -377,7 +383,7 @@ static int powernow_acpi_init(void)
powernow_table[i].frequency = CPUFREQ_ENTRY_INVALID;
}
dprintk (KERN_INFO PFX " FID: 0x%x (%d.%dx [%dMHz])\t", fid,
dprintk (KERN_INFO PFX " FID: 0x%x (%d.%dx [%dMHz]) ", fid,
fid_codes[fid] / 10, fid_codes[fid] % 10, speed/1000);
dprintk ("VID: 0x%x (%d.%03dV)\n", vid, mobile_vid_table[vid]/1000,
mobile_vid_table[vid]%1000);
@@ -467,9 +473,9 @@ static int powernow_decode_bios (int maxfid, int startvid)
(maxfid==pst->maxfid) && (startvid==pst->startvid))
{
dprintk (KERN_INFO PFX "PST:%d (@%p)\n", i, pst);
dprintk (KERN_INFO PFX " cpuid: 0x%x\t", pst->cpuid);
dprintk ("fsb: %d\t", pst->fsbspeed);
dprintk ("maxFID: 0x%x\t", pst->maxfid);
dprintk (KERN_INFO PFX " cpuid: 0x%x ", pst->cpuid);
dprintk ("fsb: %d ", pst->fsbspeed);
dprintk ("maxFID: 0x%x ", pst->maxfid);
dprintk ("startvid: 0x%x\n", pst->startvid);
ret = get_ranges ((char *) pst + sizeof (struct pst_s));
@@ -591,7 +597,7 @@ static int __init powernow_cpu_init (struct cpufreq_policy *policy)
rdmsrl (MSR_K7_FID_VID_STATUS, fidvidstatus.val);
/* A K7 with powernow technology is set to max frequency by BIOS */
- fsb = (10 * cpu_khz) / fid_codes[fidvidstatus.bits.CFID];
+ fsb = (10 * cpu_khz) / fid_codes[fidvidstatus.bits.MFID];
if (!fsb) {
printk(KERN_WARNING PFX "can not determine bus frequency\n");
return -EINVAL;
@@ -648,14 +654,14 @@ static struct freq_attr* powernow_table_attr[] = {
};
static struct cpufreq_driver powernow_driver = {
.verify = powernow_verify,
.target = powernow_target,
.get = powernow_get,
.init = powernow_cpu_init,
.exit = powernow_cpu_exit,
.name = "powernow-k7",
.owner = THIS_MODULE,
.attr = powernow_table_attr,
};
static int __init powernow_init (void)
@@ -679,8 +685,10 @@ static void __exit powernow_exit (void)
kfree(powernow_table);
}
module_param(debug, int, 0444);
MODULE_PARM_DESC(debug, "enable debug output.");
module_param(acpi_force, int, 0444);
MODULE_PARM_DESC(acpi_force, "Force ACPI to be used");
MODULE_PARM_DESC(acpi_force, "Force ACPI to be used.");
MODULE_AUTHOR ("Dave Jones <davej@codemonkey.org.uk>");
MODULE_DESCRIPTION ("Powernow driver for AMD K7 processors.");
......
@@ -60,6 +60,13 @@ static const struct cpu_id cpu_id_dothan_a1 = {
.x86_mask = 1,
};
static const struct cpu_id cpu_id_dothan_b0 = {
.x86_vendor = X86_VENDOR_INTEL,
.x86 = 6,
.x86_model = 13,
.x86_mask = 6,
};
struct cpu_model
{
const struct cpu_id *cpu_id;
@@ -214,7 +221,7 @@ static struct cpu_model models[] =
BANIAS(1500),
BANIAS(1600),
BANIAS(1700),
- { 0, }
+ { NULL, }
};
#undef _BANIAS
#undef BANIAS
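The { NULL, } entry above is the sentinel that terminates models[], so callers can walk the table without carrying a separate count. A small user-space sketch of that sentinel-terminated table pattern follows; the struct layout and field names are simplified stand-ins, not the driver's struct cpu_model:

#include <stdio.h>

/* Simplified stand-in for the driver's struct cpu_model. */
struct model_sketch {
	const char *name;
	unsigned int max_freq;	/* kHz */
};

static const struct model_sketch models[] = {
	{ "Banias 1500", 1500000 },
	{ "Banias 1600", 1600000 },
	{ NULL, }		/* sentinel terminates the table */
};

int main(void)
{
	const struct model_sketch *m;

	/* Walk entries until the NULL sentinel is reached. */
	for (m = models; m->name != NULL; m++)
		printf("%s: %u kHz\n", m->name, m->max_freq);

	return 0;
}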
@@ -400,7 +407,8 @@ static int centrino_cpu_init(struct cpufreq_policy *policy)
return -ENODEV;
if ((centrino_verify_cpu_id(cpu, &cpu_id_banias)) &&
- (centrino_verify_cpu_id(cpu, &cpu_id_dothan_a1))) {
+ (centrino_verify_cpu_id(cpu, &cpu_id_dothan_a1)) &&
+ (centrino_verify_cpu_id(cpu, &cpu_id_dothan_b0))) {
printk(KERN_INFO PFX "found unsupported CPU with Enhanced SpeedStep: "
"send /proc/cpuinfo to " MAINTAINER "\n");
return -ENODEV;
......
@@ -617,8 +617,8 @@ static int cpufreq_resume(struct sys_device * sysdev)
if (cpufreq_driver->flags & CPUFREQ_PANIC_RESUME_OUTOFSYNC)
panic("CPU Frequency is out of sync.");
printk(KERN_WARNING "Warning: CPU frequency out of sync: cpufreq and timing"
"core thinks of %u, is %u kHz.\n", cpu_policy->cur, cur_freq);
printk(KERN_WARNING "Warning: CPU frequency is %u, "
"cpufreq assumed %u kHz.\n", cur_freq, cpu_policy->cur);
freqs.cpu = cpu;
freqs.old = cpu_policy->cur;
@@ -626,6 +626,8 @@ static int cpufreq_resume(struct sys_device * sysdev)
notifier_call_chain(&cpufreq_transition_notifier_list, CPUFREQ_RESUMECHANGE, &freqs);
adjust_jiffies(CPUFREQ_RESUMECHANGE, &freqs);
cpu_policy->cur = cur_freq;
}
}
@@ -1065,9 +1067,8 @@ void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state)
if (cpufreq_driver->flags & CPUFREQ_PANIC_OUTOFSYNC)
panic("CPU Frequency is out of sync.");
printk(KERN_WARNING "Warning: CPU frequency out of sync: "
"cpufreq and timing core thinks of %u, is %u kHz.\n",
cpufreq_cpu_data[freqs->cpu]->cur, freqs->old);
printk(KERN_WARNING "Warning: CPU frequency is %u, "
"cpufreq assumed %u kHz.\n", freqs->old, cpufreq_cpu_data[freqs->cpu]->cur);
freqs->old = cpufreq_cpu_data[freqs->cpu]->cur;
}
}
......
@@ -82,6 +82,13 @@ userspace_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
{
struct cpufreq_freqs *freq = data;
/* Don't update cur_freq if CPU is managed and we're
* waking up: else we won't remember what frequency
* we need to set the CPU to.
*/
if (cpu_is_managed[freq->cpu] && (val == CPUFREQ_RESUMECHANGE))
return 0;
cpu_cur_freq[freq->cpu] = freq->new;
return 0;
@@ -522,6 +529,9 @@ static int cpufreq_governor_userspace(struct cpufreq_policy *policy,
else if (policy->min > cpu_cur_freq[cpu])
__cpufreq_driver_target(&current_policy[cpu], policy->min,
CPUFREQ_RELATION_L);
else
__cpufreq_driver_target(&current_policy[cpu], cpu_cur_freq[cpu],
CPUFREQ_RELATION_L);
memcpy (&current_policy[cpu], policy, sizeof(struct cpufreq_policy));
up(&userspace_sem);
break;
......