Commit b96f0b52 authored by Jia Zhang, committed by Khalid Elmously

x86/cpu: Rename cpu_data.x86_mask to cpu_data.x86_stepping

BugLink: https://bugs.launchpad.net/bugs/1883918

commit b399151c upstream.

x86_mask is a confusing name which is hard to associate with the
processor's stepping.

Additionally, correct an indent issue in lib/cpu.c.

[ Backport by Mark Gross to simplify the SRBDS backport ]
Signed-off-by: Jia Zhang <qianyue.zj@alibaba-inc.com>
[ Updated it to more recent kernels. ]
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: bp@alien8.de
Cc: tony.luck@intel.com
Link: http://lkml.kernel.org/r/1514771530-70829-1-git-send-email-qianyue.zj@alibaba-inc.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: Kamal Mostafa <kamal@canonical.com>
Signed-off-by: Khalid Elmously <khalid.elmously@canonical.com>
parent de2c624e
@@ -3098,7 +3098,7 @@ static int intel_snb_pebs_broken(int cpu)
 		break;
 	case INTEL_FAM6_SANDYBRIDGE_X:
-		switch (cpu_data(cpu).x86_mask) {
+		switch (cpu_data(cpu).x86_stepping) {
 		case 6: rev = 0x618; break;
 		case 7: rev = 0x70c; break;
 		}
...
@@ -1017,7 +1017,7 @@ void __init intel_pmu_lbr_init_atom(void)
	 * on PMU interrupt
	 */
	if (boot_cpu_data.x86_model == 28
-	    && boot_cpu_data.x86_mask < 10) {
+	    && boot_cpu_data.x86_stepping < 10) {
		pr_cont("LBR disabled due to erratum");
		return;
	}
...
@@ -233,7 +233,7 @@ static __initconst const struct x86_pmu p6_pmu = {
 static __init void p6_pmu_rdpmc_quirk(void)
 {
-	if (boot_cpu_data.x86_mask < 9) {
+	if (boot_cpu_data.x86_stepping < 9) {
		/*
		 * PPro erratum 26; fixed in stepping 9 and above.
		 */
...
@@ -92,7 +92,7 @@ static inline unsigned int acpi_processor_cstate_check(unsigned int max_cstate)
	if (boot_cpu_data.x86 == 0x0F &&
	    boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
	    boot_cpu_data.x86_model <= 0x05 &&
-	    boot_cpu_data.x86_mask < 0x0A)
+	    boot_cpu_data.x86_stepping < 0x0A)
		return 1;
	else if (amd_e400_c1e_detected)
		return 1;
...
@@ -88,7 +88,7 @@ struct cpuinfo_x86 {
	__u8			x86;		/* CPU family */
	__u8			x86_vendor;	/* CPU vendor */
	__u8			x86_model;
-	__u8			x86_mask;
+	__u8			x86_stepping;
 #ifdef CONFIG_X86_32
	char			wp_works_ok;	/* It doesn't on 386's */
...
@@ -105,7 +105,7 @@ int amd_cache_northbridges(void)
	if (boot_cpu_data.x86 == 0x10 &&
	    boot_cpu_data.x86_model >= 0x8 &&
	    (boot_cpu_data.x86_model > 0x9 ||
-	     boot_cpu_data.x86_mask >= 0x1))
+	     boot_cpu_data.x86_stepping >= 0x1))
		amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;
	if (boot_cpu_data.x86 == 0x15)
...
@@ -20,7 +20,7 @@ void foo(void)
	OFFSET(CPUINFO_x86, cpuinfo_x86, x86);
	OFFSET(CPUINFO_x86_vendor, cpuinfo_x86, x86_vendor);
	OFFSET(CPUINFO_x86_model, cpuinfo_x86, x86_model);
-	OFFSET(CPUINFO_x86_mask, cpuinfo_x86, x86_mask);
+	OFFSET(CPUINFO_x86_stepping, cpuinfo_x86, x86_stepping);
	OFFSET(CPUINFO_cpuid_level, cpuinfo_x86, cpuid_level);
	OFFSET(CPUINFO_x86_capability, cpuinfo_x86, x86_capability);
	OFFSET(CPUINFO_x86_vendor_id, cpuinfo_x86, x86_vendor_id);
...
@@ -115,7 +115,7 @@ static void init_amd_k6(struct cpuinfo_x86 *c)
		return;
	}
-	if (c->x86_model == 6 && c->x86_mask == 1) {
+	if (c->x86_model == 6 && c->x86_stepping == 1) {
		const int K6_BUG_LOOP = 1000000;
		int n;
		void (*f_vide)(void);
@@ -145,7 +145,7 @@ static void init_amd_k6(struct cpuinfo_x86 *c)
	/* K6 with old style WHCR */
	if (c->x86_model < 8 ||
-	    (c->x86_model == 8 && c->x86_mask < 8)) {
+	    (c->x86_model == 8 && c->x86_stepping < 8)) {
		/* We can only write allocate on the low 508Mb */
		if (mbytes > 508)
			mbytes = 508;
@@ -164,7 +164,7 @@ static void init_amd_k6(struct cpuinfo_x86 *c)
		return;
	}
-	if ((c->x86_model == 8 && c->x86_mask > 7) ||
+	if ((c->x86_model == 8 && c->x86_stepping > 7) ||
	     c->x86_model == 9 || c->x86_model == 13) {
		/* The more serious chips .. */
@@ -217,7 +217,7 @@ static void init_amd_k7(struct cpuinfo_x86 *c)
	 * are more robust with CLK_CTL set to 200xxxxx instead of 600xxxxx
	 * As per AMD technical note 27212 0.2
	 */
-	if ((c->x86_model == 8 && c->x86_mask >= 1) || (c->x86_model > 8)) {
+	if ((c->x86_model == 8 && c->x86_stepping >= 1) || (c->x86_model > 8)) {
		rdmsr(MSR_K7_CLK_CTL, l, h);
		if ((l & 0xfff00000) != 0x20000000) {
			printk(KERN_INFO
@@ -238,12 +238,12 @@ static void init_amd_k7(struct cpuinfo_x86 *c)
	 * but they are not certified as MP capable.
	 */
	/* Athlon 660/661 is valid. */
-	if ((c->x86_model == 6) && ((c->x86_mask == 0) ||
-	    (c->x86_mask == 1)))
+	if ((c->x86_model == 6) && ((c->x86_stepping == 0) ||
+	    (c->x86_stepping == 1)))
		return;
	/* Duron 670 is valid */
-	if ((c->x86_model == 7) && (c->x86_mask == 0))
+	if ((c->x86_model == 7) && (c->x86_stepping == 0))
		return;
	/*
@@ -253,8 +253,8 @@ static void init_amd_k7(struct cpuinfo_x86 *c)
	 * See http://www.heise.de/newsticker/data/jow-18.10.01-000 for
	 * more.
	 */
-	if (((c->x86_model == 6) && (c->x86_mask >= 2)) ||
-	    ((c->x86_model == 7) && (c->x86_mask >= 1)) ||
+	if (((c->x86_model == 6) && (c->x86_stepping >= 2)) ||
+	    ((c->x86_model == 7) && (c->x86_stepping >= 1)) ||
	    (c->x86_model > 7))
		if (cpu_has(c, X86_FEATURE_MP))
			return;
@@ -574,7 +574,7 @@ static void early_init_amd(struct cpuinfo_x86 *c)
	/* Set MTRR capability flag if appropriate */
	if (c->x86 == 5)
		if (c->x86_model == 13 || c->x86_model == 9 ||
-		    (c->x86_model == 8 && c->x86_mask >= 8))
+		    (c->x86_model == 8 && c->x86_stepping >= 8))
			set_cpu_cap(c, X86_FEATURE_K6_MTRR);
 #endif
 #if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_PCI)
@@ -933,11 +933,11 @@ static unsigned int amd_size_cache(struct cpuinfo_x86 *c, unsigned int size)
	/* AMD errata T13 (order #21922) */
	if ((c->x86 == 6)) {
		/* Duron Rev A0 */
-		if (c->x86_model == 3 && c->x86_mask == 0)
+		if (c->x86_model == 3 && c->x86_stepping == 0)
			size = 64;
		/* Tbird rev A1/A2 */
		if (c->x86_model == 4 &&
-		    (c->x86_mask == 0 || c->x86_mask == 1))
+		    (c->x86_stepping == 0 || c->x86_stepping == 1))
			size = 256;
	}
	return size;
@@ -1074,7 +1074,7 @@ static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum)
	}
	/* OSVW unavailable or ID unknown, match family-model-stepping range */
-	ms = (cpu->x86_model << 4) | cpu->x86_mask;
+	ms = (cpu->x86_model << 4) | cpu->x86_stepping;
	while ((range = *erratum++))
		if ((cpu->x86 == AMD_MODEL_RANGE_FAMILY(range)) &&
		    (ms >= AMD_MODEL_RANGE_START(range)) &&
...
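The cpu_has_amd_erratum() hunk above is a spot where the stepping participates in arithmetic rather than a plain compare: model and stepping are packed into one value so an erratum can be expressed as a single numeric range. A minimal user-space sketch of that packing; the pack_ms() helper and the range endpoints are illustrative, not taken from the kernel:

#include <stdio.h>

/* Pack model and stepping the same way cpu_has_amd_erratum() does,
 * so e.g. "model 8 stepping 7" through "model 9 stepping 2" becomes
 * one ordered range. */
static unsigned int pack_ms(unsigned int model, unsigned int stepping)
{
	return (model << 4) | stepping;
}

int main(void)
{
	unsigned int start = pack_ms(0x8, 0x7);	/* hypothetical range start */
	unsigned int end   = pack_ms(0x9, 0x2);	/* hypothetical range end   */
	unsigned int ms    = pack_ms(0x8, 0x9);	/* CPU under test           */

	printf("ms=0x%x in range: %s\n", ms,
	       (ms >= start && ms <= end) ? "yes" : "no");
	return 0;
}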
@@ -134,7 +134,7 @@ static void init_centaur(struct cpuinfo_x86 *c)
			clear_cpu_cap(c, X86_FEATURE_TSC);
			break;
		case 8:
-			switch (c->x86_mask) {
+			switch (c->x86_stepping) {
			default:
				name = "2";
				break;
@@ -209,7 +209,7 @@ centaur_size_cache(struct cpuinfo_x86 *c, unsigned int size)
	 * - Note, it seems this may only be in engineering samples.
	 */
	if ((c->x86 == 6) && (c->x86_model == 9) &&
-	    (c->x86_mask == 1) && (size == 65))
+	    (c->x86_stepping == 1) && (size == 65))
		size -= 1;
	return size;
 }
...
@@ -646,7 +646,7 @@ void cpu_detect(struct cpuinfo_x86 *c)
		cpuid(0x00000001, &tfms, &misc, &junk, &cap0);
		c->x86 = (tfms >> 8) & 0xf;
		c->x86_model = (tfms >> 4) & 0xf;
-		c->x86_mask = tfms & 0xf;
+		c->x86_stepping = tfms & 0xf;
		if (c->x86 == 0xf)
			c->x86 += (tfms >> 20) & 0xff;
@@ -1183,7 +1183,7 @@ static void identify_cpu(struct cpuinfo_x86 *c)
	c->loops_per_jiffy = loops_per_jiffy;
	c->x86_cache_size = 0;
	c->x86_vendor = X86_VENDOR_UNKNOWN;
-	c->x86_model = c->x86_mask = 0;	/* So far unknown... */
+	c->x86_model = c->x86_stepping = 0;	/* So far unknown... */
	c->x86_vendor_id[0] = '\0'; /* Unset */
	c->x86_model_id[0] = '\0';  /* Unset */
	c->x86_max_cores = 1;
@@ -1432,8 +1432,8 @@ void print_cpu_info(struct cpuinfo_x86 *c)
	printk(KERN_CONT " (family: 0x%x, model: 0x%x", c->x86, c->x86_model);
-	if (c->x86_mask || c->cpuid_level >= 0)
-		printk(KERN_CONT ", stepping: 0x%x)\n", c->x86_mask);
+	if (c->x86_stepping || c->cpuid_level >= 0)
+		pr_cont(", stepping: 0x%x)\n", c->x86_stepping);
	else
		printk(KERN_CONT ")\n");
...
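cpu_detect() above shows where x86_stepping actually comes from: the low nibble of CPUID leaf 1's EAX, with the base family in bits 8-11 (extended by bits 20-27 when the base family is 0xf). A user-space sketch of the same decode using the compiler's cpuid.h helper; it mirrors only what the hunk shows, and the kernel's extended-model handling is deliberately omitted:

#include <stdio.h>
#include <cpuid.h>	/* GCC/Clang helper; user-space sketch only */

int main(void)
{
	unsigned int eax, ebx, ecx, edx;
	unsigned int family, model, stepping;

	if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
		return 1;

	family   = (eax >> 8) & 0xf;
	model    = (eax >> 4) & 0xf;
	stepping = eax & 0xf;	/* the field formerly stored in x86_mask */

	/* Extended family, as in cpu_detect() above. */
	if (family == 0xf)
		family += (eax >> 20) & 0xff;

	printf("family 0x%x, model 0x%x, stepping 0x%x\n",
	       family, model, stepping);
	return 0;
}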
@@ -212,7 +212,7 @@ static void init_cyrix(struct cpuinfo_x86 *c)
	/* common case step number/rev -- exceptions handled below */
	c->x86_model = (dir1 >> 4) + 1;
-	c->x86_mask = dir1 & 0xf;
+	c->x86_stepping = dir1 & 0xf;
	/* Now cook; the original recipe is by Channing Corn, from Cyrix.
	 * We do the same thing for each generation: we work out
...
@@ -55,7 +55,7 @@ static void early_init_intel(struct cpuinfo_x86 *c)
	 * need the microcode to have already been loaded... so if it is
	 * not, recommend a BIOS update and disable large pages.
	 */
-	if (c->x86 == 6 && c->x86_model == 0x1c && c->x86_mask <= 2 &&
+	if (c->x86 == 6 && c->x86_model == 0x1c && c->x86_stepping <= 2 &&
	    c->microcode < 0x20e) {
		printk(KERN_WARNING "Atom PSE erratum detected, BIOS microcode update recommended\n");
		clear_cpu_cap(c, X86_FEATURE_PSE);
@@ -71,7 +71,7 @@ static void early_init_intel(struct cpuinfo_x86 *c)
	/* CPUID workaround for 0F33/0F34 CPU */
	if (c->x86 == 0xF && c->x86_model == 0x3
-	    && (c->x86_mask == 0x3 || c->x86_mask == 0x4))
+	    && (c->x86_stepping == 0x3 || c->x86_stepping == 0x4))
		c->x86_phys_bits = 36;
	/*
@@ -191,8 +191,8 @@ int ppro_with_ram_bug(void)
	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
	    boot_cpu_data.x86 == 6 &&
	    boot_cpu_data.x86_model == 1 &&
-	    boot_cpu_data.x86_mask < 8) {
-		printk(KERN_INFO "Pentium Pro with Errata#50 detected. Taking evasive action.\n");
+	    boot_cpu_data.x86_stepping < 8) {
+		pr_info("Pentium Pro with Errata#50 detected. Taking evasive action.\n");
		return 1;
	}
	return 0;
@@ -208,7 +208,7 @@ static void intel_smp_check(struct cpuinfo_x86 *c)
	 * Mask B, Pentium, but not Pentium MMX
	 */
	if (c->x86 == 5 &&
-	    c->x86_mask >= 1 && c->x86_mask <= 4 &&
+	    c->x86_stepping >= 1 && c->x86_stepping <= 4 &&
	    c->x86_model <= 3) {
		/*
		 * Remember we have B step Pentia with bugs
@@ -251,7 +251,7 @@ static void intel_workarounds(struct cpuinfo_x86 *c)
	 * SEP CPUID bug: Pentium Pro reports SEP but doesn't have it until
	 * model 3 mask 3
	 */
-	if ((c->x86<<8 | c->x86_model<<4 | c->x86_mask) < 0x633)
+	if ((c->x86<<8 | c->x86_model<<4 | c->x86_stepping) < 0x633)
		clear_cpu_cap(c, X86_FEATURE_SEP);
	/*
@@ -269,7 +269,7 @@ static void intel_workarounds(struct cpuinfo_x86 *c)
	 * P4 Xeon errata 037 workaround.
	 * Hardware prefetcher may cause stale data to be loaded into the cache.
	 */
-	if ((c->x86 == 15) && (c->x86_model == 1) && (c->x86_mask == 1)) {
+	if ((c->x86 == 15) && (c->x86_model == 1) && (c->x86_stepping == 1)) {
		if (msr_set_bit(MSR_IA32_MISC_ENABLE,
				MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE_BIT)
		    > 0) {
@@ -285,7 +285,7 @@ static void intel_workarounds(struct cpuinfo_x86 *c)
	 * Specification Update").
	 */
	if (cpu_has_apic && (c->x86<<8 | c->x86_model<<4) == 0x520 &&
-	    (c->x86_mask < 0x6 || c->x86_mask == 0xb))
+	    (c->x86_stepping < 0x6 || c->x86_stepping == 0xb))
		set_cpu_bug(c, X86_BUG_11AP);
@@ -504,7 +504,7 @@ static void init_intel(struct cpuinfo_x86 *c)
		case 6:
			if (l2 == 128)
				p = "Celeron (Mendocino)";
-			else if (c->x86_mask == 0 || c->x86_mask == 5)
+			else if (c->x86_stepping == 0 || c->x86_stepping == 5)
				p = "Celeron-A";
			break;
...
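The SEP workaround above relies on family, model, and stepping packing into an ordered integer: family 6, model 3, stepping 3 packs to (6 << 8) | (3 << 4) | 3 = 0x633, so any earlier Pentium Pro-era signature compares below it and loses the SEP flag. A tiny check of that arithmetic:

#include <assert.h>

int main(void)
{
	/* Family 6, model 3, stepping 3 -> 0x633, the first signature
	 * where SEP actually works per the comment in the hunk above. */
	unsigned int sig = (6 << 8) | (3 << 4) | 3;

	assert(sig == 0x633);
	/* Any 6/2/x part (0x62x) compares below 0x633, so SEP is cleared. */
	assert(((6 << 8) | (2 << 4) | 0xf) < 0x633);
	return 0;
}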
@@ -43,7 +43,7 @@ const struct x86_cpu_id *x86_match_cpu(const struct x86_cpu_id *match)
		if (m->model != X86_MODEL_ANY && c->x86_model != m->model)
			continue;
		if (m->steppings != X86_STEPPING_ANY &&
-		    !(BIT(c->x86_mask) & m->steppings))
+		    !(BIT(c->x86_stepping) & m->steppings))
			continue;
		if (m->feature != X86_FEATURE_ANY && !cpu_has(c, m->feature))
			continue;
...
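x86_match_cpu() treats m->steppings as a bitmask with one bit per stepping value, which is what lets the SRBDS backport mentioned in the commit message target specific steppings of a model. A standalone sketch of the test; the mask value and variable names are made up for illustration, only the BIT(stepping) & steppings idiom comes from the hunk:

#include <stdio.h>

#define BIT(n) (1U << (n))

int main(void)
{
	/* Hypothetical match entry accepting steppings 2, 3 and 5. */
	unsigned short steppings = BIT(2) | BIT(3) | BIT(5);
	unsigned int cpu_stepping = 3;	/* as decoded from CPUID leaf 1 */

	if (BIT(cpu_stepping) & steppings)
		printf("stepping %u matches\n", cpu_stepping);
	return 0;
}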
@@ -1013,7 +1013,7 @@ static bool is_blacklisted(unsigned int cpu)
	 */
	if (c->x86 == 6 &&
	    c->x86_model == 79 &&
-	    c->x86_mask == 0x01 &&
+	    c->x86_stepping == 0x01 &&
	    llc_size_per_core > 2621440 &&
	    c->microcode < 0x0b000021) {
		pr_err_once("Erratum BDF90: late loading with revision < 0x0b000021 (0x%x) disabled.\n", c->microcode);
@@ -1036,7 +1036,7 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device,
		return UCODE_NFOUND;
	sprintf(name, "intel-ucode/%02x-%02x-%02x",
-		c->x86, c->x86_model, c->x86_mask);
+		c->x86, c->x86_model, c->x86_stepping);
	if (request_firmware_direct(&firmware, name, device)) {
		pr_debug("data file %s load failed\n", name);
...
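request_microcode_fw() above derives the firmware path directly from the family-model-stepping triple. For the Broadwell part in the preceding blacklist hunk (family 6, model 79, stepping 1), the format string yields intel-ucode/06-4f-01. A quick user-space demonstration of that formatting:

#include <stdio.h>

int main(void)
{
	char name[32];
	/* Family 6, model 79, stepping 1 -- the BDF90 CPU above. */
	unsigned int x86 = 6, x86_model = 79, x86_stepping = 0x01;

	snprintf(name, sizeof(name), "intel-ucode/%02x-%02x-%02x",
		 x86, x86_model, x86_stepping);
	printf("%s\n", name);	/* prints: intel-ucode/06-4f-01 */
	return 0;
}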
@@ -860,7 +860,7 @@ int generic_validate_add_page(unsigned long base, unsigned long size,
	 */
	if (is_cpu(INTEL) && boot_cpu_data.x86 == 6 &&
	    boot_cpu_data.x86_model == 1 &&
-	    boot_cpu_data.x86_mask <= 7) {
+	    boot_cpu_data.x86_stepping <= 7) {
		if (base & ((1 << (22 - PAGE_SHIFT)) - 1)) {
			pr_warning("mtrr: base(0x%lx000) is not 4 MiB aligned\n", base);
			return -EINVAL;
...
@@ -699,8 +699,8 @@ void __init mtrr_bp_init(void)
		if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
		    boot_cpu_data.x86 == 0xF &&
		    boot_cpu_data.x86_model == 0x3 &&
-		    (boot_cpu_data.x86_mask == 0x3 ||
-		     boot_cpu_data.x86_mask == 0x4))
+		    (boot_cpu_data.x86_stepping == 0x3 ||
+		     boot_cpu_data.x86_stepping == 0x4))
			phys_addr = 36;
		size_or_mask = SIZE_OR_MASK_BITS(phys_addr);
...
@@ -70,8 +70,8 @@ static int show_cpuinfo(struct seq_file *m, void *v)
		   c->x86_model,
		   c->x86_model_id[0] ? c->x86_model_id : "unknown");
-	if (c->x86_mask || c->cpuid_level >= 0)
-		seq_printf(m, "stepping\t: %d\n", c->x86_mask);
+	if (c->x86_stepping || c->cpuid_level >= 0)
+		seq_printf(m, "stepping\t: %d\n", c->x86_stepping);
	else
		seq_puts(m, "stepping\t: unknown\n");
	if (c->microcode)
...
@@ -35,7 +35,7 @@
 #define X86		new_cpu_data+CPUINFO_x86
 #define X86_VENDOR	new_cpu_data+CPUINFO_x86_vendor
 #define X86_MODEL	new_cpu_data+CPUINFO_x86_model
-#define X86_MASK	new_cpu_data+CPUINFO_x86_mask
+#define X86_STEPPING	new_cpu_data+CPUINFO_x86_stepping
 #define X86_HARD_MATH	new_cpu_data+CPUINFO_hard_math
 #define X86_CPUID	new_cpu_data+CPUINFO_cpuid_level
 #define X86_CAPABILITY	new_cpu_data+CPUINFO_x86_capability
@@ -443,7 +443,7 @@ enable_paging:
	shrb $4,%al
	movb %al,X86_MODEL
	andb $0x0f,%cl		# mask mask revision
-	movb %cl,X86_MASK
+	movb %cl,X86_STEPPING
	movl %edx,X86_CAPABILITY
 is486:
...
@@ -406,7 +406,7 @@ static inline void __init construct_default_ISA_mptable(int mpc_default_type)
	processor.apicver = mpc_default_type > 4 ? 0x10 : 0x01;
	processor.cpuflag = CPU_ENABLED;
	processor.cpufeature = (boot_cpu_data.x86 << 8) |
-	    (boot_cpu_data.x86_model << 4) | boot_cpu_data.x86_mask;
+	    (boot_cpu_data.x86_model << 4) | boot_cpu_data.x86_stepping;
	processor.featureflag = boot_cpu_data.x86_capability[0];
	processor.reserved[0] = 0;
	processor.reserved[1] = 0;
...
@@ -166,7 +166,7 @@ static int via_rng_init(struct hwrng *rng)
	/* Enable secondary noise source on CPUs where it is present. */
	/* Nehemiah stepping 8 and higher */
-	if ((c->x86_model == 9) && (c->x86_mask > 7))
+	if ((c->x86_model == 9) && (c->x86_stepping > 7))
		lo |= VIA_NOISESRC2;
	/* Esther */
...
@@ -639,7 +639,7 @@ static int acpi_cpufreq_blacklist(struct cpuinfo_x86 *c)
	if (c->x86_vendor == X86_VENDOR_INTEL) {
		if ((c->x86 == 15) &&
		    (c->x86_model == 6) &&
-		    (c->x86_mask == 8)) {
+		    (c->x86_stepping == 8)) {
			printk(KERN_INFO "acpi-cpufreq: Intel(R) "
				"Xeon(R) 7100 Errata AL30, processors may "
				"lock up on frequency changes: disabling "
...
@@ -786,7 +786,7 @@ static int longhaul_cpu_init(struct cpufreq_policy *policy)
		break;
	case 7:
-		switch (c->x86_mask) {
+		switch (c->x86_stepping) {
		case 0:
			longhaul_version = TYPE_LONGHAUL_V1;
			cpu_model = CPU_SAMUEL2;
@@ -798,7 +798,7 @@ static int longhaul_cpu_init(struct cpufreq_policy *policy)
			break;
		case 1 ... 15:
			longhaul_version = TYPE_LONGHAUL_V2;
-			if (c->x86_mask < 8) {
+			if (c->x86_stepping < 8) {
				cpu_model = CPU_SAMUEL2;
				cpuname = "C3 'Samuel 2' [C5B]";
			} else {
@@ -825,7 +825,7 @@ static int longhaul_cpu_init(struct cpufreq_policy *policy)
		numscales = 32;
		memcpy(mults, nehemiah_mults, sizeof(nehemiah_mults));
		memcpy(eblcr, nehemiah_eblcr, sizeof(nehemiah_eblcr));
-		switch (c->x86_mask) {
+		switch (c->x86_stepping) {
		case 0 ... 1:
			cpu_model = CPU_NEHEMIAH;
			cpuname = "C3 'Nehemiah A' [C5XLOE]";
...
@@ -176,7 +176,7 @@ static int cpufreq_p4_cpu_init(struct cpufreq_policy *policy)
 #endif
	/* Errata workaround */
-	cpuid = (c->x86 << 8) | (c->x86_model << 4) | c->x86_mask;
+	cpuid = (c->x86 << 8) | (c->x86_model << 4) | c->x86_stepping;
	switch (cpuid) {
	case 0x0f07:
	case 0x0f0a:
...
@@ -132,7 +132,7 @@ static int check_powernow(void)
		return 0;
	}
-	if ((c->x86_model == 6) && (c->x86_mask == 0)) {
+	if ((c->x86_model == 6) && (c->x86_stepping == 0)) {
		printk(KERN_INFO PFX "K7 660[A0] core detected, "
		       "enabling errata workarounds\n");
		have_a0 = 1;
...
@@ -36,7 +36,7 @@ struct cpu_id
 {
	__u8	x86;		/* CPU family */
	__u8	x86_model;	/* model */
-	__u8	x86_mask;	/* stepping */
+	__u8	x86_stepping;	/* stepping */
 };
 enum {
@@ -276,7 +276,7 @@ static int centrino_verify_cpu_id(const struct cpuinfo_x86 *c,
 {
	if ((c->x86 == x->x86) &&
	    (c->x86_model == x->x86_model) &&
-	    (c->x86_mask == x->x86_mask))
+	    (c->x86_stepping == x->x86_stepping))
		return 1;
	return 0;
 }
...
@@ -270,9 +270,9 @@ unsigned int speedstep_detect_processor(void)
		ebx = cpuid_ebx(0x00000001);
		ebx &= 0x000000FF;
-		pr_debug("ebx value is %x, x86_mask is %x\n", ebx, c->x86_mask);
+		pr_debug("ebx value is %x, x86_stepping is %x\n", ebx, c->x86_stepping);
-		switch (c->x86_mask) {
+		switch (c->x86_stepping) {
		case 4:
			/*
			 * B-stepping [M-P4-M]
@@ -359,7 +359,7 @@ unsigned int speedstep_detect_processor(void)
			 msr_lo, msr_hi);
		if ((msr_hi & (1<<18)) &&
		    (relaxed_check ? 1 : (msr_hi & (3<<24)))) {
-			if (c->x86_mask == 0x01) {
+			if (c->x86_stepping == 0x01) {
				pr_debug("early PIII version\n");
				return SPEEDSTEP_CPU_PIII_C_EARLY;
			} else
...
@@ -535,7 +535,7 @@ static int __init padlock_init(void)
	printk(KERN_NOTICE PFX "Using VIA PadLock ACE for AES algorithm.\n");
-	if (c->x86 == 6 && c->x86_model == 15 && c->x86_mask == 2) {
+	if (c->x86 == 6 && c->x86_model == 15 && c->x86_stepping == 2) {
		ecb_fetch_blocks = MAX_ECB_FETCH_BLOCKS;
		cbc_fetch_blocks = MAX_CBC_FETCH_BLOCKS;
		printk(KERN_NOTICE PFX "VIA Nano stepping 2 detected: enabling workaround.\n");
...
@@ -2715,7 +2715,7 @@ static struct amd64_family_type *per_family_init(struct amd64_pvt *pvt)
	struct amd64_family_type *fam_type = NULL;
	pvt->ext_model  = boot_cpu_data.x86_model >> 4;
-	pvt->stepping   = boot_cpu_data.x86_mask;
+	pvt->stepping   = boot_cpu_data.x86_stepping;
	pvt->model      = boot_cpu_data.x86_model;
	pvt->fam        = boot_cpu_data.x86;
...
@@ -760,7 +760,7 @@ int amd_decode_mce(struct notifier_block *nb, unsigned long val, void *data)
	pr_emerg(HW_ERR "CPU:%d (%x:%x:%x) MC%d_STATUS[%s|%s|%s|%s|%s",
		m->extcpu,
-		c->x86, c->x86_model, c->x86_mask,
+		c->x86, c->x86_model, c->x86_stepping,
		m->bank,
		((m->status & MCI_STATUS_OVER) ? "Over" : "-"),
		((m->status & MCI_STATUS_UC) ? "UE" :
...
@@ -269,13 +269,13 @@ static int adjust_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev)
	for (i = 0; i < ARRAY_SIZE(tjmax_model_table); i++) {
		const struct tjmax_model *tm = &tjmax_model_table[i];
		if (c->x86_model == tm->model &&
-		    (tm->mask == ANY || c->x86_mask == tm->mask))
+		    (tm->mask == ANY || c->x86_stepping == tm->mask))
			return tm->tjmax;
	}
	/* Early chips have no MSR for TjMax */
-	if (c->x86_model == 0xf && c->x86_mask < 4)
+	if (c->x86_model == 0xf && c->x86_stepping < 4)
		usemsr_ee = 0;
	if (c->x86_model > 0xe && usemsr_ee) {
@@ -426,7 +426,7 @@ static int chk_ucode_version(unsigned int cpu)
	 * Readings might stop update when processor visited too deep sleep,
	 * fixed for stepping D0 (6EC).
	 */
-	if (c->x86_model == 0xe && c->x86_mask < 0xc && c->microcode < 0x39) {
+	if (c->x86_model == 0xe && c->x86_stepping < 0xc && c->microcode < 0x39) {
		pr_err("Errata AE18 not fixed, update BIOS or microcode of the CPU!\n");
		return -ENODEV;
	}
...
@@ -293,7 +293,7 @@ u8 vid_which_vrm(void)
	if (c->x86 < 6)		/* Any CPU with family lower than 6 */
		return 0;	/* doesn't have VID */
-	vrm_ret = find_vrm(c->x86, c->x86_model, c->x86_mask, c->x86_vendor);
+	vrm_ret = find_vrm(c->x86, c->x86_model, c->x86_stepping, c->x86_vendor);
	if (vrm_ret == 134)
		vrm_ret = get_via_model_d_vrm();
	if (vrm_ret == 0)
...
@@ -179,7 +179,7 @@ static bool has_erratum_319(struct pci_dev *pdev)
	 * and AM3 formats, but that's the best we can do.
	 */
	return boot_cpu_data.x86_model < 4 ||
-	       (boot_cpu_data.x86_model == 4 && boot_cpu_data.x86_mask <= 2);
+	       (boot_cpu_data.x86_model == 4 && boot_cpu_data.x86_stepping <= 2);
 }
 static int k10temp_probe(struct pci_dev *pdev,
...
@@ -187,7 +187,7 @@ static int k8temp_probe(struct pci_dev *pdev,
		return -ENOMEM;
	model = boot_cpu_data.x86_model;
-	stepping = boot_cpu_data.x86_mask;
+	stepping = boot_cpu_data.x86_stepping;
	/* feature available since SH-C0, exclude older revisions */
	if ((model == 4 && stepping == 0) ||
...
@@ -127,7 +127,7 @@ void gx_set_dclk_frequency(struct fb_info *info)
	int timeout = 1000;
	/* Rev. 1 Geode GXs use a 14 MHz reference clock instead of 48 MHz. */
-	if (cpu_data(0).x86_mask == 1) {
+	if (cpu_data(0).x86_stepping == 1) {
		pll_table = gx_pll_table_14MHz;
		pll_table_len = ARRAY_SIZE(gx_pll_table_14MHz);
	} else {
...