Commit 0afe2db2 authored by Ingo Molnar

Merge branch 'x86/unify-cpu-detect' into x86-v28-for-linus-phase4-D

Conflicts:
	arch/x86/kernel/cpu/common.c
	arch/x86/kernel/signal_64.c
	include/asm-x86/cpufeature.h
parents d8470596 43603c8d
......@@ -419,6 +419,60 @@ config X86_DEBUGCTLMSR
def_bool y
depends on !(MK6 || MWINCHIPC6 || MWINCHIP2 || MWINCHIP3D || MCYRIXIII || M586MMX || M586TSC || M586 || M486 || M386)
menuconfig PROCESSOR_SELECT
default y
bool "Supported processor vendors" if EMBEDDED
help
This lets you choose what x86 vendor support code your kernel
will include.
config CPU_SUP_INTEL
default y
bool "Support Intel processors" if PROCESSOR_SELECT
help
This enables extended support for Intel processors
config CPU_SUP_CYRIX_32
default y
bool "Support Cyrix processors" if PROCESSOR_SELECT
depends on !64BIT
help
This enables extended support for Cyrix processors
config CPU_SUP_AMD
default y
bool "Support AMD processors" if PROCESSOR_SELECT
help
This enables extended support for AMD processors
config CPU_SUP_CENTAUR_32
default y
bool "Support Centaur processors" if PROCESSOR_SELECT
depends on !64BIT
help
This enables extended support for Centaur processors
config CPU_SUP_CENTAUR_64
default y
bool "Support Centaur processors" if PROCESSOR_SELECT
depends on 64BIT
help
This enables extended support for Centaur processors
config CPU_SUP_TRANSMETA_32
default y
bool "Support Transmeta processors" if PROCESSOR_SELECT
depends on !64BIT
help
This enables extended support for Transmeta processors
config CPU_SUP_UMC_32
default y
bool "Support UMC processors" if PROCESSOR_SELECT
depends on !64BIT
help
This enables extended support for UMC processors
config X86_DS
bool "Debug Store support"
default y
......
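For illustration, a hypothetical EMBEDDED .config that keeps only Intel and AMD support would read:

CONFIG_PROCESSOR_SELECT=y
CONFIG_CPU_SUP_INTEL=y
CONFIG_CPU_SUP_AMD=y
# CONFIG_CPU_SUP_CYRIX_32 is not set
# CONFIG_CPU_SUP_CENTAUR_32 is not set
# CONFIG_CPU_SUP_TRANSMETA_32 is not set
# CONFIG_CPU_SUP_UMC_32 is not set

Each disabled option drops the matching vendor object (cyrix.o, centaur.o, transmeta.o, umc.o) from the build; see the arch/x86/kernel/cpu/Makefile hunk further down.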
......@@ -59,17 +59,18 @@ int validate_cpu(void)
u32 e = err_flags[i];
for (j = 0; j < 32; j++) {
int n = (i << 5)+j;
if (*msg_strs < n) {
if (msg_strs[0] < i ||
(msg_strs[0] == i && msg_strs[1] < j)) {
/* Skip to the next string */
do {
msg_strs++;
} while (*msg_strs);
msg_strs++;
msg_strs += 2;
while (*msg_strs++)
;
}
if (e & 1) {
if (*msg_strs == n && msg_strs[1])
printf("%s ", msg_strs+1);
if (msg_strs[0] == i &&
msg_strs[1] == j &&
msg_strs[2])
printf("%s ", msg_strs+2);
else
printf("%d:%d ", i, j);
}
......
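The key change here: the old table prefixed each name with the single byte n = (i << 5) + j, while the new table stores the capability-word index and the bit index as two separate prefix bytes. A worked example: the flag in word i = 4, bit j = 20 ("sse4_2" in the capability table further down) was previously tagged with the one byte 0x94 (4*32 + 20 = 148) and is now tagged with the pair \x04\x14, which is what the two-level compare in the skip loop above matches against.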
......@@ -15,33 +15,33 @@
#include <stdio.h>
#include "../kernel/cpu/feature_names.c"
#if NCAPFLAGS > 8
# error "Need to adjust the boot code handling of CPUID strings"
#endif
#include "../kernel/cpu/capflags.c"
int main(void)
{
int i;
int i, j;
const char *str;
printf("static const char x86_cap_strs[] = \n");
for (i = 0; i < NCAPINTS*32; i++) {
str = x86_cap_flags[i];
if (i == NCAPINTS*32-1) {
/* The last entry must be unconditional; this
also consumes the compiler-added null character */
if (!str)
str = "";
printf("\t\"\\x%02x\"\"%s\"\n", i, str);
} else if (str) {
printf("#if REQUIRED_MASK%d & (1 << %d)\n"
"\t\"\\x%02x\"\"%s\\0\"\n"
"#endif\n",
i >> 5, i & 31, i, str);
for (i = 0; i < NCAPINTS; i++) {
for (j = 0; j < 32; j++) {
str = x86_cap_flags[i*32+j];
if (i == NCAPINTS-1 && j == 31) {
/* The last entry must be unconditional; this
also consumes the compiler-added null
character */
if (!str)
str = "";
printf("\t\"\\x%02x\\x%02x\"\"%s\"\n",
i, j, str);
} else if (str) {
printf("#if REQUIRED_MASK%d & (1 << %d)\n"
"\t\"\\x%02x\\x%02x\"\"%s\\0\"\n"
"#endif\n",
i, j, i, j, str);
}
}
}
printf("\t;\n");
......
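Given those printf formats, the regenerated x86_cap_strs initializer carries the same two-byte prefix per name; a representative excerpt of the generated file (which entries survive depends on the REQUIRED_MASK* values):

#if REQUIRED_MASK0 & (1 << 0)
	"\x00\x00""fpu\0"
#endif
#if REQUIRED_MASK4 & (1 << 20)
	"\x04\x14""sse4_2\0"
#endif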
#
# Makefile for the linux kernel.
#
obj-$(CONFIG_X86_ES7000) := es7000plat.o
/*
* Written by: Garry Forsgren, Unisys Corporation
* Natalie Protasevich, Unisys Corporation
* This file contains the code to configure and interface
* with Unisys ES7000 series hardware system manager.
*
* Copyright (c) 2003 Unisys Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it would be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write the Free Software Foundation, Inc., 59
* Temple Place - Suite 330, Boston MA 02111-1307, USA.
*
* Contact information: Unisys Corporation, Township Line & Union Meeting
* Roads-A, Unisys Way, Blue Bell, Pennsylvania, 19424, or:
*
* http://www.unisys.com
*/
/*
* ES7000 chipsets
*/
#define NON_UNISYS 0
#define ES7000_CLASSIC 1
#define ES7000_ZORRO 2
#define MIP_REG 1
#define MIP_PSAI_REG 4
#define MIP_BUSY 1
#define MIP_SPIN 0xf0000
#define MIP_VALID 0x0100000000000000ULL
#define MIP_PORT(VALUE) ((VALUE >> 32) & 0xffff)
#define MIP_RD_LO(VALUE) (VALUE & 0xffffffff)
struct mip_reg_info {
unsigned long long mip_info;
unsigned long long delivery_info;
unsigned long long host_reg;
unsigned long long mip_reg;
};
struct part_info {
unsigned char type;
unsigned char length;
unsigned char part_id;
unsigned char apic_mode;
unsigned long snum;
char ptype[16];
char sname[64];
char pname[64];
};
struct psai {
unsigned long long entry_type;
unsigned long long addr;
unsigned long long bep_addr;
};
struct es7000_mem_info {
unsigned char type;
unsigned char length;
unsigned char resv[6];
unsigned long long start;
unsigned long long size;
};
struct es7000_oem_table {
unsigned long long hdr;
struct mip_reg_info mip;
struct part_info pif;
struct es7000_mem_info shm;
struct psai psai;
};
#ifdef CONFIG_ACPI
struct oem_table {
struct acpi_table_header Header;
u32 OEMTableAddr;
u32 OEMTableSize;
};
extern int find_unisys_acpi_oem_table(unsigned long *oem_addr);
#endif
struct mip_reg {
unsigned long long off_0;
unsigned long long off_8;
unsigned long long off_10;
unsigned long long off_18;
unsigned long long off_20;
unsigned long long off_28;
unsigned long long off_30;
unsigned long long off_38;
};
#define MIP_SW_APIC 0x1020b
#define MIP_FUNC(VALUE) (VALUE & 0xff)
extern int parse_unisys_oem (char *oemptr);
extern void setup_unisys(void);
extern int es7000_start_cpu(int cpu, unsigned long eip);
extern void es7000_sw_apic(void);
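For reference, the MIP accessor macros above carve fields out of a 64-bit MIP register value; a hedged worked example (value hypothetical):

/* With v == 0x0100123400000005ULL:
 *   v & MIP_VALID  is nonzero  (entry valid)
 *   MIP_PORT(v)  == 0x1234     (bits 47:32)
 *   MIP_RD_LO(v) == 0x00000005 (low dword)
 *   MIP_FUNC(v)  == 0x05       (low byte)
 */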
......@@ -179,9 +179,10 @@ struct sigframe
u32 pretcode;
int sig;
struct sigcontext_ia32 sc;
struct _fpstate_ia32 fpstate;
struct _fpstate_ia32 fpstate_unused; /* look at kernel/sigframe.h */
unsigned int extramask[_COMPAT_NSIG_WORDS-1];
char retcode[8];
/* fp state follows here */
};
struct rt_sigframe
......@@ -192,8 +193,8 @@ struct rt_sigframe
u32 puc;
compat_siginfo_t info;
struct ucontext_ia32 uc;
struct _fpstate_ia32 fpstate;
char retcode[8];
/* fp state follows here */
};
#define COPY(x) { \
......@@ -215,7 +216,7 @@ static int ia32_restore_sigcontext(struct pt_regs *regs,
unsigned int *peax)
{
unsigned int tmpflags, gs, oldgs, err = 0;
struct _fpstate_ia32 __user *buf;
void __user *buf;
u32 tmp;
/* Always make any pending restarted system calls return -EINTR */
......@@ -259,26 +260,12 @@ static int ia32_restore_sigcontext(struct pt_regs *regs,
err |= __get_user(tmp, &sc->fpstate);
buf = compat_ptr(tmp);
if (buf) {
if (!access_ok(VERIFY_READ, buf, sizeof(*buf)))
goto badframe;
err |= restore_i387_ia32(buf);
} else {
struct task_struct *me = current;
if (used_math()) {
clear_fpu(me);
clear_used_math();
}
}
err |= restore_i387_xstate_ia32(buf);
err |= __get_user(tmp, &sc->ax);
*peax = tmp;
return err;
badframe:
return 1;
}
asmlinkage long sys32_sigreturn(struct pt_regs *regs)
......@@ -350,7 +337,7 @@ asmlinkage long sys32_rt_sigreturn(struct pt_regs *regs)
*/
static int ia32_setup_sigcontext(struct sigcontext_ia32 __user *sc,
struct _fpstate_ia32 __user *fpstate,
void __user *fpstate,
struct pt_regs *regs, unsigned int mask)
{
int tmp, err = 0;
......@@ -380,7 +367,7 @@ static int ia32_setup_sigcontext(struct sigcontext_ia32 __user *sc,
err |= __put_user((u32)regs->flags, &sc->flags);
err |= __put_user((u32)regs->sp, &sc->sp_at_signal);
tmp = save_i387_ia32(fpstate);
tmp = save_i387_xstate_ia32(fpstate);
if (tmp < 0)
err = -EFAULT;
else {
......@@ -401,7 +388,8 @@ static int ia32_setup_sigcontext(struct sigcontext_ia32 __user *sc,
* Determine which stack to use..
*/
static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
size_t frame_size)
size_t frame_size,
void **fpstate)
{
unsigned long sp;
......@@ -420,6 +408,11 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
ka->sa.sa_restorer)
sp = (unsigned long) ka->sa.sa_restorer;
if (used_math()) {
sp = sp - sig_xstate_ia32_size;
*fpstate = (struct _fpstate_ia32 *) sp;
}
sp -= frame_size;
/* Align the stack pointer according to the i386 ABI,
* i.e. so that on function entry ((sp + 4) & 15) == 0. */
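The used_math() branch added above reserves the xstate buffer before the frame itself, so the FP state ends up above the signal frame on the stack; a sketch of the resulting layout (stack grows down, sizes illustrative):

/*
 *   ...                        <- original sp (or sigaltstack / sa_restorer)
 *   [sig_xstate_ia32_size]     <- *fpstate, reserved only if used_math()
 *   [struct (rt_)sigframe]     <- frame, after the ABI alignment below
 *   lower addresses
 */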
......@@ -433,6 +426,7 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka,
struct sigframe __user *frame;
void __user *restorer;
int err = 0;
void __user *fpstate = NULL;
/* copy_to_user optimizes that into a single 8 byte store */
static const struct {
......@@ -447,7 +441,7 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka,
0,
};
frame = get_sigframe(ka, regs, sizeof(*frame));
frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
goto give_sigsegv;
......@@ -456,8 +450,7 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka,
if (err)
goto give_sigsegv;
err |= ia32_setup_sigcontext(&frame->sc, &frame->fpstate, regs,
set->sig[0]);
err |= ia32_setup_sigcontext(&frame->sc, fpstate, regs, set->sig[0]);
if (err)
goto give_sigsegv;
......@@ -521,6 +514,7 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
struct rt_sigframe __user *frame;
void __user *restorer;
int err = 0;
void __user *fpstate = NULL;
/* __copy_to_user optimizes that into a single 8 byte store */
static const struct {
......@@ -536,7 +530,7 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
0,
};
frame = get_sigframe(ka, regs, sizeof(*frame));
frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
goto give_sigsegv;
......@@ -549,13 +543,16 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
goto give_sigsegv;
/* Create the ucontext. */
err |= __put_user(0, &frame->uc.uc_flags);
if (cpu_has_xsave)
err |= __put_user(UC_FP_XSTATE, &frame->uc.uc_flags);
else
err |= __put_user(0, &frame->uc.uc_flags);
err |= __put_user(0, &frame->uc.uc_link);
err |= __put_user(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp);
err |= __put_user(sas_ss_flags(regs->sp),
&frame->uc.uc_stack.ss_flags);
err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
err |= ia32_setup_sigcontext(&frame->uc.uc_mcontext, &frame->fpstate,
err |= ia32_setup_sigcontext(&frame->uc.uc_mcontext, fpstate,
regs, set->sig[0]);
err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
if (err)
......
......@@ -38,7 +38,7 @@ obj-y += tsc.o io_delay.o rtc.o
obj-$(CONFIG_X86_TRAMPOLINE) += trampoline.o
obj-y += process.o
obj-y += i387.o
obj-y += i387.o xsave.o
obj-y += ptrace.o
obj-y += ds.o
obj-$(CONFIG_X86_32) += tls.o
......@@ -69,6 +69,7 @@ obj-$(CONFIG_KEXEC) += machine_kexec_$(BITS).o
obj-$(CONFIG_KEXEC) += relocate_kernel_$(BITS).o crash.o
obj-$(CONFIG_CRASH_DUMP) += crash_dump_$(BITS).o
obj-$(CONFIG_X86_NUMAQ) += numaq_32.o
obj-$(CONFIG_X86_ES7000) += es7000_32.o
obj-$(CONFIG_X86_SUMMIT_NUMA) += summit_32.o
obj-y += vsmp_64.o
obj-$(CONFIG_KPROBES) += kprobes.o
......
......@@ -3,22 +3,30 @@
#
obj-y := intel_cacheinfo.o addon_cpuid_features.o
obj-y += proc.o feature_names.o
obj-$(CONFIG_X86_32) += common.o bugs.o
obj-$(CONFIG_X86_64) += common_64.o bugs_64.o
obj-$(CONFIG_X86_32) += amd.o
obj-$(CONFIG_X86_64) += amd_64.o
obj-$(CONFIG_X86_32) += cyrix.o
obj-$(CONFIG_X86_32) += centaur.o
obj-$(CONFIG_X86_64) += centaur_64.o
obj-$(CONFIG_X86_32) += transmeta.o
obj-$(CONFIG_X86_32) += intel.o
obj-$(CONFIG_X86_64) += intel_64.o
obj-$(CONFIG_X86_32) += umc.o
obj-y += proc.o capflags.o powerflags.o common.o
obj-$(CONFIG_X86_32) += bugs.o cmpxchg.o
obj-$(CONFIG_X86_64) += bugs_64.o
obj-$(CONFIG_CPU_SUP_INTEL) += intel.o
obj-$(CONFIG_CPU_SUP_AMD) += amd.o
obj-$(CONFIG_CPU_SUP_CYRIX_32) += cyrix.o
obj-$(CONFIG_CPU_SUP_CENTAUR_32) += centaur.o
obj-$(CONFIG_CPU_SUP_CENTAUR_64) += centaur_64.o
obj-$(CONFIG_CPU_SUP_TRANSMETA_32) += transmeta.o
obj-$(CONFIG_CPU_SUP_UMC_32) += umc.o
obj-$(CONFIG_X86_MCE) += mcheck/
obj-$(CONFIG_MTRR) += mtrr/
obj-$(CONFIG_CPU_FREQ) += cpufreq/
obj-$(CONFIG_X86_LOCAL_APIC) += perfctr-watchdog.o
quiet_cmd_mkcapflags = MKCAP $@
cmd_mkcapflags = $(PERL) $(srctree)/$(src)/mkcapflags.pl $< $@
cpufeature = $(src)/../../../../include/asm-x86/cpufeature.h
targets += capflags.c
$(obj)/capflags.c: $(cpufeature) $(src)/mkcapflags.pl FORCE
$(call if_changed,mkcapflags)
......@@ -7,6 +7,8 @@
#include <asm/pat.h>
#include <asm/processor.h>
#include <mach_apic.h>
struct cpuid_bit {
u16 feature;
u8 reg;
......@@ -48,6 +50,92 @@ void __cpuinit init_scattered_cpuid_features(struct cpuinfo_x86 *c)
}
}
/* leaf 0xb SMT level */
#define SMT_LEVEL 0
/* leaf 0xb sub-leaf types */
#define INVALID_TYPE 0
#define SMT_TYPE 1
#define CORE_TYPE 2
#define LEAFB_SUBTYPE(ecx) (((ecx) >> 8) & 0xff)
#define BITS_SHIFT_NEXT_LEVEL(eax) ((eax) & 0x1f)
#define LEVEL_MAX_SIBLINGS(ebx) ((ebx) & 0xffff)
/*
* Check for extended topology enumeration cpuid leaf 0xb and if it
* exists, use it for populating initial_apicid and cpu topology
* detection.
*/
void __cpuinit detect_extended_topology(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
unsigned int eax, ebx, ecx, edx, sub_index;
unsigned int ht_mask_width, core_plus_mask_width;
unsigned int core_select_mask, core_level_siblings;
if (c->cpuid_level < 0xb)
return;
cpuid_count(0xb, SMT_LEVEL, &eax, &ebx, &ecx, &edx);
/*
* check if the cpuid leaf 0xb is actually implemented.
*/
if (ebx == 0 || (LEAFB_SUBTYPE(ecx) != SMT_TYPE))
return;
set_cpu_cap(c, X86_FEATURE_XTOPOLOGY);
/*
* initial apic id, which also represents 32-bit extended x2apic id.
*/
c->initial_apicid = edx;
/*
* Populate HT related information from sub-leaf level 0.
*/
core_level_siblings = smp_num_siblings = LEVEL_MAX_SIBLINGS(ebx);
core_plus_mask_width = ht_mask_width = BITS_SHIFT_NEXT_LEVEL(eax);
sub_index = 1;
do {
cpuid_count(0xb, sub_index, &eax, &ebx, &ecx, &edx);
/*
* Check for the Core type in the implemented sub leaves.
*/
if (LEAFB_SUBTYPE(ecx) == CORE_TYPE) {
core_level_siblings = LEVEL_MAX_SIBLINGS(ebx);
core_plus_mask_width = BITS_SHIFT_NEXT_LEVEL(eax);
break;
}
sub_index++;
} while (LEAFB_SUBTYPE(ecx) != INVALID_TYPE);
core_select_mask = (~(-1 << core_plus_mask_width)) >> ht_mask_width;
#ifdef CONFIG_X86_32
c->cpu_core_id = phys_pkg_id(c->initial_apicid, ht_mask_width)
& core_select_mask;
c->phys_proc_id = phys_pkg_id(c->initial_apicid, core_plus_mask_width);
#else
c->cpu_core_id = phys_pkg_id(ht_mask_width) & core_select_mask;
c->phys_proc_id = phys_pkg_id(core_plus_mask_width);
#endif
c->x86_max_cores = (core_level_siblings / smp_num_siblings);
printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
c->phys_proc_id);
if (c->x86_max_cores > 1)
printk(KERN_INFO "CPU: Processor Core ID: %d\n",
c->cpu_core_id);
return;
#endif
}
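A worked example of the topology arithmetic above, with hypothetical leaf 0xb values for a 2-thread-per-core, 4-core package:

/*
 * SMT sub-leaf:  eax[4:0] = 1  ->  ht_mask_width = 1
 * Core sub-leaf: eax[4:0] = 3  ->  core_plus_mask_width = 3
 * core_select_mask = (~(-1 << 3)) >> 1 = 0x3
 *
 * For initial_apicid = 0x5 (binary 0101):
 *   cpu_core_id  = (0x5 >> 1) & 0x3 = 2
 *   phys_proc_id =  0x5 >> 3        = 0
 */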
#ifdef CONFIG_X86_PAT
void __cpuinit validate_pat_support(struct cpuinfo_x86 *c)
{
......
#include <linux/init.h>
#include <linux/mm.h>
#include <asm/numa_64.h>
#include <asm/mmconfig.h>
#include <asm/cacheflush.h>
#include <mach_apic.h>
#include "cpu.h"
int force_mwait __cpuinitdata;
#ifdef CONFIG_NUMA
static int __cpuinit nearby_node(int apicid)
{
int i, node;
for (i = apicid - 1; i >= 0; i--) {
node = apicid_to_node[i];
if (node != NUMA_NO_NODE && node_online(node))
return node;
}
for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) {
node = apicid_to_node[i];
if (node != NUMA_NO_NODE && node_online(node))
return node;
}
return first_node(node_online_map); /* Shouldn't happen */
}
#endif
/*
* On an AMD dual-core setup the lower bits of the APIC ID distinguish the cores.
* Assumes number of cores is a power of two.
*/
static void __cpuinit amd_detect_cmp(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
unsigned bits;
#ifdef CONFIG_NUMA
int cpu = smp_processor_id();
int node = 0;
unsigned apicid = hard_smp_processor_id();
#endif
bits = c->x86_coreid_bits;
/* Low order bits define the core id (index of core in socket) */
c->cpu_core_id = c->initial_apicid & ((1 << bits)-1);
/* Convert the initial APIC ID into the socket ID */
c->phys_proc_id = c->initial_apicid >> bits;
#ifdef CONFIG_NUMA
node = c->phys_proc_id;
if (apicid_to_node[apicid] != NUMA_NO_NODE)
node = apicid_to_node[apicid];
if (!node_online(node)) {
/* Two possibilities here:
- The CPU is missing memory and no node was created.
In that case try picking one from a nearby CPU
- The APIC IDs differ from the HyperTransport node IDs
which the K8 northbridge parsing fills in.
Assume they are all increased by a constant offset,
but in the same order as the HT nodeids.
If that doesn't result in a usable node fall back to the
path for the previous case. */
int ht_nodeid = c->initial_apicid;
if (ht_nodeid >= 0 &&
apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
node = apicid_to_node[ht_nodeid];
/* Pick a nearby node */
if (!node_online(node))
node = nearby_node(apicid);
}
numa_set_node(cpu, node);
printk(KERN_INFO "CPU %d/%x -> Node %d\n", cpu, apicid, node);
#endif
#endif
}
static void __cpuinit early_init_amd_mc(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
unsigned bits, ecx;
/* Multi core CPU? */
if (c->extended_cpuid_level < 0x80000008)
return;
ecx = cpuid_ecx(0x80000008);
c->x86_max_cores = (ecx & 0xff) + 1;
/* CPU telling us the core id bits shift? */
bits = (ecx >> 12) & 0xF;
/* Otherwise recompute */
if (bits == 0) {
while ((1 << bits) < c->x86_max_cores)
bits++;
}
c->x86_coreid_bits = bits;
#endif
}
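A worked example of the two functions above, with hypothetical CPUID values:

/*
 * Suppose cpuid(0x80000008).ecx == 0x3003 on a quad-core part:
 *   x86_max_cores = (0x03 & 0xff) + 1    = 4
 *   bits          = (0x3003 >> 12) & 0xf = 3
 * amd_detect_cmp() then splits initial_apicid = 0xb (binary 1011) into:
 *   cpu_core_id  = 0xb & ((1 << 3) - 1) = 3
 *   phys_proc_id = 0xb >> 3             = 1
 */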
static void __cpuinit early_init_amd(struct cpuinfo_x86 *c)
{
early_init_amd_mc(c);
/* c->x86_power is 8000_0007 edx. Bit 8 is constant TSC */
if (c->x86_power & (1<<8))
set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
set_cpu_cap(c, X86_FEATURE_SYSCALL32);
}
static void __cpuinit init_amd(struct cpuinfo_x86 *c)
{
unsigned level;
#ifdef CONFIG_SMP
unsigned long value;
/*
* Disable TLB flush filter by setting HWCR.FFDIS on K8
* bit 6 of msr C001_0015
*
* Errata 63 for SH-B3 steppings
* Errata 122 for all steppings (F+ have it disabled by default)
*/
if (c->x86 == 0xf) {
rdmsrl(MSR_K8_HWCR, value);
value |= 1 << 6;
wrmsrl(MSR_K8_HWCR, value);
}
#endif
/* Bit 31 in normal CPUID is used for a nonstandard 3DNow ID;
   3DNow is identified by bit 31 in extended CPUID (1*32+31) anyway */
clear_cpu_cap(c, 0*32+31);
/* On C+ stepping K8 rep microcode works well for copy/memset */
if (c->x86 == 0xf) {
level = cpuid_eax(1);
if((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58)
set_cpu_cap(c, X86_FEATURE_REP_GOOD);
}
if (c->x86 == 0x10 || c->x86 == 0x11)
set_cpu_cap(c, X86_FEATURE_REP_GOOD);
/* Enable workaround for FXSAVE leak */
if (c->x86 >= 6)
set_cpu_cap(c, X86_FEATURE_FXSAVE_LEAK);
level = get_model_name(c);
if (!level) {
switch (c->x86) {
case 0xf:
/* Should distinguish Models here, but this is only
a fallback anyways. */
strcpy(c->x86_model_id, "Hammer");
break;
}
}
display_cacheinfo(c);
/* Multi core CPU? */
if (c->extended_cpuid_level >= 0x80000008)
amd_detect_cmp(c);
if (c->extended_cpuid_level >= 0x80000006 &&
(cpuid_edx(0x80000006) & 0xf000))
num_cache_leaves = 4;
else
num_cache_leaves = 3;
if (c->x86 >= 0xf && c->x86 <= 0x11)
set_cpu_cap(c, X86_FEATURE_K8);
/* MFENCE stops RDTSC speculation */
set_cpu_cap(c, X86_FEATURE_MFENCE_RDTSC);
if (c->x86 == 0x10) {
/* do this for boot cpu */
if (c == &boot_cpu_data)
check_enable_amd_mmconf_dmi();
fam10h_check_enable_mmcfg();
}
if (c == &boot_cpu_data && c->x86 >= 0xf && c->x86 <= 0x11) {
unsigned long long tseg;
/*
* Split up direct mapping around the TSEG SMM area.
* Don't do it for gbpages because there seems very little
* benefit in doing so.
*/
if (!rdmsrl_safe(MSR_K8_TSEG_ADDR, &tseg)) {
printk(KERN_DEBUG "tseg: %010llx\n", tseg);
if ((tseg>>PMD_SHIFT) <
(max_low_pfn_mapped>>(PMD_SHIFT-PAGE_SHIFT)) ||
((tseg>>PMD_SHIFT) <
(max_pfn_mapped>>(PMD_SHIFT-PAGE_SHIFT)) &&
(tseg>>PMD_SHIFT) >= (1ULL<<(32 - PMD_SHIFT))))
set_memory_4k((unsigned long)__va(tseg), 1);
}
}
}
static struct cpu_dev amd_cpu_dev __cpuinitdata = {
.c_vendor = "AMD",
.c_ident = { "AuthenticAMD" },
.c_early_init = early_init_amd,
.c_init = init_amd,
};
cpu_vendor_dev_register(X86_VENDOR_AMD, &amd_cpu_dev);
......@@ -289,7 +289,6 @@ static void __cpuinit init_c3(struct cpuinfo_x86 *c)
if (c->x86_model >= 6 && c->x86_model < 9)
set_cpu_cap(c, X86_FEATURE_3DNOW);
get_model_name(c);
display_cacheinfo(c);
}
......@@ -475,6 +474,7 @@ static struct cpu_dev centaur_cpu_dev __cpuinitdata = {
.c_early_init = early_init_centaur,
.c_init = init_centaur,
.c_size_cache = centaur_size_cache,
.c_x86_vendor = X86_VENDOR_CENTAUR,
};
cpu_vendor_dev_register(X86_VENDOR_CENTAUR, &centaur_cpu_dev);
cpu_dev_register(centaur_cpu_dev);
......@@ -16,9 +16,10 @@ static void __cpuinit early_init_centaur(struct cpuinfo_x86 *c)
static void __cpuinit init_centaur(struct cpuinfo_x86 *c)
{
early_init_centaur(c);
if (c->x86 == 0x6 && c->x86_model >= 0xf) {
c->x86_cache_alignment = c->x86_clflush_size * 2;
set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
set_cpu_cap(c, X86_FEATURE_REP_GOOD);
}
set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
......@@ -29,7 +30,8 @@ static struct cpu_dev centaur_cpu_dev __cpuinitdata = {
.c_ident = { "CentaurHauls" },
.c_early_init = early_init_centaur,
.c_init = init_centaur,
.c_x86_vendor = X86_VENDOR_CENTAUR,
};
cpu_vendor_dev_register(X86_VENDOR_CENTAUR, &centaur_cpu_dev);
cpu_dev_register(centaur_cpu_dev);
/*
* cmpxchg*() fallbacks for CPUs that lack these instructions
*/
#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/module.h>
#ifndef CONFIG_X86_CMPXCHG
unsigned long cmpxchg_386_u8(volatile void *ptr, u8 old, u8 new)
{
u8 prev;
unsigned long flags;
/* Poor man's cmpxchg for 386. Unsuitable for SMP */
local_irq_save(flags);
prev = *(u8 *)ptr;
if (prev == old)
*(u8 *)ptr = new;
local_irq_restore(flags);
return prev;
}
EXPORT_SYMBOL(cmpxchg_386_u8);
unsigned long cmpxchg_386_u16(volatile void *ptr, u16 old, u16 new)
{
u16 prev;
unsigned long flags;
/* Poor man's cmpxchg for 386. Unsuitable for SMP */
local_irq_save(flags);
prev = *(u16 *)ptr;
if (prev == old)
*(u16 *)ptr = new;
local_irq_restore(flags);
return prev;
}
EXPORT_SYMBOL(cmpxchg_386_u16);
unsigned long cmpxchg_386_u32(volatile void *ptr, u32 old, u32 new)
{
u32 prev;
unsigned long flags;
/* Poor man's cmpxchg for 386. Unsuitable for SMP */
local_irq_save(flags);
prev = *(u32 *)ptr;
if (prev == old)
*(u32 *)ptr = new;
local_irq_restore(flags);
return prev;
}
EXPORT_SYMBOL(cmpxchg_386_u32);
#endif
#ifndef CONFIG_X86_CMPXCHG64
unsigned long long cmpxchg_486_u64(volatile void *ptr, u64 old, u64 new)
{
u64 prev;
unsigned long flags;
/* Poor man's cmpxchg8b for 386 and 486. Unsuitable for SMP */
local_irq_save(flags);
prev = *(u64 *)ptr;
if (prev == old)
*(u64 *)ptr = new;
local_irq_restore(flags);
return prev;
}
EXPORT_SYMBOL(cmpxchg_486_u64);
#endif
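A minimal usage sketch (the demo_ names are hypothetical, not part of this patch), assuming a CONFIG_X86_CMPXCHG=n kernel where the generic cmpxchg() wrapper dispatches to these helpers; the retry loop is the standard pattern they enable:

static u32 demo_counter;

static void demo_counter_add(u32 delta)
{
	u32 old;

	/* classic compare-and-swap retry loop */
	do {
		old = demo_counter;
	} while (cmpxchg_386_u32(&demo_counter, old, old + delta) != old);
}

On a true 386 the helpers only mask interrupts, so on UP the loop succeeds on the first pass.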
......@@ -21,23 +21,16 @@ struct cpu_dev {
void (*c_init)(struct cpuinfo_x86 * c);
void (*c_identify)(struct cpuinfo_x86 * c);
unsigned int (*c_size_cache)(struct cpuinfo_x86 * c, unsigned int size);
int c_x86_vendor;
};
extern struct cpu_dev * cpu_devs [X86_VENDOR_NUM];
#define cpu_dev_register(cpu_devX) \
static struct cpu_dev *__cpu_dev_##cpu_devX __used \
__attribute__((__section__(".x86_cpu_dev.init"))) = \
&cpu_devX;
struct cpu_vendor_dev {
int vendor;
struct cpu_dev *cpu_dev;
};
#define cpu_vendor_dev_register(cpu_vendor_id, cpu_dev) \
static struct cpu_vendor_dev __cpu_vendor_dev_##cpu_vendor_id __used \
__attribute__((__section__(".x86cpuvendor.init"))) = \
{ cpu_vendor_id, cpu_dev }
extern struct cpu_vendor_dev __x86cpuvendor_start[], __x86cpuvendor_end[];
extern struct cpu_dev *__x86_cpu_dev_start[], *__x86_cpu_dev_end[];
extern int get_model_name(struct cpuinfo_x86 *c);
extern void display_cacheinfo(struct cpuinfo_x86 *c);
#endif
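A minimal sketch of what the new registration buys (assuming the matching common.c change in this merge): instead of a fixed vendor-indexed initializer table, early CPU setup can walk the linker section and file each driver under its self-declared vendor:

struct cpu_dev **cdev;

for (cdev = __x86_cpu_dev_start; cdev < __x86_cpu_dev_end; cdev++) {
	struct cpu_dev *cpudev = *cdev;

	if (cpudev->c_x86_vendor >= X86_VENDOR_NUM)
		continue;	/* ignore entries with a bogus vendor id */
	cpu_devs[cpudev->c_x86_vendor] = cpudev;
}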
......@@ -121,7 +121,7 @@ static void __cpuinit set_cx86_reorder(void)
setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */
/* Load/Store Serialize to mem access disable (=reorder it) */
setCx86(CX86_PCR0, getCx86(CX86_PCR0) & ~0x80);
setCx86_old(CX86_PCR0, getCx86_old(CX86_PCR0) & ~0x80);
/* set load/store serialize from 1GB to 4GB */
ccr3 |= 0xe0;
setCx86(CX86_CCR3, ccr3);
......@@ -132,11 +132,11 @@ static void __cpuinit set_cx86_memwb(void)
printk(KERN_INFO "Enable Memory-Write-back mode on Cyrix/NSC processor.\n");
/* CCR2 bit 2: unlock NW bit */
setCx86(CX86_CCR2, getCx86(CX86_CCR2) & ~0x04);
setCx86_old(CX86_CCR2, getCx86_old(CX86_CCR2) & ~0x04);
/* set 'Not Write-through' */
write_cr0(read_cr0() | X86_CR0_NW);
/* CCR2 bit 2: lock NW bit and set WT1 */
setCx86(CX86_CCR2, getCx86(CX86_CCR2) | 0x14);
setCx86_old(CX86_CCR2, getCx86_old(CX86_CCR2) | 0x14);
}
/*
......@@ -150,14 +150,14 @@ static void __cpuinit geode_configure(void)
local_irq_save(flags);
/* Suspend on halt power saving and enable #SUSP pin */
setCx86(CX86_CCR2, getCx86(CX86_CCR2) | 0x88);
setCx86_old(CX86_CCR2, getCx86_old(CX86_CCR2) | 0x88);
ccr3 = getCx86(CX86_CCR3);
setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */
/* FPU fast, DTE cache, Mem bypass */
setCx86(CX86_CCR4, getCx86(CX86_CCR4) | 0x38);
setCx86_old(CX86_CCR4, getCx86_old(CX86_CCR4) | 0x38);
setCx86(CX86_CCR3, ccr3); /* disable MAPEN */
set_cx86_memwb();
......@@ -291,7 +291,7 @@ static void __cpuinit init_cyrix(struct cpuinfo_x86 *c)
/* GXm supports extended cpuid levels 'ala' AMD */
if (c->cpuid_level == 2) {
/* Enable cxMMX extensions (GX1 Datasheet 54) */
setCx86(CX86_CCR7, getCx86(CX86_CCR7) | 1);
setCx86_old(CX86_CCR7, getCx86_old(CX86_CCR7) | 1);
/*
* GXm : 0x30 ... 0x5f GXm datasheet 51
......@@ -301,7 +301,6 @@ static void __cpuinit init_cyrix(struct cpuinfo_x86 *c)
*/
if ((0x30 <= dir1 && dir1 <= 0x6f) || (0x80 <= dir1 && dir1 <= 0x8f))
geode_configure();
get_model_name(c); /* get CPU marketing name */
return;
} else { /* MediaGX */
Cx86_cb[2] = (dir0_lsn & 1) ? '3' : '4';
......@@ -314,7 +313,7 @@ static void __cpuinit init_cyrix(struct cpuinfo_x86 *c)
if (dir1 > 7) {
dir0_msn++; /* M II */
/* Enable MMX extensions (App note 108) */
setCx86(CX86_CCR7, getCx86(CX86_CCR7)|1);
setCx86_old(CX86_CCR7, getCx86_old(CX86_CCR7)|1);
} else {
c->coma_bug = 1; /* 6x86MX, it has the bug. */
}
......@@ -429,7 +428,7 @@ static void __cpuinit cyrix_identify(struct cpuinfo_x86 *c)
local_irq_save(flags);
ccr3 = getCx86(CX86_CCR3);
setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */
setCx86(CX86_CCR4, getCx86(CX86_CCR4) | 0x80); /* enable cpuid */
setCx86_old(CX86_CCR4, getCx86_old(CX86_CCR4) | 0x80); /* enable cpuid */
setCx86(CX86_CCR3, ccr3); /* disable MAPEN */
local_irq_restore(flags);
}
......@@ -442,14 +441,16 @@ static struct cpu_dev cyrix_cpu_dev __cpuinitdata = {
.c_early_init = early_init_cyrix,
.c_init = init_cyrix,
.c_identify = cyrix_identify,
.c_x86_vendor = X86_VENDOR_CYRIX,
};
cpu_vendor_dev_register(X86_VENDOR_CYRIX, &cyrix_cpu_dev);
cpu_dev_register(cyrix_cpu_dev);
static struct cpu_dev nsc_cpu_dev __cpuinitdata = {
.c_vendor = "NSC",
.c_ident = { "Geode by NSC" },
.c_init = init_nsc,
.c_x86_vendor = X86_VENDOR_NSC,
};
cpu_vendor_dev_register(X86_VENDOR_NSC, &nsc_cpu_dev);
cpu_dev_register(nsc_cpu_dev);
/*
* Strings for the various x86 capability flags.
*
* This file must not contain any executable code.
*/
#include <asm/cpufeature.h>
/*
* These flag bits must match the definitions in <asm/cpufeature.h>.
* NULL means this bit is undefined or reserved; either way it doesn't
* have meaning as far as Linux is concerned. Note that it's important
* to realize there is a difference between this table and CPUID -- if
* applications want to get the raw CPUID data, they should access
* /dev/cpu/<cpu_nr>/cpuid instead.
*/
const char * const x86_cap_flags[NCAPINTS*32] = {
/* Intel-defined */
"fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
"cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
"pat", "pse36", "pn", "clflush", NULL, "dts", "acpi", "mmx",
"fxsr", "sse", "sse2", "ss", "ht", "tm", "ia64", "pbe",
/* AMD-defined */
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
NULL, NULL, NULL, "syscall", NULL, NULL, NULL, NULL,
NULL, NULL, NULL, "mp", "nx", NULL, "mmxext", NULL,
NULL, "fxsr_opt", "pdpe1gb", "rdtscp", NULL, "lm",
"3dnowext", "3dnow",
/* Transmeta-defined */
"recovery", "longrun", NULL, "lrti", NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
/* Other (Linux-defined) */
"cxmmx", "k6_mtrr", "cyrix_arr", "centaur_mcr",
NULL, NULL, NULL, NULL,
"constant_tsc", "up", NULL, "arch_perfmon",
"pebs", "bts", NULL, NULL,
"rep_good", NULL, NULL, NULL,
"nopl", NULL, NULL, NULL,
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
/* Intel-defined (#2) */
"pni", NULL, NULL, "monitor", "ds_cpl", "vmx", "smx", "est",
"tm2", "ssse3", "cid", NULL, NULL, "cx16", "xtpr", NULL,
NULL, NULL, "dca", "sse4_1", "sse4_2", "x2apic", NULL, "popcnt",
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
/* VIA/Cyrix/Centaur-defined */
NULL, NULL, "rng", "rng_en", NULL, NULL, "ace", "ace_en",
"ace2", "ace2_en", "phe", "phe_en", "pmm", "pmm_en", NULL, NULL,
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
/* AMD-defined (#2) */
"lahf_lm", "cmp_legacy", "svm", "extapic",
"cr8_legacy", "abm", "sse4a", "misalignsse",
"3dnowprefetch", "osvw", "ibs", "sse5",
"skinit", "wdt", NULL, NULL,
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
/* Auxiliary (Linux-defined) */
"ida", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
};
const char *const x86_power_flags[32] = {
"ts", /* temperature sensor */
"fid", /* frequency id control */
"vid", /* voltage id control */
"ttp", /* thermal trip */
"tm",
"stc",
"100mhzsteps",
"hwpstate",
"", /* tsc invariant mapped to constant_tsc */
/* nothing */
};
#include <linux/init.h>
#include <linux/smp.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/topology.h>
#include <asm/numa_64.h>
#include "cpu.h"
static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
{
if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
(c->x86 == 0x6 && c->x86_model >= 0x0e))
set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
set_cpu_cap(c, X86_FEATURE_SYSENTER32);
}
/*
* find out the number of processor cores on the die
*/
static int __cpuinit intel_num_cpu_cores(struct cpuinfo_x86 *c)
{
unsigned int eax, t;
if (c->cpuid_level < 4)
return 1;
cpuid_count(4, 0, &eax, &t, &t, &t);
if (eax & 0x1f)
return ((eax >> 26) + 1);
else
return 1;
}
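For example (eax value hypothetical):

/* A CPU returning eax == 0x0c000121 for cpuid(4, 0) has a valid leaf
 * (eax & 0x1f == 1), so the function reports (eax >> 26) + 1 = 4 cores. */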
static void __cpuinit srat_detect_node(void)
{
#ifdef CONFIG_NUMA
unsigned node;
int cpu = smp_processor_id();
int apicid = hard_smp_processor_id();
/* Don't do the funky fallback heuristics the AMD version employs
for now. */
node = apicid_to_node[apicid];
if (node == NUMA_NO_NODE || !node_online(node))
node = first_node(node_online_map);
numa_set_node(cpu, node);
printk(KERN_INFO "CPU %d/%x -> Node %d\n", cpu, apicid, node);
#endif
}
static void __cpuinit init_intel(struct cpuinfo_x86 *c)
{
init_intel_cacheinfo(c);
if (c->cpuid_level > 9) {
unsigned eax = cpuid_eax(10);
/* Check for version and the number of counters */
if ((eax & 0xff) && (((eax>>8) & 0xff) > 1))
set_cpu_cap(c, X86_FEATURE_ARCH_PERFMON);
}
if (cpu_has_ds) {
unsigned int l1, l2;
rdmsr(MSR_IA32_MISC_ENABLE, l1, l2);
if (!(l1 & (1<<11)))
set_cpu_cap(c, X86_FEATURE_BTS);
if (!(l1 & (1<<12)))
set_cpu_cap(c, X86_FEATURE_PEBS);
}
if (cpu_has_bts)
ds_init_intel(c);
if (c->x86 == 15)
c->x86_cache_alignment = c->x86_clflush_size * 2;
if (c->x86 == 6)
set_cpu_cap(c, X86_FEATURE_REP_GOOD);
set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
c->x86_max_cores = intel_num_cpu_cores(c);
srat_detect_node();
}
static struct cpu_dev intel_cpu_dev __cpuinitdata = {
.c_vendor = "Intel",
.c_ident = { "GenuineIntel" },
.c_early_init = early_init_intel,
.c_init = init_intel,
};
cpu_vendor_dev_register(X86_VENDOR_INTEL, &intel_cpu_dev);
/*
* Routines to identify caches on Intel CPU.
*
* Changes:
* Venkatesh Pallipadi : Adding cache identification through cpuid(4)
* Ashok Raj <ashok.raj@intel.com>: Work with CPU hotplug infrastructure.
* Andi Kleen / Andreas Herrmann : CPUID4 emulation on AMD.
*/
......@@ -13,6 +13,7 @@
#include <linux/compiler.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <asm/processor.h>
#include <asm/smp.h>
......@@ -130,9 +131,18 @@ struct _cpuid4_info {
union _cpuid4_leaf_ebx ebx;
union _cpuid4_leaf_ecx ecx;
unsigned long size;
unsigned long can_disable;
cpumask_t shared_cpu_map; /* future?: only cpus/node is needed */
};
#ifdef CONFIG_PCI
static struct pci_device_id k8_nb_id[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x1103) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x1203) },
{}
};
#endif
unsigned short num_cache_leaves;
/* AMD doesn't have CPUID4. Emulate it here to report the same
......@@ -182,9 +192,10 @@ static unsigned short assocs[] __cpuinitdata = {
static unsigned char levels[] __cpuinitdata = { 1, 1, 2, 3 };
static unsigned char types[] __cpuinitdata = { 1, 2, 3, 3 };
static void __cpuinit amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
union _cpuid4_leaf_ebx *ebx,
union _cpuid4_leaf_ecx *ecx)
static void __cpuinit
amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
union _cpuid4_leaf_ebx *ebx,
union _cpuid4_leaf_ecx *ecx)
{
unsigned dummy;
unsigned line_size, lines_per_tag, assoc, size_in_kb;
......@@ -251,27 +262,40 @@ static void __cpuinit amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
(ebx->split.ways_of_associativity + 1) - 1;
}
static int __cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf)
static void __cpuinit
amd_check_l3_disable(int index, struct _cpuid4_info *this_leaf)
{
if (index < 3)
return;
this_leaf->can_disable = 1;
}
static int
__cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf)
{
union _cpuid4_leaf_eax eax;
union _cpuid4_leaf_ebx ebx;
union _cpuid4_leaf_ecx ecx;
unsigned edx;
if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
amd_cpuid4(index, &eax, &ebx, &ecx);
else
cpuid_count(4, index, &eax.full, &ebx.full, &ecx.full, &edx);
if (boot_cpu_data.x86 >= 0x10)
amd_check_l3_disable(index, this_leaf);
} else {
cpuid_count(4, index, &eax.full, &ebx.full, &ecx.full, &edx);
}
if (eax.split.type == CACHE_TYPE_NULL)
return -EIO; /* better error ? */
this_leaf->eax = eax;
this_leaf->ebx = ebx;
this_leaf->ecx = ecx;
this_leaf->size = (ecx.split.number_of_sets + 1) *
(ebx.split.coherency_line_size + 1) *
(ebx.split.physical_line_partition + 1) *
(ebx.split.ways_of_associativity + 1);
return 0;
}
......@@ -453,7 +477,7 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
/* pointer to _cpuid4_info array (for each cache leaf) */
static DEFINE_PER_CPU(struct _cpuid4_info *, cpuid4_info);
#define CPUID4_INFO_IDX(x, y) (&((per_cpu(cpuid4_info, x))[y]))
#ifdef CONFIG_SMP
static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
......@@ -490,7 +514,7 @@ static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index)
this_leaf = CPUID4_INFO_IDX(cpu, index);
for_each_cpu_mask_nr(sibling, this_leaf->shared_cpu_map) {
sibling_leaf = CPUID4_INFO_IDX(sibling, index);
cpu_clear(cpu, sibling_leaf->shared_cpu_map);
}
}
......@@ -572,7 +596,7 @@ struct _index_kobject {
/* pointer to array of kobjects for cpuX/cache/indexY */
static DEFINE_PER_CPU(struct _index_kobject *, index_kobject);
#define INDEX_KOBJECT_PTR(x, y) (&((per_cpu(index_kobject, x))[y]))
#define show_one_plus(file_name, object, val) \
static ssize_t show_##file_name \
......@@ -637,6 +661,99 @@ static ssize_t show_type(struct _cpuid4_info *this_leaf, char *buf) {
}
}
#define to_object(k) container_of(k, struct _index_kobject, kobj)
#define to_attr(a) container_of(a, struct _cache_attr, attr)
#ifdef CONFIG_PCI
static struct pci_dev *get_k8_northbridge(int node)
{
struct pci_dev *dev = NULL;
int i;
for (i = 0; i <= node; i++) {
do {
dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev);
if (!dev)
break;
} while (!pci_match_id(&k8_nb_id[0], dev));
if (!dev)
break;
}
return dev;
}
#else
static struct pci_dev *get_k8_northbridge(int node)
{
return NULL;
}
#endif
static ssize_t show_cache_disable(struct _cpuid4_info *this_leaf, char *buf)
{
int node = cpu_to_node(first_cpu(this_leaf->shared_cpu_map));
struct pci_dev *dev = NULL;
ssize_t ret = 0;
int i;
if (!this_leaf->can_disable)
return sprintf(buf, "Feature not enabled\n");
dev = get_k8_northbridge(node);
if (!dev) {
printk(KERN_ERR "Attempting AMD northbridge operation on a system with no northbridge\n");
return -EINVAL;
}
for (i = 0; i < 2; i++) {
unsigned int reg;
pci_read_config_dword(dev, 0x1BC + i * 4, &reg);
ret += sprintf(buf, "%sEntry: %d\n", buf, i);
ret += sprintf(buf, "%sReads: %s\tNew Entries: %s\n",
buf,
reg & 0x80000000 ? "Disabled" : "Allowed",
reg & 0x40000000 ? "Disabled" : "Allowed");
ret += sprintf(buf, "%sSubCache: %x\tIndex: %x\n",
buf, (reg & 0x30000) >> 16, reg & 0xfff);
}
return ret;
}
static ssize_t
store_cache_disable(struct _cpuid4_info *this_leaf, const char *buf,
size_t count)
{
int node = cpu_to_node(first_cpu(this_leaf->shared_cpu_map));
struct pci_dev *dev = NULL;
unsigned int ret, index, val;
if (!this_leaf->can_disable)
return 0;
if (strlen(buf) > 15)
return -EINVAL;
ret = sscanf(buf, "%x %x", &index, &val);
if (ret != 2)
return -EINVAL;
if (index > 1)
return -EINVAL;
val |= 0xc0000000;
dev = get_k8_northbridge(node);
if (!dev) {
printk(KERN_ERR "Attempting AMD northbridge operation on a system with no northbridge\n");
return -EINVAL;
}
pci_write_config_dword(dev, 0x1BC + index * 4, val & ~0x40000000);
wbinvd();
pci_write_config_dword(dev, 0x1BC + index * 4, val);
return 1;
}
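A usage sketch, with the sysfs path and value purely illustrative: on a family 0x10 system,

echo "0 0xfff" > /sys/devices/system/cpu/cpu0/cache/index3/cache_disable

parses index 0 and value 0xfff, ORs in 0xc0000000 (reads and new entries both disabled, matching the decode in show_cache_disable() above), and writes the result to the northbridge register at 0x1BC, with a wbinvd() in between to flush the cache.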
struct _cache_attr {
struct attribute attr;
ssize_t (*show)(struct _cpuid4_info *, char *);
......@@ -657,6 +774,8 @@ define_one_ro(size);
define_one_ro(shared_cpu_map);
define_one_ro(shared_cpu_list);
static struct _cache_attr cache_disable = __ATTR(cache_disable, 0644, show_cache_disable, store_cache_disable);
static struct attribute * default_attrs[] = {
&type.attr,
&level.attr,
......@@ -667,12 +786,10 @@ static struct attribute * default_attrs[] = {
&size.attr,
&shared_cpu_map.attr,
&shared_cpu_list.attr,
&cache_disable.attr,
NULL
};
#define to_object(k) container_of(k, struct _index_kobject, kobj)
#define to_attr(a) container_of(a, struct _cache_attr, attr)
static ssize_t show(struct kobject * kobj, struct attribute * attr, char * buf)
{
struct _cache_attr *fattr = to_attr(attr);
......@@ -682,14 +799,22 @@ static ssize_t show(struct kobject * kobj, struct attribute * attr, char * buf)
ret = fattr->show ?
fattr->show(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index),
buf) :
0;
return ret;
}
static ssize_t store(struct kobject * kobj, struct attribute * attr,
const char * buf, size_t count)
{
return 0;
struct _cache_attr *fattr = to_attr(attr);
struct _index_kobject *this_leaf = to_object(kobj);
ssize_t ret;
ret = fattr->store ?
fattr->store(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index),
buf, count) :
0;
return ret;
}
static struct sysfs_ops sysfs_ops = {
......
......@@ -860,7 +860,7 @@ static __cpuinit int mce_create_device(unsigned int cpu)
return err;
}
static void mce_remove_device(unsigned int cpu)
static __cpuinit void mce_remove_device(unsigned int cpu)
{
int i;
......
#!/usr/bin/perl
#
# Generate the x86_cap_flags[] array from include/asm-x86/cpufeature.h
#
($in, $out) = @ARGV;
open(IN, "< $in\0") or die "$0: cannot open: $in: $!\n";
open(OUT, "> $out\0") or die "$0: cannot create: $out: $!\n";
print OUT "#include <asm/cpufeature.h>\n\n";
print OUT "const char * const x86_cap_flags[NCAPINTS*32] = {\n";
while (defined($line = <IN>)) {
if ($line =~ /^\s*\#\s*define\s+(X86_FEATURE_(\S+))\s+(.*)$/) {
$macro = $1;
$feature = $2;
$tail = $3;
if ($tail =~ /\/\*\s*\"([^"]*)\".*\*\//) {
$feature = $1;
}
if ($feature ne '') {
printf OUT "\t%-32s = \"%s\",\n",
"[$macro]", "\L$feature";
}
}
}
print OUT "};\n";
close(IN);
close(OUT);
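As an example of the transformation (the macro is real, though the exact comment text in cpufeature.h may differ), an input line such as

#define X86_FEATURE_CONSTANT_TSC (3*32+ 8) /* "constant_tsc" TSC ticks at a constant rate */

comes out as

	[X86_FEATURE_CONSTANT_TSC]       = "constant_tsc",

A quoted name inside the comment overrides the lowercased macro suffix, which is how flags such as "pni" keep their established /proc/cpuinfo names.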
/*
* Strings for the various x86 power flags
*
* This file must not contain any executable code.
*/
#include <asm/cpufeature.h>
const char *const x86_power_flags[32] = {
"ts", /* temperature sensor */
"fid", /* frequency id control */
"vid", /* voltage id control */
"ttp", /* thermal trip */
"tm",
"stc",
"100mhzsteps",
"hwpstate",
"", /* tsc invariant mapped to constant_tsc */
/* nothing */
};
......@@ -5,6 +5,18 @@
#include <asm/msr.h>
#include "cpu.h"
static void __cpuinit early_init_transmeta(struct cpuinfo_x86 *c)
{
u32 xlvl;
/* Transmeta-defined flags: level 0x80860001 */
xlvl = cpuid_eax(0x80860000);
if ((xlvl & 0xffff0000) == 0x80860000) {
if (xlvl >= 0x80860001)
c->x86_capability[2] = cpuid_edx(0x80860001);
}
}
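For example (return value hypothetical):

/* A Crusoe returning cpuid_eax(0x80860000) == 0x80860007 passes both the
 * vendor-range check (0x8086xxxx) and the level check, so word 2 of
 * x86_capability picks up the Transmeta flags ("recovery", "longrun",
 * "lrti", ...) from cpuid_edx(0x80860001). */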
static void __cpuinit init_transmeta(struct cpuinfo_x86 *c)
{
unsigned int cap_mask, uk, max, dummy;
......@@ -12,7 +24,8 @@ static void __cpuinit init_transmeta(struct cpuinfo_x86 *c)
unsigned int cpu_rev, cpu_freq = 0, cpu_flags, new_cpu_rev;
char cpu_info[65];
get_model_name(c); /* Same as AMD/Cyrix */
early_init_transmeta(c);
display_cacheinfo(c);
/* Print CMS and CPU revision */
......@@ -85,23 +98,12 @@ static void __cpuinit init_transmeta(struct cpuinfo_x86 *c)
#endif
}
static void __cpuinit transmeta_identify(struct cpuinfo_x86 *c)
{
u32 xlvl;
/* Transmeta-defined flags: level 0x80860001 */
xlvl = cpuid_eax(0x80860000);
if ((xlvl & 0xffff0000) == 0x80860000) {
if (xlvl >= 0x80860001)
c->x86_capability[2] = cpuid_edx(0x80860001);
}
}
static struct cpu_dev transmeta_cpu_dev __cpuinitdata = {
.c_vendor = "Transmeta",
.c_ident = { "GenuineTMx86", "TransmetaCPU" },
.c_early_init = early_init_transmeta,
.c_init = init_transmeta,
.c_identify = transmeta_identify,
.c_x86_vendor = X86_VENDOR_TRANSMETA,
};
cpu_vendor_dev_register(X86_VENDOR_TRANSMETA, &transmeta_cpu_dev);
cpu_dev_register(transmeta_cpu_dev);
......@@ -19,7 +19,8 @@ static struct cpu_dev umc_cpu_dev __cpuinitdata = {
}
},
},
.c_x86_vendor = X86_VENDOR_UMC,
};
cpu_vendor_dev_register(X86_VENDOR_UMC, &umc_cpu_dev);
cpu_dev_register(umc_cpu_dev);
......@@ -148,6 +148,9 @@ void __init e820_print_map(char *who)
case E820_NVS:
printk(KERN_CONT "(ACPI NVS)\n");
break;
case E820_UNUSABLE:
printk("(unusable)\n");
break;
default:
printk(KERN_CONT "type %u\n", e820.map[i].type);
break;
......@@ -1260,6 +1263,7 @@ static inline const char *e820_type_to_string(int e820_type)
case E820_RAM: return "System RAM";
case E820_ACPI: return "ACPI Tables";
case E820_NVS: return "ACPI Non-volatile Storage";
case E820_UNUSABLE: return "Unusable memory";
default: return "reserved";
}
}
......@@ -1267,6 +1271,7 @@ static inline const char *e820_type_to_string(int e820_type)
/*
* Mark e820 reserved areas as busy for the resource manager.
*/
static struct resource __initdata *e820_res;
void __init e820_reserve_resources(void)
{
int i;
......@@ -1274,6 +1279,7 @@ void __init e820_reserve_resources(void)
u64 end;
res = alloc_bootmem_low(sizeof(struct resource) * e820.nr_map);
e820_res = res;
for (i = 0; i < e820.nr_map; i++) {
end = e820.map[i].addr + e820.map[i].size - 1;
#ifndef CONFIG_RESOURCES_64BIT
......@@ -1287,7 +1293,14 @@ void __init e820_reserve_resources(void)
res->end = end;
res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
insert_resource(&iomem_resource, res);
/*
* don't register the region that could be conflicted with
* pci device BAR resource and insert them later in
* pcibios_resource_survey()
*/
if (e820.map[i].type != E820_RESERVED || res->start < (1ULL<<20))
insert_resource(&iomem_resource, res);
res++;
}
......@@ -1299,6 +1312,19 @@ void __init e820_reserve_resources(void)
}
}
void __init e820_reserve_resources_late(void)
{
int i;
struct resource *res;
res = e820_res;
for (i = 0; i < e820.nr_map; i++) {
if (!res->parent && res->end)
reserve_region_with_split(&iomem_resource, res->start, res->end, res->name);
res++;
}
}
char *__init default_machine_specific_memory_setup(void)
{
char *who = "BIOS-e820";
......
......@@ -39,9 +39,92 @@
#include <asm/nmi.h>
#include <asm/smp.h>
#include <asm/apicdef.h>
#include "es7000.h"
#include <mach_mpparse.h>
/*
* ES7000 chipsets
*/
#define NON_UNISYS 0
#define ES7000_CLASSIC 1
#define ES7000_ZORRO 2
#define MIP_REG 1
#define MIP_PSAI_REG 4
#define MIP_BUSY 1
#define MIP_SPIN 0xf0000
#define MIP_VALID 0x0100000000000000ULL
#define MIP_PORT(VALUE) ((VALUE >> 32) & 0xffff)
#define MIP_RD_LO(VALUE) (VALUE & 0xffffffff)
struct mip_reg_info {
unsigned long long mip_info;
unsigned long long delivery_info;
unsigned long long host_reg;
unsigned long long mip_reg;
};
struct part_info {
unsigned char type;
unsigned char length;
unsigned char part_id;
unsigned char apic_mode;
unsigned long snum;
char ptype[16];
char sname[64];
char pname[64];
};
struct psai {
unsigned long long entry_type;
unsigned long long addr;
unsigned long long bep_addr;
};
struct es7000_mem_info {
unsigned char type;
unsigned char length;
unsigned char resv[6];
unsigned long long start;
unsigned long long size;
};
struct es7000_oem_table {
unsigned long long hdr;
struct mip_reg_info mip;
struct part_info pif;
struct es7000_mem_info shm;
struct psai psai;
};
#ifdef CONFIG_ACPI
struct oem_table {
struct acpi_table_header Header;
u32 OEMTableAddr;
u32 OEMTableSize;
};
extern int find_unisys_acpi_oem_table(unsigned long *oem_addr);
#endif
struct mip_reg {
unsigned long long off_0;
unsigned long long off_8;
unsigned long long off_10;
unsigned long long off_18;
unsigned long long off_20;
unsigned long long off_28;
unsigned long long off_30;
unsigned long long off_38;
};
#define MIP_SW_APIC 0x1020b
#define MIP_FUNC(VALUE) (VALUE & 0xff)
/*
* ES7000 Globals
*/
......
......@@ -120,14 +120,9 @@ static unsigned long set_apic_id(unsigned int id)
return x;
}
static unsigned int x2apic_read_id(void)
{
return apic_read(APIC_ID);
}
static unsigned int phys_pkg_id(int index_msb)
{
return x2apic_read_id() >> index_msb;
return current_cpu_data.initial_apicid >> index_msb;
}
static void x2apic_send_IPI_self(int vector)
......
......@@ -118,14 +118,9 @@ static unsigned long set_apic_id(unsigned int id)
return x;
}
static unsigned int x2apic_read_id(void)
{
return apic_read(APIC_ID);
}
static unsigned int phys_pkg_id(int index_msb)
{
return x2apic_read_id() >> index_msb;
return current_cpu_data.initial_apicid >> index_msb;
}
void x2apic_send_IPI_self(int vector)
......
......@@ -15,7 +15,6 @@ unsigned long idle_nomwait;
EXPORT_SYMBOL(idle_nomwait);
struct kmem_cache *task_xstate_cachep;
static int force_mwait __cpuinitdata;
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
......
......@@ -3,9 +3,18 @@ struct sigframe {
char __user *pretcode;
int sig;
struct sigcontext sc;
struct _fpstate fpstate;
/*
* fpstate is unused. The FP state is now moved/allocated after
* retcode[] below; this lets the FP state and future state
* extensions (xsave) stay together. At the same time, keeping the
* unused fpstate field preserves the offset of extramask[] in the
* sigframe, so any legacy application that accesses or modifies it
* keeps working.
*/
struct _fpstate fpstate_unused;
unsigned long extramask[_NSIG_WORDS-1];
char retcode[8];
/* fp state follows here */
};
struct rt_sigframe {
......@@ -15,14 +24,15 @@ struct rt_sigframe {
void __user *puc;
struct siginfo info;
struct ucontext uc;
struct _fpstate fpstate;
char retcode[8];
/* fp state follows here */
};
#else
struct rt_sigframe {
char __user *pretcode;
struct ucontext uc;
struct siginfo info;
/* fp state follows here */
};
int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
......
......@@ -1228,7 +1228,6 @@ void __init trap_init(void)
set_bit(SYSCALL_VECTOR, used_vectors);
init_thread_xstate();
/*
* Should be a barrier for any external CPU state:
*/
......
......@@ -1138,7 +1138,7 @@ asmlinkage void math_state_restore(void)
/*
* Paranoid restore. send a SIGSEGV if we fail to restore the state.
*/
if (unlikely(restore_fpu_checking(&me->thread.xstate->fxsave))) {
if (unlikely(restore_fpu_checking(me))) {
stts();
force_sig(SIGSEGV, me);
return;
......@@ -1178,10 +1178,6 @@ void __init trap_init(void)
#ifdef CONFIG_IA32_EMULATION
set_system_gate(IA32_SYSCALL_VECTOR, ia32_syscall);
#endif
/*
* initialize the per thread extended state:
*/
init_thread_xstate();
/*
* Should be a barrier for any external CPU state:
*/
......
......@@ -140,10 +140,10 @@ SECTIONS
*(.con_initcall.init)
__con_initcall_end = .;
}
.x86cpuvendor.init : AT(ADDR(.x86cpuvendor.init) - LOAD_OFFSET) {
__x86cpuvendor_start = .;
*(.x86cpuvendor.init)
__x86cpuvendor_end = .;
.x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
__x86_cpu_dev_start = .;
*(.x86_cpu_dev.init)
__x86_cpu_dev_end = .;
}
SECURITY_INIT
. = ALIGN(4);
......@@ -180,6 +180,7 @@ SECTIONS
. = ALIGN(PAGE_SIZE);
.data.percpu : AT(ADDR(.data.percpu) - LOAD_OFFSET) {
__per_cpu_start = .;
*(.data.percpu.page_aligned)
*(.data.percpu)
*(.data.percpu.shared_aligned)
__per_cpu_end = .;
......
......@@ -168,13 +168,12 @@ SECTIONS
*(.con_initcall.init)
}
__con_initcall_end = .;
. = ALIGN(16);
__x86cpuvendor_start = .;
.x86cpuvendor.init : AT(ADDR(.x86cpuvendor.init) - LOAD_OFFSET) {
*(.x86cpuvendor.init)
__x86_cpu_dev_start = .;
.x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
*(.x86_cpu_dev.init)
}
__x86cpuvendor_end = .;
SECURITY_INIT
__x86_cpu_dev_end = .;
. = ALIGN(8);
.parainstructions : AT(ADDR(.parainstructions) - LOAD_OFFSET) {
......
......@@ -17,9 +17,6 @@ ifeq ($(CONFIG_X86_32),y)
lib-$(CONFIG_X86_USE_3DNOW) += mmx_32.o
else
obj-y += io_64.o iomap_copy_64.o
CFLAGS_csum-partial_64.o := -funroll-loops
lib-y += csum-partial_64.o csum-copy_64.o csum-wrappers_64.o
lib-y += thunk_64.o clear_page_64.o copy_page_64.o
lib-y += memmove_64.o memset_64.o
......