Commit c44e3ed5 authored by Linus Torvalds

Merge branch 'x86-cpu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'x86-cpu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  x86: cpu_debug: Remove model information to reduce encoding-decoding
  x86: fixup numa_node information for AMD CPU northbridge functions
  x86: k8 convert node_to_k8_nb_misc() from a macro to an inline function
  x86: cacheinfo: complete L2/L3 Cache and TLB associativity field definitions
  x86/docs: add description for cache_disable sysfs interface
  x86: cacheinfo: disable L3 ECC scrubbing when L3 cache index is disabled
  x86: cacheinfo: replace sysfs interface for cache_disable feature
  x86: cacheinfo: use cached K8 NB_MISC devices instead of scanning for it
  x86: cacheinfo: correct return value when cache_disable feature is not active
  x86: cacheinfo: use L3 cache index disable feature only for CPUs that support it
parents 7dc3ca39 5095f59b
What: /sys/devices/system/cpu/cpu*/cache/index*/cache_disable_X
Date: August 2008
KernelVersion: 2.6.27
Contact: mark.langsdorf@amd.com
Description: These files exist in every CPU's cache index directory.
There are currently 2 cache_disable_X files in each
directory. Reading from these files on a supported
processor returns the cache disable index value for
that processor and node. Writing to one of these
files causes the specified cache index to be disabled.
Currently, only AMD Family 10h Processors support cache index
disable, and only for their L3 caches. See the BIOS and
Kernel Developer's Guide at
http://www.amd.com/us-en/assets/content_type/white_papers_and_tech_docs/31116-Public-GH-BKDG_3.20_2-4-09.pdf
for formatting information and other details on the
cache index disable feature.
Users: joachim.deguara@amd.com
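For illustration only, a minimal user-space sketch of driving this
interface (the cpu0/index3 path and the written value are assumptions;
consult the BKDG before disabling anything on real hardware):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	const char *path =
		"/sys/devices/system/cpu/cpu0/cache/index3/cache_disable_0";
	char buf[16] = "";
	int fd = open(path, O_RDWR);

	if (fd < 0)
		return 1;
	if (read(fd, buf, sizeof(buf) - 1) > 0)
		printf("current register value: %s", buf); /* hex */
	lseek(fd, 0, SEEK_SET);
	write(fd, "1", 1); /* assumed: disable cache index 1 */
	close(fd);
	return 0;
}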
arch/x86/include/asm/cpu_debug.h
@@ -88,104 +88,6 @@ enum cpu_file_bit {
#define CPU_FILE_VALUE (1 << CPU_VALUE_BIT)
/*
* DisplayFamily_DisplayModel Processor Families/Processor Number Series
* -------------------------- ------------------------------------------
* 05_01, 05_02, 05_04 Pentium, Pentium with MMX
*
* 06_01 Pentium Pro
* 06_03, 06_05 Pentium II Xeon, Pentium II
* 06_07, 06_08, 06_0A, 06_0B Pentium III Xeon, Pentium III
*
* 06_09, 06_0D Pentium M
*
* 06_0E Core Duo, Core Solo
*
* 06_0F Xeon 3000, 3200, 5100, 5300, 7300 series,
* Core 2 Quad, Core 2 Extreme, Core 2 Duo,
* Pentium dual-core
* 06_17 Xeon 5200, 5400 series, Core 2 Quad Q9650
*
* 06_1C Atom
*
* 0F_00, 0F_01, 0F_02 Xeon, Xeon MP, Pentium 4
* 0F_03, 0F_04 Xeon, Xeon MP, Pentium 4, Pentium D
*
* 0F_06 Xeon 7100, 5000 Series, Xeon MP,
* Pentium 4, Pentium D
*/
/* Register processors bits */
enum cpu_processor_bit {
CPU_NONE,
/* Intel */
CPU_INTEL_PENTIUM_BIT,
CPU_INTEL_P6_BIT,
CPU_INTEL_PENTIUM_M_BIT,
CPU_INTEL_CORE_BIT,
CPU_INTEL_CORE2_BIT,
CPU_INTEL_ATOM_BIT,
CPU_INTEL_XEON_P4_BIT,
CPU_INTEL_XEON_MP_BIT,
/* AMD */
CPU_AMD_K6_BIT,
CPU_AMD_K7_BIT,
CPU_AMD_K8_BIT,
CPU_AMD_0F_BIT,
CPU_AMD_10_BIT,
CPU_AMD_11_BIT,
};
#define CPU_INTEL_PENTIUM (1 << CPU_INTEL_PENTIUM_BIT)
#define CPU_INTEL_P6 (1 << CPU_INTEL_P6_BIT)
#define CPU_INTEL_PENTIUM_M (1 << CPU_INTEL_PENTIUM_M_BIT)
#define CPU_INTEL_CORE (1 << CPU_INTEL_CORE_BIT)
#define CPU_INTEL_CORE2 (1 << CPU_INTEL_CORE2_BIT)
#define CPU_INTEL_ATOM (1 << CPU_INTEL_ATOM_BIT)
#define CPU_INTEL_XEON_P4 (1 << CPU_INTEL_XEON_P4_BIT)
#define CPU_INTEL_XEON_MP (1 << CPU_INTEL_XEON_MP_BIT)
#define CPU_INTEL_PX (CPU_INTEL_P6 | CPU_INTEL_PENTIUM_M)
#define CPU_INTEL_COREX (CPU_INTEL_CORE | CPU_INTEL_CORE2)
#define CPU_INTEL_XEON (CPU_INTEL_XEON_P4 | CPU_INTEL_XEON_MP)
#define CPU_CO_AT (CPU_INTEL_CORE | CPU_INTEL_ATOM)
#define CPU_C2_AT (CPU_INTEL_CORE2 | CPU_INTEL_ATOM)
#define CPU_CX_AT (CPU_INTEL_COREX | CPU_INTEL_ATOM)
#define CPU_CX_XE (CPU_INTEL_COREX | CPU_INTEL_XEON)
#define CPU_P6_XE (CPU_INTEL_P6 | CPU_INTEL_XEON)
#define CPU_PM_CO_AT (CPU_INTEL_PENTIUM_M | CPU_CO_AT)
#define CPU_C2_AT_XE (CPU_C2_AT | CPU_INTEL_XEON)
#define CPU_CX_AT_XE (CPU_CX_AT | CPU_INTEL_XEON)
#define CPU_P6_CX_AT (CPU_INTEL_P6 | CPU_CX_AT)
#define CPU_P6_CX_XE (CPU_P6_XE | CPU_INTEL_COREX)
#define CPU_P6_CX_AT_XE (CPU_INTEL_P6 | CPU_CX_AT_XE)
#define CPU_PM_CX_AT_XE (CPU_INTEL_PENTIUM_M | CPU_CX_AT_XE)
#define CPU_PM_CX_AT (CPU_INTEL_PENTIUM_M | CPU_CX_AT)
#define CPU_PM_CX_XE (CPU_INTEL_PENTIUM_M | CPU_CX_XE)
#define CPU_PX_CX_AT (CPU_INTEL_PX | CPU_CX_AT)
#define CPU_PX_CX_AT_XE (CPU_INTEL_PX | CPU_CX_AT_XE)
/* Select all supported Intel CPUs */
#define CPU_INTEL_ALL (CPU_INTEL_PENTIUM | CPU_PX_CX_AT_XE)
#define CPU_AMD_K6 (1 << CPU_AMD_K6_BIT)
#define CPU_AMD_K7 (1 << CPU_AMD_K7_BIT)
#define CPU_AMD_K8 (1 << CPU_AMD_K8_BIT)
#define CPU_AMD_0F (1 << CPU_AMD_0F_BIT)
#define CPU_AMD_10 (1 << CPU_AMD_10_BIT)
#define CPU_AMD_11 (1 << CPU_AMD_11_BIT)
#define CPU_K10_PLUS (CPU_AMD_10 | CPU_AMD_11)
#define CPU_K0F_PLUS (CPU_AMD_0F | CPU_K10_PLUS)
#define CPU_K8_PLUS (CPU_AMD_K8 | CPU_K0F_PLUS)
#define CPU_K7_PLUS (CPU_AMD_K7 | CPU_K8_PLUS)
/* Select all supported AMD CPUs */
#define CPU_AMD_ALL (CPU_AMD_K6 | CPU_K7_PLUS)
/* Select all supported CPUs */
#define CPU_ALL (CPU_INTEL_ALL | CPU_AMD_ALL)
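/*
 * Example composition (illustrative): CPU_K8_PLUS expands to
 * CPU_AMD_K8 | CPU_AMD_0F | CPU_AMD_10 | CPU_AMD_11, i.e. K8 and
 * every later supported AMD family.
 */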
#define MAX_CPU_FILES 512
struct cpu_private {
@@ -220,7 +122,6 @@ struct cpu_debug_range {
unsigned min; /* Register range min */
unsigned max; /* Register range max */
unsigned flag; /* Supported flags */
unsigned model; /* Supported models */
};
#endif /* _ASM_X86_CPU_DEBUG_H */
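With the model field removed (see the struct change above), a register
range table entry pairs an MSR range directly with the CPU masks
defined earlier. A hypothetical entry might look like:

/* Hypothetical entry: report MSRs 0x0-0x1 on every supported CPU */
static struct cpu_debug_range cpu_example_ranges[] = {
	{ 0x00000000, 0x00000001, CPU_ALL },
};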
arch/x86/include/asm/k8.h
@@ -12,4 +12,17 @@ extern int cache_k8_northbridges(void);
extern void k8_flush_garts(void);
extern int k8_scan_nodes(unsigned long start, unsigned long end);
#ifdef CONFIG_K8_NB
static inline struct pci_dev *node_to_k8_nb_misc(int node)
{
return (node < num_k8_northbridges) ? k8_northbridges[node] : NULL;
}
#else
static inline struct pci_dev *node_to_k8_nb_misc(int node)
{
return NULL;
}
#endif
#endif /* _ASM_X86_K8_H */
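A sketch of the intended call pattern (hypothetical caller; this
mirrors what the cacheinfo code below does):

/* Hypothetical caller: look up the NB misc function for a CPU's node */
static struct pci_dev *nb_misc_for_cpu(int cpu)
{
	return node_to_k8_nb_misc(cpu_to_node(cpu)); /* may be NULL */
}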
This diff is collapsed.
arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -17,6 +17,7 @@
#include <asm/processor.h>
#include <asm/smp.h>
#include <asm/k8.h>
#define LVL_1_INST 1
#define LVL_1_DATA 2
@@ -159,14 +160,6 @@ struct _cpuid4_info_regs {
unsigned long can_disable;
};
#if defined(CONFIG_PCI) && defined(CONFIG_SYSFS)
static struct pci_device_id k8_nb_id[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x1103) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x1203) },
{}
};
#endif
unsigned short num_cache_leaves;
/* AMD doesn't have CPUID4. Emulate it here to report the same
@@ -207,10 +200,17 @@ union l3_cache {
};
static const unsigned short __cpuinitconst assocs[] = {
[1] = 1, [2] = 2, [4] = 4, [6] = 8,
[8] = 16, [0xa] = 32, [0xb] = 48,
[1] = 1,
[2] = 2,
[4] = 4,
[6] = 8,
[8] = 16,
[0xa] = 32,
[0xb] = 48,
[0xc] = 64,
[0xf] = 0xffff // ??
[0xd] = 96,
[0xe] = 128,
[0xf] = 0xffff /* fully associative - no way to show this currently */
};
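/*
 * Worked example (illustrative): a CPUID associativity field of 0xa
 * decodes to 32 ways via this table; 0xf denotes a fully associative
 * cache.
 */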
static const unsigned char __cpuinitconst levels[] = { 1, 1, 2, 3 };
@@ -271,7 +271,8 @@ amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
eax->split.type = types[leaf];
eax->split.level = levels[leaf];
if (leaf == 3)
eax->split.num_threads_sharing = current_cpu_data.x86_max_cores - 1;
eax->split.num_threads_sharing =
current_cpu_data.x86_max_cores - 1;
else
eax->split.num_threads_sharing = 0;
eax->split.num_cores_on_die = current_cpu_data.x86_max_cores - 1;
@@ -291,6 +292,14 @@ amd_check_l3_disable(int index, struct _cpuid4_info_regs *this_leaf)
{
if (index < 3)
return;
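	/* assumed rationale: family 0x11 has no L3 cache index disable */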
if (boot_cpu_data.x86 == 0x11)
return;
/* see erratum #382 */
if ((boot_cpu_data.x86 == 0x10) && (boot_cpu_data.x86_model < 0x8))
return;
this_leaf->can_disable = 1;
}
@@ -696,97 +705,75 @@ static ssize_t show_type(struct _cpuid4_info *this_leaf, char *buf)
#define to_object(k) container_of(k, struct _index_kobject, kobj)
#define to_attr(a) container_of(a, struct _cache_attr, attr)
#ifdef CONFIG_PCI
static struct pci_dev *get_k8_northbridge(int node)
{
struct pci_dev *dev = NULL;
int i;
for (i = 0; i <= node; i++) {
do {
dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev);
if (!dev)
break;
} while (!pci_match_id(&k8_nb_id[0], dev));
if (!dev)
break;
}
return dev;
}
#else
static struct pci_dev *get_k8_northbridge(int node)
{
return NULL;
}
#endif
static ssize_t show_cache_disable(struct _cpuid4_info *this_leaf, char *buf)
static ssize_t show_cache_disable(struct _cpuid4_info *this_leaf, char *buf,
unsigned int index)
{
const struct cpumask *mask = to_cpumask(this_leaf->shared_cpu_map);
int node = cpu_to_node(cpumask_first(mask));
struct pci_dev *dev = NULL;
ssize_t ret = 0;
int i;
int cpu = cpumask_first(to_cpumask(this_leaf->shared_cpu_map));
int node = cpu_to_node(cpu);
struct pci_dev *dev = node_to_k8_nb_misc(node);
unsigned int reg = 0;
if (!this_leaf->can_disable)
return sprintf(buf, "Feature not enabled\n");
dev = get_k8_northbridge(node);
if (!dev) {
printk(KERN_ERR "Attempting AMD northbridge operation on a system with no northbridge\n");
return -EINVAL;
}
for (i = 0; i < 2; i++) {
unsigned int reg;
if (!dev)
return -EINVAL;
pci_read_config_dword(dev, 0x1BC + i * 4, &reg);
pci_read_config_dword(dev, 0x1BC + index * 4, &reg);
return sprintf(buf, "%x\n", reg);
}
ret += sprintf(buf, "%sEntry: %d\n", buf, i);
ret += sprintf(buf, "%sReads: %s\tNew Entries: %s\n",
buf,
reg & 0x80000000 ? "Disabled" : "Allowed",
reg & 0x40000000 ? "Disabled" : "Allowed");
ret += sprintf(buf, "%sSubCache: %x\tIndex: %x\n",
buf, (reg & 0x30000) >> 16, reg & 0xfff);
}
return ret;
#define SHOW_CACHE_DISABLE(index) \
static ssize_t \
show_cache_disable_##index(struct _cpuid4_info *this_leaf, char *buf) \
{ \
return show_cache_disable(this_leaf, buf, index); \
}
SHOW_CACHE_DISABLE(0)
SHOW_CACHE_DISABLE(1)
static ssize_t
store_cache_disable(struct _cpuid4_info *this_leaf, const char *buf,
size_t count)
static ssize_t store_cache_disable(struct _cpuid4_info *this_leaf,
const char *buf, size_t count, unsigned int index)
{
const struct cpumask *mask = to_cpumask(this_leaf->shared_cpu_map);
int node = cpu_to_node(cpumask_first(mask));
struct pci_dev *dev = NULL;
unsigned int ret, index, val;
int cpu = cpumask_first(to_cpumask(this_leaf->shared_cpu_map));
int node = cpu_to_node(cpu);
struct pci_dev *dev = node_to_k8_nb_misc(node);
unsigned long val = 0;
unsigned int scrubber = 0;
if (!this_leaf->can_disable)
return 0;
if (strlen(buf) > 15)
return -EINVAL;
ret = sscanf(buf, "%x %x", &index, &val);
if (ret != 2)
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
if (!dev)
return -EINVAL;
if (index > 1)
if (strict_strtoul(buf, 10, &val) < 0)
return -EINVAL;
val |= 0xc0000000;
dev = get_k8_northbridge(node);
if (!dev) {
printk(KERN_ERR "Attempting AMD northbridge operation on a system with no northbridge\n");
return -EINVAL;
}
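	/*
	 * F3x58[28:24] is the L3 scrub rate control (per the Family 10h
	 * BKDG); stop the scrubber before touching the index disable
	 * registers.
	 */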
pci_read_config_dword(dev, 0x58, &scrubber);
scrubber &= ~0x1f000000;
pci_write_config_dword(dev, 0x58, scrubber);
pci_write_config_dword(dev, 0x1BC + index * 4, val & ~0x40000000);
wbinvd();
pci_write_config_dword(dev, 0x1BC + index * 4, val);
return count;
}
return 1;
#define STORE_CACHE_DISABLE(index) \
static ssize_t \
store_cache_disable_##index(struct _cpuid4_info *this_leaf, \
const char *buf, size_t count) \
{ \
return store_cache_disable(this_leaf, buf, count, index); \
}
STORE_CACHE_DISABLE(0)
STORE_CACHE_DISABLE(1)
struct _cache_attr {
struct attribute attr;
@@ -808,7 +795,10 @@ define_one_ro(size);
define_one_ro(shared_cpu_map);
define_one_ro(shared_cpu_list);
static struct _cache_attr cache_disable = __ATTR(cache_disable, 0644, show_cache_disable, store_cache_disable);
static struct _cache_attr cache_disable_0 = __ATTR(cache_disable_0, 0644,
show_cache_disable_0, store_cache_disable_0);
static struct _cache_attr cache_disable_1 = __ATTR(cache_disable_1, 0644,
show_cache_disable_1, store_cache_disable_1);
static struct attribute * default_attrs[] = {
&type.attr,
@@ -820,7 +810,8 @@ static struct attribute * default_attrs[] = {
&size.attr,
&shared_cpu_map.attr,
&shared_cpu_list.attr,
&cache_disable.attr,
&cache_disable_0.attr,
&cache_disable_1.attr,
NULL
};
arch/x86/kernel/quirks.c
@@ -491,5 +491,42 @@ void force_hpet_resume(void)
break;
}
}
#endif
#if defined(CONFIG_PCI) && defined(CONFIG_NUMA)
/* Set correct numa_node information for AMD NB functions */
static void __init quirk_amd_nb_node(struct pci_dev *dev)
{
struct pci_dev *nb_ht;
unsigned int devfn;
u32 val;
devfn = PCI_DEVFN(PCI_SLOT(dev->devfn), 0);
nb_ht = pci_get_slot(dev->bus, devfn);
if (!nb_ht)
return;
pci_read_config_dword(nb_ht, 0x60, &val);
set_dev_node(&dev->dev, val & 7);
pci_dev_put(nb_ht);	/* drop the reference taken by pci_get_slot() */
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB,
quirk_amd_nb_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_ADDRMAP,
quirk_amd_nb_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MEMCTL,
quirk_amd_nb_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC,
quirk_amd_nb_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_HT,
quirk_amd_nb_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MAP,
quirk_amd_nb_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_DRAM,
quirk_amd_nb_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC,
quirk_amd_nb_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_LINK,
quirk_amd_nb_node);
#endif
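For a quick sanity check from user space, a sketch (the 0000:00:18.3
address is an assumption; K8 northbridge functions conventionally sit
at device 18 on bus 0):

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/bus/pci/devices/0000:00:18.3/numa_node", "r");
	int node;

	if (f && fscanf(f, "%d", &node) == 1)
		printf("NB function reports numa_node %d\n", node);
	if (f)
		fclose(f);
	return 0;
}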