Commit 2b091875 authored by Siddha, Suresh B and committed by Linus Torvalds

[PATCH] x86_64: x86_64/i386 fix Intel cache detection code assumption about threads sharing

Fix the Intel cache detection code's assumption that the number of threads
sharing the cache is equal to either the number of HT siblings or the
number of core siblings.

This also cleans up the code in general a bit.
Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
Signed-off-by: Andi Kleen <ak@suse.de>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 94605eff
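
The new sharing map is derived from APIC IDs rather than from the HT/core
sibling masks: CPUID leaf 4 reports how many threads share each cache, and
masking off the low get_count_order(num_threads_sharing) bits of the APIC ID
groups exactly those threads. A minimal user-space sketch of that grouping
follows; the apicid values and the reimplementation of the kernel's
get_count_order() are illustrative assumptions, not code from the patch:

#include <stdio.h>

/* Illustrative stand-in for the kernel's get_count_order():
 * smallest order such that (1 << order) >= count. */
static int get_count_order(unsigned int count)
{
	int order = 0;

	while ((1u << order) < count)
		order++;
	return order;
}

int main(void)
{
	/* Hypothetical APIC IDs: two packages, two cores each, HT on. */
	unsigned int apicid[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };
	unsigned int num_threads_sharing = 4;	/* e.g. a cache shared by 4 threads */
	int index_msb = get_count_order(num_threads_sharing);
	int cpu = 1, i;

	/* CPUs whose APIC IDs agree above bit index_msb share this cache. */
	for (i = 0; i < 8; i++)
		if (apicid[i] >> index_msb == apicid[cpu] >> index_msb)
			printf("CPU %d shares the cache with CPU %d\n", i, cpu);
	return 0;
}

With num_threads_sharing values of 1, 2 and 4 this reproduces the per-thread,
HT-sibling and core-sibling cases the old code special-cased, and it also
handles sharing counts that match neither sibling mask.
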
@@ -293,29 +293,45 @@ static struct _cpuid4_info *cpuid4_info[NR_CPUS];
 #ifdef CONFIG_SMP
 static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
 {
-	struct _cpuid4_info	*this_leaf;
+	struct _cpuid4_info	*this_leaf, *sibling_leaf;
 	unsigned long num_threads_sharing;
-#ifdef CONFIG_X86_HT
-	struct cpuinfo_x86 *c = cpu_data + cpu;
-#endif
+	int index_msb, i;
+	struct cpuinfo_x86 *c = cpu_data;
 
 	this_leaf = CPUID4_INFO_IDX(cpu, index);
 	num_threads_sharing = 1 + this_leaf->eax.split.num_threads_sharing;
 
 	if (num_threads_sharing == 1)
 		cpu_set(cpu, this_leaf->shared_cpu_map);
-#ifdef CONFIG_X86_HT
-	else if (num_threads_sharing == smp_num_siblings)
-		this_leaf->shared_cpu_map = cpu_sibling_map[cpu];
-	else if (num_threads_sharing == (c->x86_max_cores * smp_num_siblings))
-		this_leaf->shared_cpu_map = cpu_core_map[cpu];
-	else
-		printk(KERN_DEBUG "Number of CPUs sharing cache didn't match "
-				"any known set of CPUs\n");
-#endif
+	else {
+		index_msb = get_count_order(num_threads_sharing);
+
+		for_each_online_cpu(i) {
+			if (c[i].apicid >> index_msb ==
+			    c[cpu].apicid >> index_msb) {
+				cpu_set(i, this_leaf->shared_cpu_map);
+				if (i != cpu && cpuid4_info[i]) {
+					sibling_leaf = CPUID4_INFO_IDX(i, index);
+					cpu_set(cpu, sibling_leaf->shared_cpu_map);
+				}
+			}
+		}
+	}
+}
+static void __devinit cache_remove_shared_cpu_map(unsigned int cpu, int index)
+{
+	struct _cpuid4_info	*this_leaf, *sibling_leaf;
+	int sibling;
+
+	this_leaf = CPUID4_INFO_IDX(cpu, index);
+	for_each_cpu_mask(sibling, this_leaf->shared_cpu_map) {
+		sibling_leaf = CPUID4_INFO_IDX(sibling, index);
+		cpu_clear(cpu, sibling_leaf->shared_cpu_map);
+	}
 }
 #else
 static void __init cache_shared_cpu_map_setup(unsigned int cpu, int index) {}
+static void __init cache_remove_shared_cpu_map(unsigned int cpu, int index) {}
 #endif
 
 static void free_cache_attributes(unsigned int cpu)
@@ -574,8 +590,10 @@ static void __cpuexit cache_remove_dev(struct sys_device * sys_dev)
 	unsigned int cpu = sys_dev->id;
 	unsigned long i;
 
-	for (i = 0; i < num_cache_leaves; i++)
+	for (i = 0; i < num_cache_leaves; i++) {
+		cache_remove_shared_cpu_map(cpu, i);
 		kobject_unregister(&(INDEX_KOBJECT_PTR(cpu,i)->kobj));
+	}
 	kobject_unregister(cache_kobject[cpu]);
 	cpuid4_cache_sysfs_exit(cpu);
 	return;