Commit 535b2f73 authored by Linus Torvalds

Merge branch 'x86-cpu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 CPU updates from Ingo Molnar:
 "The changes in this development cycle were:

   - AMD CPU topology enhancements that are cleanups on current CPUs but
     which enable future Fam17 hardware. (Yazen Ghannam)

   - unify bugs.c and bugs_64.c (Borislav Petkov)

   - remove the show_msr= boot option (Borislav Petkov)

   - simplify a boot message (Borislav Petkov)"

* 'x86-cpu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/cpu/AMD: Clean up cpu_llc_id assignment per topology feature
  x86/cpu: Get rid of the show_msr= boot option
  x86/cpu: Merge bugs.c and bugs_64.c
  x86/cpu: Remove the printk format specifier in "CPU0: "
parents ef486c59 b6a50cdd
@@ -3826,12 +3826,6 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
shapers= [NET]
Maximal number of shapers.
show_msr= [x86] show boot-time MSR settings
Format: { <integer> }
Show boot-time (BIOS-initialized) MSR settings.
The parameter means the number of CPUs to show,
for example 1 means boot CPU only.
simeth= [IA-64]
simscsi=
......
@@ -20,13 +20,11 @@ obj-y := intel_cacheinfo.o scattered.o topology.o
obj-y += common.o
obj-y += rdrand.o
obj-y += match.o
obj-y += bugs.o
obj-$(CONFIG_PROC_FS) += proc.o
obj-$(CONFIG_X86_FEATURE_NAMES) += capflags.o powerflags.o
obj-$(CONFIG_X86_32) += bugs.o
obj-$(CONFIG_X86_64) += bugs_64.o
obj-$(CONFIG_CPU_SUP_INTEL) += intel.o
obj-$(CONFIG_CPU_SUP_AMD) += amd.o
obj-$(CONFIG_CPU_SUP_CYRIX_32) += cyrix.o
......
@@ -314,11 +314,30 @@ static void amd_get_topology(struct cpuinfo_x86 *c)
smp_num_siblings = ((ebx >> 8) & 3) + 1;
c->x86_max_cores /= smp_num_siblings;
c->cpu_core_id = ebx & 0xff;
/*
* We may have multiple LLCs if L3 caches exist, so check if we
* have an L3 cache by looking at the L3 cache CPUID leaf.
*/
if (cpuid_edx(0x80000006)) {
if (c->x86 == 0x17) {
/*
* LLC is at the core complex level.
* Core complex id is ApicId[3].
*/
per_cpu(cpu_llc_id, cpu) = c->apicid >> 3;
} else {
/* LLC is at the node level. */
per_cpu(cpu_llc_id, cpu) = node_id;
}
}
} else if (cpu_has(c, X86_FEATURE_NODEID_MSR)) {
u64 value;
rdmsrl(MSR_FAM10H_NODE_ID, value);
node_id = value & 7;
per_cpu(cpu_llc_id, cpu) = node_id;
} else
return;
@@ -329,9 +348,6 @@ static void amd_get_topology(struct cpuinfo_x86 *c)
set_cpu_cap(c, X86_FEATURE_AMD_DCM);
cus_per_node = c->x86_max_cores / nodes_per_socket;
/* store NodeID, use llc_shared_map to store sibling info */
per_cpu(cpu_llc_id, cpu) = node_id;
/* core id has to be in the [0 .. cores_per_node - 1] range */
c->cpu_core_id %= cus_per_node;
}
@@ -356,15 +372,6 @@ static void amd_detect_cmp(struct cpuinfo_x86 *c)
/* use socket ID also for last level cache */
per_cpu(cpu_llc_id, cpu) = c->phys_proc_id;
amd_get_topology(c);
/*
* Fix percpu cpu_llc_id here as LLC topology is different
* for Fam17h systems.
*/
if (c->x86 != 0x17 || !cpuid_edx(0x80000006))
return;
per_cpu(cpu_llc_id, cpu) = c->apicid >> 3;
#endif
}
......
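A quick illustration of the cpu_llc_id choice made in the amd_get_topology()/amd_detect_cmp() hunks above: with an L3 present, Fam17h parts get one last-level cache per core complex (ApicId >> 3), while older multi-node parts keep one per node. The sketch below is a minimal userspace rendering of that decision; llc_id() and its parameters are made-up names for the example, not code from this commit (the real code writes per_cpu(cpu_llc_id, cpu)).

/* Illustrative sketch only; names are hypothetical. */
#include <stdio.h>

static unsigned int llc_id(unsigned int family, unsigned int apicid,
                           unsigned int node_id, unsigned int socket_id,
                           int has_l3)
{
	if (!has_l3)		/* CPUID 0x80000006 EDX == 0: no L3, keep the per-socket default */
		return socket_id;

	if (family == 0x17)	/* Fam17h: one L3 per core complex; ApicId[3..] names the CCX */
		return apicid >> 3;

	return node_id;		/* older multi-node parts: one L3 per node */
}

int main(void)
{
	/* Three threads of a Fam17h CPU with an L3: APIC IDs 0x0 and 0x1 share
	 * a core complex (LLC 0), APIC ID 0x8 sits in the next one (LLC 1). */
	printf("%u %u %u\n",
	       llc_id(0x17, 0x0, 0, 0, 1),
	       llc_id(0x17, 0x1, 0, 0, 1),
	       llc_id(0x17, 0x8, 0, 0, 1));
	return 0;
}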
@@ -16,15 +16,19 @@
#include <asm/msr.h>
#include <asm/paravirt.h>
#include <asm/alternative.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
void __init check_bugs(void)
{
identify_boot_cpu();
#ifndef CONFIG_SMP
pr_info("CPU: ");
print_cpu_info(&boot_cpu_data);
#endif
if (!IS_ENABLED(CONFIG_SMP)) {
pr_info("CPU: ");
print_cpu_info(&boot_cpu_data);
}
#ifdef CONFIG_X86_32
/*
* Check whether we are able to run this kernel safely on SMP.
*
@@ -40,4 +44,18 @@ void __init check_bugs(void)
alternative_instructions();
fpu__init_check_bugs();
#else /* CONFIG_X86_64 */
alternative_instructions();
/*
* Make sure the first 2MB area is not mapped by huge pages
* There are typically fixed size MTRRs in there and overlapping
* MTRRs into large pages causes slow downs.
*
* Right now we don't do that with gbpages because there seems
* very little benefit for that case.
*/
if (!direct_gbpages)
set_memory_4k((unsigned long)__va(0), 1);
#endif
}
/*
* Copyright (C) 1994 Linus Torvalds
* Copyright (C) 2000 SuSE
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <asm/alternative.h>
#include <asm/bugs.h>
#include <asm/processor.h>
#include <asm/mtrr.h>
#include <asm/cacheflush.h>
void __init check_bugs(void)
{
identify_boot_cpu();
#if !defined(CONFIG_SMP)
pr_info("CPU: ");
print_cpu_info(&boot_cpu_data);
#endif
alternative_instructions();
/*
* Make sure the first 2MB area is not mapped by huge pages
* There are typically fixed size MTRRs in there and overlapping
* MTRRs into large pages causes slow downs.
*
* Right now we don't do that with gbpages because there seems
* very little benefit for that case.
*/
if (!direct_gbpages)
set_memory_4k((unsigned long)__va(0), 1);
}
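The merged bugs.c above also swaps the "#ifndef CONFIG_SMP" guard for IS_ENABLED(CONFIG_SMP): the macro folds to a compile-time 0 or 1, so the dead branch is still eliminated, but the guarded code is parsed and type-checked in every configuration. Below is a standalone, simplified re-derivation of the IS_ENABLED() preprocessor trick for illustration only; it is not the kernel's exact <linux/kconfig.h> implementation (among other things it ignores the =m case) and the CONFIG_ names in the demo are pretend values.

#include <stdio.h>

#define CONFIG_SMP 1				/* pretend .config said CONFIG_SMP=y */

#define __ARG_PLACEHOLDER_1 0,
#define __take_second_arg(__ignored, val, ...) val
/* If the pasted token is __ARG_PLACEHOLDER_1 it expands to "0," and shifts a
 * literal 1 into the second argument slot; any other token leaves the 0. */
#define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0)
#define ___is_defined(val) ____is_defined(__ARG_PLACEHOLDER_##val)
#define __is_defined(x) ___is_defined(x)
#define IS_ENABLED(option) __is_defined(option)	/* simplified: built-in (=y) only */

int main(void)
{
	if (!IS_ENABLED(CONFIG_SMP))		/* constant 0 here: branch is dropped, yet still compiled */
		printf("CPU: <uniprocessor boot banner would go here>\n");

	printf("IS_ENABLED(CONFIG_SMP) = %d\n", IS_ENABLED(CONFIG_SMP));
	printf("IS_ENABLED(CONFIG_NOT_SET_EXAMPLE) = %d\n",
	       IS_ENABLED(CONFIG_NOT_SET_EXAMPLE));	/* undefined option -> 0 */
	return 0;
}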
@@ -1190,51 +1190,6 @@ void identify_secondary_cpu(struct cpuinfo_x86 *c)
mtrr_ap_init();
}
struct msr_range {
unsigned min;
unsigned max;
};
static const struct msr_range msr_range_array[] = {
{ 0x00000000, 0x00000418},
{ 0xc0000000, 0xc000040b},
{ 0xc0010000, 0xc0010142},
{ 0xc0011000, 0xc001103b},
};
static void __print_cpu_msr(void)
{
unsigned index_min, index_max;
unsigned index;
u64 val;
int i;
for (i = 0; i < ARRAY_SIZE(msr_range_array); i++) {
index_min = msr_range_array[i].min;
index_max = msr_range_array[i].max;
for (index = index_min; index < index_max; index++) {
if (rdmsrl_safe(index, &val))
continue;
pr_info(" MSR%08x: %016llx\n", index, val);
}
}
}
static int show_msr;
static __init int setup_show_msr(char *arg)
{
int num;
get_option(&arg, &num);
if (num > 0)
show_msr = num;
return 1;
}
__setup("show_msr=", setup_show_msr);
static __init int setup_noclflush(char *arg)
{
setup_clear_cpu_cap(X86_FEATURE_CLFLUSH);
@@ -1268,14 +1223,6 @@ void print_cpu_info(struct cpuinfo_x86 *c)
pr_cont(", stepping: 0x%x)\n", c->x86_mask);
else
pr_cont(")\n");
print_cpu_msr(c);
}
void print_cpu_msr(struct cpuinfo_x86 *c)
{
if (c->cpu_index < show_msr)
__print_cpu_msr();
}

static __init int setup_disablecpuid(char *arg)
......
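With the show_msr= boot option removed above, one common way to inspect MSRs after boot is the msr driver's /dev/cpu/<n>/msr character devices (assuming root and CONFIG_X86_MSR built in, or "modprobe msr"). The userspace sketch below is only an illustration of that interface, not part of this commit: the MSR index is used as the file offset and each read returns the 8-byte register value, printed in the same format the removed __print_cpu_msr() used.

#include <stdio.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	const uint32_t msr = 0xc0000080;	/* EFER, inside one of the ranges the removed code walked */
	uint64_t val;
	int fd = open("/dev/cpu/0/msr", O_RDONLY);

	if (fd < 0) {
		perror("open /dev/cpu/0/msr");
		return 1;
	}
	if (pread(fd, &val, sizeof(val), msr) != (ssize_t)sizeof(val)) {
		perror("pread");
		close(fd);
		return 1;
	}
	printf(" MSR%08x: %016llx\n", (unsigned)msr, (unsigned long long)val);
	close(fd);
	return 0;
}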
@@ -1352,7 +1352,7 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
default_setup_apic_routing();
cpu0_logical_apicid = apic_bsp_setup(false);
pr_info("CPU%d: ", 0);
pr_info("CPU0: ");
print_cpu_info(&cpu_data(0));
if (is_uv_system())
......