Commit 0ef283d4 authored by Linus Torvalds's avatar Linus Torvalds

Merge branch 'ras-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 RAS updates from Thomas Gleixner:

 - Fix a stack out of bounds write in the MCE error injection code.

 - Avoid IPIs during CPU hotplug to read the MCx_MISC block address from
   a remote CPU. That's fragile and pointless because the block
   addresses are the same on all CPUs. So they can be read once and
   local.

 - Add support for MCE broadcasting on newer VIA Centaur CPUs.

* 'ras-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/MCE/AMD: Read MCx_MISC block addresses on any CPU
  x86/MCE: Fix stack out-of-bounds write in mce-inject.c: Flags_read()
  x86/MCE: Enable MCE broadcasting on new Centaur CPUs
parents db020be9 fbf96cf9
...@@ -48,7 +48,7 @@ static struct dentry *dfs_inj; ...@@ -48,7 +48,7 @@ static struct dentry *dfs_inj;
static u8 n_banks; static u8 n_banks;
#define MAX_FLAG_OPT_SIZE 3 #define MAX_FLAG_OPT_SIZE 4
#define NBCFG 0x44 #define NBCFG 0x44
enum injection_type { enum injection_type {
......
...@@ -1727,6 +1727,21 @@ static void __mcheck_cpu_init_early(struct cpuinfo_x86 *c) ...@@ -1727,6 +1727,21 @@ static void __mcheck_cpu_init_early(struct cpuinfo_x86 *c)
} }
} }
/*
 * Enable MCE broadcast handling on Centaur CPUs that support it.
 *
 * Matches family 6 / model 0xf / stepping >= 0xe (presumably the VIA
 * Nano line -- TODO confirm against VIA documentation) plus every
 * family newer than 6, and arms the monarch rendezvous timeout so
 * broadcast machine checks are synchronized across CPUs.
 *
 * @c: cpuinfo of the CPU being initialized; only family/model/stepping
 *     are read here.
 */
static void mce_centaur_feature_init(struct cpuinfo_x86 *c)
{
struct mca_config *cfg = &mca_cfg;
/*
* All newer Centaur CPUs support MCE broadcasting. Enable
* synchronization with a one second timeout.
*/
if ((c->x86 == 6 && c->x86_model == 0xf && c->x86_stepping >= 0xe) ||
c->x86 > 6) {
/*
 * Only set the timeout when it is still negative, i.e. not
 * already configured elsewhere (NOTE(review): presumably via
 * the mce= command line -- confirm against mcheck core code).
 */
if (cfg->monarch_timeout < 0)
cfg->monarch_timeout = USEC_PER_SEC;
}
}
static void __mcheck_cpu_init_vendor(struct cpuinfo_x86 *c) static void __mcheck_cpu_init_vendor(struct cpuinfo_x86 *c)
{ {
switch (c->x86_vendor) { switch (c->x86_vendor) {
...@@ -1739,6 +1754,9 @@ static void __mcheck_cpu_init_vendor(struct cpuinfo_x86 *c) ...@@ -1739,6 +1754,9 @@ static void __mcheck_cpu_init_vendor(struct cpuinfo_x86 *c)
mce_amd_feature_init(c); mce_amd_feature_init(c);
break; break;
} }
case X86_VENDOR_CENTAUR:
mce_centaur_feature_init(c);
break;
default: default:
break; break;
......
...@@ -436,8 +436,7 @@ static void deferred_error_interrupt_enable(struct cpuinfo_x86 *c) ...@@ -436,8 +436,7 @@ static void deferred_error_interrupt_enable(struct cpuinfo_x86 *c)
wrmsr(MSR_CU_DEF_ERR, low, high); wrmsr(MSR_CU_DEF_ERR, low, high);
} }
static u32 smca_get_block_address(unsigned int cpu, unsigned int bank, static u32 smca_get_block_address(unsigned int bank, unsigned int block)
unsigned int block)
{ {
u32 low, high; u32 low, high;
u32 addr = 0; u32 addr = 0;
...@@ -456,13 +455,13 @@ static u32 smca_get_block_address(unsigned int cpu, unsigned int bank, ...@@ -456,13 +455,13 @@ static u32 smca_get_block_address(unsigned int cpu, unsigned int bank,
* For SMCA enabled processors, BLKPTR field of the first MISC register * For SMCA enabled processors, BLKPTR field of the first MISC register
* (MCx_MISC0) indicates presence of additional MISC regs set (MISC1-4). * (MCx_MISC0) indicates presence of additional MISC regs set (MISC1-4).
*/ */
if (rdmsr_safe_on_cpu(cpu, MSR_AMD64_SMCA_MCx_CONFIG(bank), &low, &high)) if (rdmsr_safe(MSR_AMD64_SMCA_MCx_CONFIG(bank), &low, &high))
goto out; goto out;
if (!(low & MCI_CONFIG_MCAX)) if (!(low & MCI_CONFIG_MCAX))
goto out; goto out;
if (!rdmsr_safe_on_cpu(cpu, MSR_AMD64_SMCA_MCx_MISC(bank), &low, &high) && if (!rdmsr_safe(MSR_AMD64_SMCA_MCx_MISC(bank), &low, &high) &&
(low & MASK_BLKPTR_LO)) (low & MASK_BLKPTR_LO))
addr = MSR_AMD64_SMCA_MCx_MISCy(bank, block - 1); addr = MSR_AMD64_SMCA_MCx_MISCy(bank, block - 1);
...@@ -471,7 +470,7 @@ static u32 smca_get_block_address(unsigned int cpu, unsigned int bank, ...@@ -471,7 +470,7 @@ static u32 smca_get_block_address(unsigned int cpu, unsigned int bank,
return addr; return addr;
} }
static u32 get_block_address(unsigned int cpu, u32 current_addr, u32 low, u32 high, static u32 get_block_address(u32 current_addr, u32 low, u32 high,
unsigned int bank, unsigned int block) unsigned int bank, unsigned int block)
{ {
u32 addr = 0, offset = 0; u32 addr = 0, offset = 0;
...@@ -480,7 +479,7 @@ static u32 get_block_address(unsigned int cpu, u32 current_addr, u32 low, u32 hi ...@@ -480,7 +479,7 @@ static u32 get_block_address(unsigned int cpu, u32 current_addr, u32 low, u32 hi
return addr; return addr;
if (mce_flags.smca) if (mce_flags.smca)
return smca_get_block_address(cpu, bank, block); return smca_get_block_address(bank, block);
/* Fall back to method we used for older processors: */ /* Fall back to method we used for older processors: */
switch (block) { switch (block) {
...@@ -558,7 +557,7 @@ void mce_amd_feature_init(struct cpuinfo_x86 *c) ...@@ -558,7 +557,7 @@ void mce_amd_feature_init(struct cpuinfo_x86 *c)
smca_configure(bank, cpu); smca_configure(bank, cpu);
for (block = 0; block < NR_BLOCKS; ++block) { for (block = 0; block < NR_BLOCKS; ++block) {
address = get_block_address(cpu, address, low, high, bank, block); address = get_block_address(address, low, high, bank, block);
if (!address) if (!address)
break; break;
...@@ -1175,7 +1174,7 @@ static int allocate_threshold_blocks(unsigned int cpu, unsigned int bank, ...@@ -1175,7 +1174,7 @@ static int allocate_threshold_blocks(unsigned int cpu, unsigned int bank,
if (err) if (err)
goto out_free; goto out_free;
recurse: recurse:
address = get_block_address(cpu, address, low, high, bank, ++block); address = get_block_address(address, low, high, bank, ++block);
if (!address) if (!address)
return 0; return 0;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment