Commit c7d314f3 authored by Yazen Ghannam, committed by Borislav Petkov

x86/MCE: Make the number of MCA banks a per-CPU variable

The number of MCA banks is provided per logical CPU. Historically, this
number has been the same across all CPUs, but this is not an
architectural guarantee. Future AMD systems may have MCA bank counts
that vary between logical CPUs in a system.

This issue was partially addressed in

  006c0770 ("x86/mce: Handle varying MCA bank counts")

by allocating structures using the maximum number of MCA banks and by
saving the maximum MCA bank count in a system as the global count. This
means that some extra structures are allocated. Also, this means that
CPUs will spend more time in the #MC and other handlers checking extra
MCA banks.

Thus, define the number of MCA banks as a per-CPU variable.

 [ bp: Make mce_num_banks an unsigned int. ]
Signed-off-by: Yazen Ghannam <yazen.ghannam@amd.com>
Signed-off-by: Borislav Petkov <bp@suse.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "linux-edac@vger.kernel.org" <linux-edac@vger.kernel.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Tony Luck <tony.luck@intel.com>
Cc: "x86@kernel.org" <x86@kernel.org>
Link: https://lkml.kernel.org/r/20190607201752.221446-5-Yazen.Ghannam@amd.com
parent 95d057f5
...@@ -495,7 +495,7 @@ static u32 get_block_address(u32 current_addr, u32 low, u32 high, ...@@ -495,7 +495,7 @@ static u32 get_block_address(u32 current_addr, u32 low, u32 high,
{ {
u32 addr = 0, offset = 0; u32 addr = 0, offset = 0;
if ((bank >= mca_cfg.banks) || (block >= NR_BLOCKS)) if ((bank >= per_cpu(mce_num_banks, cpu)) || (block >= NR_BLOCKS))
return addr; return addr;
if (mce_flags.smca) if (mce_flags.smca)
...@@ -627,11 +627,12 @@ void disable_err_thresholding(struct cpuinfo_x86 *c, unsigned int bank) ...@@ -627,11 +627,12 @@ void disable_err_thresholding(struct cpuinfo_x86 *c, unsigned int bank)
/* cpu init entry point, called from mce.c with preempt off */ /* cpu init entry point, called from mce.c with preempt off */
void mce_amd_feature_init(struct cpuinfo_x86 *c) void mce_amd_feature_init(struct cpuinfo_x86 *c)
{ {
u32 low = 0, high = 0, address = 0;
unsigned int bank, block, cpu = smp_processor_id(); unsigned int bank, block, cpu = smp_processor_id();
u32 low = 0, high = 0, address = 0;
int offset = -1; int offset = -1;
for (bank = 0; bank < mca_cfg.banks; ++bank) {
for (bank = 0; bank < this_cpu_read(mce_num_banks); ++bank) {
if (mce_flags.smca) if (mce_flags.smca)
smca_configure(bank, cpu); smca_configure(bank, cpu);
...@@ -976,7 +977,7 @@ static void amd_deferred_error_interrupt(void) ...@@ -976,7 +977,7 @@ static void amd_deferred_error_interrupt(void)
{ {
unsigned int bank; unsigned int bank;
for (bank = 0; bank < mca_cfg.banks; ++bank) for (bank = 0; bank < this_cpu_read(mce_num_banks); ++bank)
log_error_deferred(bank); log_error_deferred(bank);
} }
...@@ -1017,7 +1018,7 @@ static void amd_threshold_interrupt(void) ...@@ -1017,7 +1018,7 @@ static void amd_threshold_interrupt(void)
struct threshold_block *first_block = NULL, *block = NULL, *tmp = NULL; struct threshold_block *first_block = NULL, *block = NULL, *tmp = NULL;
unsigned int bank, cpu = smp_processor_id(); unsigned int bank, cpu = smp_processor_id();
for (bank = 0; bank < mca_cfg.banks; ++bank) { for (bank = 0; bank < this_cpu_read(mce_num_banks); ++bank) {
if (!(per_cpu(bank_map, cpu) & (1 << bank))) if (!(per_cpu(bank_map, cpu) & (1 << bank)))
continue; continue;
...@@ -1204,7 +1205,7 @@ static int allocate_threshold_blocks(unsigned int cpu, unsigned int bank, ...@@ -1204,7 +1205,7 @@ static int allocate_threshold_blocks(unsigned int cpu, unsigned int bank,
u32 low, high; u32 low, high;
int err; int err;
if ((bank >= mca_cfg.banks) || (block >= NR_BLOCKS)) if ((bank >= per_cpu(mce_num_banks, cpu)) || (block >= NR_BLOCKS))
return 0; return 0;
if (rdmsr_safe_on_cpu(cpu, address, &low, &high)) if (rdmsr_safe_on_cpu(cpu, address, &low, &high))
...@@ -1438,7 +1439,7 @@ int mce_threshold_remove_device(unsigned int cpu) ...@@ -1438,7 +1439,7 @@ int mce_threshold_remove_device(unsigned int cpu)
{ {
unsigned int bank; unsigned int bank;
for (bank = 0; bank < mca_cfg.banks; ++bank) { for (bank = 0; bank < per_cpu(mce_num_banks, cpu); ++bank) {
if (!(per_cpu(bank_map, cpu) & (1 << bank))) if (!(per_cpu(bank_map, cpu) & (1 << bank)))
continue; continue;
threshold_remove_bank(cpu, bank); threshold_remove_bank(cpu, bank);
...@@ -1459,14 +1460,14 @@ int mce_threshold_create_device(unsigned int cpu) ...@@ -1459,14 +1460,14 @@ int mce_threshold_create_device(unsigned int cpu)
if (bp) if (bp)
return 0; return 0;
bp = kcalloc(mca_cfg.banks, sizeof(struct threshold_bank *), bp = kcalloc(per_cpu(mce_num_banks, cpu), sizeof(struct threshold_bank *),
GFP_KERNEL); GFP_KERNEL);
if (!bp) if (!bp)
return -ENOMEM; return -ENOMEM;
per_cpu(threshold_banks, cpu) = bp; per_cpu(threshold_banks, cpu) = bp;
for (bank = 0; bank < mca_cfg.banks; ++bank) { for (bank = 0; bank < per_cpu(mce_num_banks, cpu); ++bank) {
if (!(per_cpu(bank_map, cpu) & (1 << bank))) if (!(per_cpu(bank_map, cpu) & (1 << bank)))
continue; continue;
err = threshold_create_bank(cpu, bank); err = threshold_create_bank(cpu, bank);
......
...@@ -65,6 +65,8 @@ static DEFINE_MUTEX(mce_sysfs_mutex); ...@@ -65,6 +65,8 @@ static DEFINE_MUTEX(mce_sysfs_mutex);
DEFINE_PER_CPU(unsigned, mce_exception_count); DEFINE_PER_CPU(unsigned, mce_exception_count);
DEFINE_PER_CPU_READ_MOSTLY(unsigned int, mce_num_banks);
struct mce_bank { struct mce_bank {
u64 ctl; /* subevents to enable */ u64 ctl; /* subevents to enable */
bool init; /* initialise bank? */ bool init; /* initialise bank? */
...@@ -701,7 +703,7 @@ bool machine_check_poll(enum mcp_flags flags, mce_banks_t *b) ...@@ -701,7 +703,7 @@ bool machine_check_poll(enum mcp_flags flags, mce_banks_t *b)
if (flags & MCP_TIMESTAMP) if (flags & MCP_TIMESTAMP)
m.tsc = rdtsc(); m.tsc = rdtsc();
for (i = 0; i < mca_cfg.banks; i++) { for (i = 0; i < this_cpu_read(mce_num_banks); i++) {
if (!mce_banks[i].ctl || !test_bit(i, *b)) if (!mce_banks[i].ctl || !test_bit(i, *b))
continue; continue;
...@@ -803,7 +805,7 @@ static int mce_no_way_out(struct mce *m, char **msg, unsigned long *validp, ...@@ -803,7 +805,7 @@ static int mce_no_way_out(struct mce *m, char **msg, unsigned long *validp,
char *tmp; char *tmp;
int i; int i;
for (i = 0; i < mca_cfg.banks; i++) { for (i = 0; i < this_cpu_read(mce_num_banks); i++) {
m->status = mce_rdmsrl(msr_ops.status(i)); m->status = mce_rdmsrl(msr_ops.status(i));
if (!(m->status & MCI_STATUS_VAL)) if (!(m->status & MCI_STATUS_VAL))
continue; continue;
...@@ -1083,7 +1085,7 @@ static void mce_clear_state(unsigned long *toclear) ...@@ -1083,7 +1085,7 @@ static void mce_clear_state(unsigned long *toclear)
{ {
int i; int i;
for (i = 0; i < mca_cfg.banks; i++) { for (i = 0; i < this_cpu_read(mce_num_banks); i++) {
if (test_bit(i, toclear)) if (test_bit(i, toclear))
mce_wrmsrl(msr_ops.status(i), 0); mce_wrmsrl(msr_ops.status(i), 0);
} }
...@@ -1141,7 +1143,7 @@ static void __mc_scan_banks(struct mce *m, struct mce *final, ...@@ -1141,7 +1143,7 @@ static void __mc_scan_banks(struct mce *m, struct mce *final,
struct mca_config *cfg = &mca_cfg; struct mca_config *cfg = &mca_cfg;
int severity, i; int severity, i;
for (i = 0; i < cfg->banks; i++) { for (i = 0; i < this_cpu_read(mce_num_banks); i++) {
__clear_bit(i, toclear); __clear_bit(i, toclear);
if (!test_bit(i, valid_banks)) if (!test_bit(i, valid_banks))
continue; continue;
...@@ -1482,9 +1484,10 @@ EXPORT_SYMBOL_GPL(mce_notify_irq); ...@@ -1482,9 +1484,10 @@ EXPORT_SYMBOL_GPL(mce_notify_irq);
static void __mcheck_cpu_mce_banks_init(void) static void __mcheck_cpu_mce_banks_init(void)
{ {
struct mce_bank *mce_banks = this_cpu_ptr(mce_banks_array); struct mce_bank *mce_banks = this_cpu_ptr(mce_banks_array);
u8 n_banks = this_cpu_read(mce_num_banks);
int i; int i;
for (i = 0; i < MAX_NR_BANKS; i++) { for (i = 0; i < n_banks; i++) {
struct mce_bank *b = &mce_banks[i]; struct mce_bank *b = &mce_banks[i];
b->ctl = -1ULL; b->ctl = -1ULL;
...@@ -1503,10 +1506,14 @@ static void __mcheck_cpu_cap_init(void) ...@@ -1503,10 +1506,14 @@ static void __mcheck_cpu_cap_init(void)
rdmsrl(MSR_IA32_MCG_CAP, cap); rdmsrl(MSR_IA32_MCG_CAP, cap);
b = cap & MCG_BANKCNT_MASK; b = cap & MCG_BANKCNT_MASK;
if (WARN_ON_ONCE(b > MAX_NR_BANKS))
if (b > MAX_NR_BANKS) {
pr_warn("CPU%d: Using only %u machine check banks out of %u\n",
smp_processor_id(), MAX_NR_BANKS, b);
b = MAX_NR_BANKS; b = MAX_NR_BANKS;
}
mca_cfg.banks = max(mca_cfg.banks, b); this_cpu_write(mce_num_banks, b);
__mcheck_cpu_mce_banks_init(); __mcheck_cpu_mce_banks_init();
...@@ -1545,7 +1552,7 @@ static void __mcheck_cpu_init_clear_banks(void) ...@@ -1545,7 +1552,7 @@ static void __mcheck_cpu_init_clear_banks(void)
struct mce_bank *mce_banks = this_cpu_ptr(mce_banks_array); struct mce_bank *mce_banks = this_cpu_ptr(mce_banks_array);
int i; int i;
for (i = 0; i < mca_cfg.banks; i++) { for (i = 0; i < this_cpu_read(mce_num_banks); i++) {
struct mce_bank *b = &mce_banks[i]; struct mce_bank *b = &mce_banks[i];
if (!b->init) if (!b->init)
...@@ -1596,7 +1603,7 @@ static int __mcheck_cpu_apply_quirks(struct cpuinfo_x86 *c) ...@@ -1596,7 +1603,7 @@ static int __mcheck_cpu_apply_quirks(struct cpuinfo_x86 *c)
/* This should be disabled by the BIOS, but isn't always */ /* This should be disabled by the BIOS, but isn't always */
if (c->x86_vendor == X86_VENDOR_AMD) { if (c->x86_vendor == X86_VENDOR_AMD) {
if (c->x86 == 15 && cfg->banks > 4) { if (c->x86 == 15 && this_cpu_read(mce_num_banks) > 4) {
/* /*
* disable GART TBL walk error reporting, which * disable GART TBL walk error reporting, which
* trips off incorrectly with the IOMMU & 3ware * trips off incorrectly with the IOMMU & 3ware
...@@ -1615,7 +1622,7 @@ static int __mcheck_cpu_apply_quirks(struct cpuinfo_x86 *c) ...@@ -1615,7 +1622,7 @@ static int __mcheck_cpu_apply_quirks(struct cpuinfo_x86 *c)
* Various K7s with broken bank 0 around. Always disable * Various K7s with broken bank 0 around. Always disable
* by default. * by default.
*/ */
if (c->x86 == 6 && cfg->banks > 0) if (c->x86 == 6 && this_cpu_read(mce_num_banks) > 0)
mce_banks[0].ctl = 0; mce_banks[0].ctl = 0;
/* /*
...@@ -1637,7 +1644,7 @@ static int __mcheck_cpu_apply_quirks(struct cpuinfo_x86 *c) ...@@ -1637,7 +1644,7 @@ static int __mcheck_cpu_apply_quirks(struct cpuinfo_x86 *c)
* valid event later, merely don't write CTL0. * valid event later, merely don't write CTL0.
*/ */
if (c->x86 == 6 && c->x86_model < 0x1A && cfg->banks > 0) if (c->x86 == 6 && c->x86_model < 0x1A && this_cpu_read(mce_num_banks) > 0)
mce_banks[0].init = 0; mce_banks[0].init = 0;
/* /*
...@@ -1873,7 +1880,7 @@ static void __mce_disable_bank(void *arg) ...@@ -1873,7 +1880,7 @@ static void __mce_disable_bank(void *arg)
void mce_disable_bank(int bank) void mce_disable_bank(int bank)
{ {
if (bank >= mca_cfg.banks) { if (bank >= this_cpu_read(mce_num_banks)) {
pr_warn(FW_BUG pr_warn(FW_BUG
"Ignoring request to disable invalid MCA bank %d.\n", "Ignoring request to disable invalid MCA bank %d.\n",
bank); bank);
...@@ -1962,7 +1969,7 @@ static void mce_disable_error_reporting(void) ...@@ -1962,7 +1969,7 @@ static void mce_disable_error_reporting(void)
struct mce_bank *mce_banks = this_cpu_ptr(mce_banks_array); struct mce_bank *mce_banks = this_cpu_ptr(mce_banks_array);
int i; int i;
for (i = 0; i < mca_cfg.banks; i++) { for (i = 0; i < this_cpu_read(mce_num_banks); i++) {
struct mce_bank *b = &mce_banks[i]; struct mce_bank *b = &mce_banks[i];
if (b->init) if (b->init)
...@@ -2073,7 +2080,7 @@ static ssize_t show_bank(struct device *s, struct device_attribute *attr, ...@@ -2073,7 +2080,7 @@ static ssize_t show_bank(struct device *s, struct device_attribute *attr,
u8 bank = attr_to_bank(attr)->bank; u8 bank = attr_to_bank(attr)->bank;
struct mce_bank *b; struct mce_bank *b;
if (bank >= mca_cfg.banks) if (bank >= per_cpu(mce_num_banks, s->id))
return -EINVAL; return -EINVAL;
b = &per_cpu(mce_banks_array, s->id)[bank]; b = &per_cpu(mce_banks_array, s->id)[bank];
...@@ -2091,7 +2098,7 @@ static ssize_t set_bank(struct device *s, struct device_attribute *attr, ...@@ -2091,7 +2098,7 @@ static ssize_t set_bank(struct device *s, struct device_attribute *attr,
if (kstrtou64(buf, 0, &new) < 0) if (kstrtou64(buf, 0, &new) < 0)
return -EINVAL; return -EINVAL;
if (bank >= mca_cfg.banks) if (bank >= per_cpu(mce_num_banks, s->id))
return -EINVAL; return -EINVAL;
b = &per_cpu(mce_banks_array, s->id)[bank]; b = &per_cpu(mce_banks_array, s->id)[bank];
...@@ -2243,7 +2250,7 @@ static int mce_device_create(unsigned int cpu) ...@@ -2243,7 +2250,7 @@ static int mce_device_create(unsigned int cpu)
if (err) if (err)
goto error; goto error;
} }
for (j = 0; j < mca_cfg.banks; j++) { for (j = 0; j < per_cpu(mce_num_banks, cpu); j++) {
err = device_create_file(dev, &mce_bank_devs[j].attr); err = device_create_file(dev, &mce_bank_devs[j].attr);
if (err) if (err)
goto error2; goto error2;
...@@ -2275,7 +2282,7 @@ static void mce_device_remove(unsigned int cpu) ...@@ -2275,7 +2282,7 @@ static void mce_device_remove(unsigned int cpu)
for (i = 0; mce_device_attrs[i]; i++) for (i = 0; mce_device_attrs[i]; i++)
device_remove_file(dev, mce_device_attrs[i]); device_remove_file(dev, mce_device_attrs[i]);
for (i = 0; i < mca_cfg.banks; i++) for (i = 0; i < per_cpu(mce_num_banks, cpu); i++)
device_remove_file(dev, &mce_bank_devs[i].attr); device_remove_file(dev, &mce_bank_devs[i].attr);
device_unregister(dev); device_unregister(dev);
...@@ -2305,7 +2312,7 @@ static void mce_reenable_cpu(void) ...@@ -2305,7 +2312,7 @@ static void mce_reenable_cpu(void)
if (!cpuhp_tasks_frozen) if (!cpuhp_tasks_frozen)
cmci_reenable(); cmci_reenable();
for (i = 0; i < mca_cfg.banks; i++) { for (i = 0; i < this_cpu_read(mce_num_banks); i++) {
struct mce_bank *b = &mce_banks[i]; struct mce_bank *b = &mce_banks[i];
if (b->init) if (b->init)
...@@ -2493,8 +2500,6 @@ EXPORT_SYMBOL_GPL(mcsafe_key); ...@@ -2493,8 +2500,6 @@ EXPORT_SYMBOL_GPL(mcsafe_key);
static int __init mcheck_late_init(void) static int __init mcheck_late_init(void)
{ {
pr_info("Using %d MCE banks\n", mca_cfg.banks);
if (mca_cfg.recovery) if (mca_cfg.recovery)
static_branch_inc(&mcsafe_key); static_branch_inc(&mcsafe_key);
......
...@@ -118,7 +118,6 @@ struct mca_config { ...@@ -118,7 +118,6 @@ struct mca_config {
bios_cmci_threshold : 1, bios_cmci_threshold : 1,
__reserved : 59; __reserved : 59;
u8 banks;
s8 bootlog; s8 bootlog;
int tolerant; int tolerant;
int monarch_timeout; int monarch_timeout;
...@@ -127,6 +126,7 @@ struct mca_config { ...@@ -127,6 +126,7 @@ struct mca_config {
}; };
extern struct mca_config mca_cfg; extern struct mca_config mca_cfg;
DECLARE_PER_CPU_READ_MOSTLY(unsigned int, mce_num_banks);
struct mce_vendor_flags { struct mce_vendor_flags {
/* /*
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment