Commit e07e0d4c authored by Linus Torvalds

Merge branch 'x86-ras-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 RAS update from Ingo Molnar:
 "The changes in this cycle were:

   - allow mmcfg access to APEI error injection handlers

   - improve MCE error messages

   - smaller cleanups"

* 'x86-ras-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86, mce: Fix sparse errors
  x86, mce: Improve timeout error messages
  ACPI, EINJ: Enhance error injection tolerance level
parents 57d36294 93d76c80
@@ -116,7 +116,7 @@ static void (*quirk_no_way_out)(int bank, struct mce *m, struct pt_regs *regs);
  * CPU/chipset specific EDAC code can register a notifier call here to print
  * MCE errors in a human-readable form.
  */
-ATOMIC_NOTIFIER_HEAD(x86_mce_decoder_chain);
+static ATOMIC_NOTIFIER_HEAD(x86_mce_decoder_chain);
 
 /* Do initial initialization of a struct mce */
 void mce_setup(struct mce *m)
@@ -312,7 +312,7 @@ static void wait_for_panic(void)
         panic("Panicing machine check CPU died");
 }
 
-static void mce_panic(char *msg, struct mce *final, char *exp)
+static void mce_panic(const char *msg, struct mce *final, char *exp)
 {
         int i, apei_err = 0;
 
@@ -530,7 +530,7 @@ static void mce_schedule_work(void)
         schedule_work(this_cpu_ptr(&mce_work));
 }
 
-DEFINE_PER_CPU(struct irq_work, mce_irq_work);
+static DEFINE_PER_CPU(struct irq_work, mce_irq_work);
 
 static void mce_irq_work_cb(struct irq_work *entry)
 {
@@ -736,7 +736,7 @@ static atomic_t mce_callin;
 /*
  * Check if a timeout waiting for other CPUs happened.
  */
-static int mce_timed_out(u64 *t)
+static int mce_timed_out(u64 *t, const char *msg)
 {
         /*
          * The others already did panic for some reason.
@@ -751,8 +751,7 @@ static int mce_timed_out(u64 *t)
                 goto out;
         if ((s64)*t < SPINUNIT) {
                 if (mca_cfg.tolerant <= 1)
-                        mce_panic("Timeout synchronizing machine check over CPUs",
-                                  NULL, NULL);
+                        mce_panic(msg, NULL, NULL);
                 cpu_missing = 1;
                 return 1;
         }
@@ -868,7 +867,8 @@ static int mce_start(int *no_way_out)
          * Wait for everyone.
          */
         while (atomic_read(&mce_callin) != cpus) {
-                if (mce_timed_out(&timeout)) {
+                if (mce_timed_out(&timeout,
+                                  "Timeout: Not all CPUs entered broadcast exception handler")) {
                         atomic_set(&global_nwo, 0);
                         return -1;
                 }
@@ -893,7 +893,8 @@ static int mce_start(int *no_way_out)
          * only seen by one CPU before cleared, avoiding duplicates.
          */
         while (atomic_read(&mce_executing) < order) {
-                if (mce_timed_out(&timeout)) {
+                if (mce_timed_out(&timeout,
+                                  "Timeout: Subject CPUs unable to finish machine check processing")) {
                         atomic_set(&global_nwo, 0);
                         return -1;
                 }
@@ -937,7 +938,8 @@ static int mce_end(int order)
          * loops.
          */
         while (atomic_read(&mce_executing) <= cpus) {
-                if (mce_timed_out(&timeout))
+                if (mce_timed_out(&timeout,
+                                  "Timeout: Monarch CPU unable to finish machine check processing"))
                         goto reset;
                 ndelay(SPINUNIT);
         }
@@ -950,7 +952,8 @@ static int mce_end(int order)
          * Subject: Wait for Monarch to finish.
          */
         while (atomic_read(&mce_executing) != 0) {
-                if (mce_timed_out(&timeout))
+                if (mce_timed_out(&timeout,
+                                  "Timeout: Monarch CPU did not finish machine check processing"))
                         goto reset;
                 ndelay(SPINUNIT);
         }
...
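
The MCE hunks above do two things: they mark file-local symbols static and constify mce_panic()'s message argument (the sparse fixes), and they thread a caller-supplied string through mce_timed_out() so a rendezvous timeout panics with a message naming the step that stalled rather than one generic text. A minimal userspace sketch of that second pattern follows; the names (die, sync_timed_out, wait_for_peers) are illustrative stand-ins, not kernel APIs.

/* Sketch only: a shared timeout helper that reports a caller-specific
 * message, mirroring what mce_timed_out() now does. */
#include <stdio.h>
#include <stdlib.h>

static void die(const char *msg)              /* stand-in for mce_panic() */
{
        fprintf(stderr, "fatal: %s\n", msg);
        exit(1);
}

/* Returns nonzero when the wait budget is exhausted, after reporting
 * the caller's description of which synchronization step failed. */
static int sync_timed_out(long *budget, const char *msg)
{
        if (*budget <= 0) {
                die(msg);
                return 1;
        }
        *budget -= 1;                         /* caller spins in fixed steps */
        return 0;
}

static void wait_for_peers(int ready, int expected)
{
        long budget = 10;

        while (ready != expected) {
                if (sync_timed_out(&budget,
                                   "Timeout: not all peers entered the handler"))
                        return;
                /* real code would ndelay(SPINUNIT) and re-read a counter */
        }
}

int main(void)
{
        wait_for_peers(0, 5);   /* peers never arrive, so the budget runs out */
        return 0;
}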
@@ -610,6 +610,32 @@ static int __init pci_parse_mcfg(struct acpi_table_header *header)
         return 0;
 }
 
+#ifdef CONFIG_ACPI_APEI
+extern int (*arch_apei_filter_addr)(int (*func)(__u64 start, __u64 size,
+                                    void *data), void *data);
+
+static int pci_mmcfg_for_each_region(int (*func)(__u64 start, __u64 size,
+                                    void *data), void *data)
+{
+        struct pci_mmcfg_region *cfg;
+        int rc;
+
+        if (list_empty(&pci_mmcfg_list))
+                return 0;
+
+        list_for_each_entry(cfg, &pci_mmcfg_list, list) {
+                rc = func(cfg->res.start, resource_size(&cfg->res), data);
+                if (rc)
+                        return rc;
+        }
+
+        return 0;
+}
+#define set_apei_filter() (arch_apei_filter_addr = pci_mmcfg_for_each_region)
+#else
+#define set_apei_filter()
+#endif
+
 static void __init __pci_mmcfg_init(int early)
 {
         pci_mmcfg_reject_broken(early);
@@ -644,6 +670,8 @@ void __init pci_mmcfg_early_init(void)
                 else
                         acpi_sfi_table_parse(ACPI_SIG_MCFG, pci_parse_mcfg);
                 __pci_mmcfg_init(1);
+
+                set_apei_filter();
         }
 }
...
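
The MMCONFIG hunks above publish the PCI MMCONFIG regions to APEI through a function pointer: pci_mmcfg_for_each_region() walks pci_mmcfg_list and hands each region's start and size to a caller-supplied callback, and set_apei_filter() points arch_apei_filter_addr at that walker during early init. Below is a self-contained sketch of the same hook pattern; the names and region values are invented for illustration and are not the kernel's.

/* Sketch only: an arch-side region walker exposed to generic code via
 * an optional function pointer, as with arch_apei_filter_addr. */
#include <stdio.h>

struct region { unsigned long long start, size; };

/* "arch" side: a private table exposed only through the walker below. */
static const struct region arch_regions[] = {
        { 0x80000000ULL, 0x04000000ULL },
        { 0xe0000000ULL, 0x10000000ULL },
};

static int arch_for_each_region(int (*func)(unsigned long long start,
                                            unsigned long long size,
                                            void *data), void *data)
{
        size_t i;
        int rc;

        for (i = 0; i < sizeof(arch_regions) / sizeof(arch_regions[0]); i++) {
                rc = func(arch_regions[i].start, arch_regions[i].size, data);
                if (rc)
                        return rc;
        }
        return 0;
}

/* generic side: the hook stays NULL unless an arch registers a walker. */
static int (*arch_filter_addr)(int (*func)(unsigned long long,
                                           unsigned long long, void *), void *);

static int collect_cb(unsigned long long start, unsigned long long size, void *data)
{
        unsigned long long *total = data;

        *total += size;
        return 0;
}

int main(void)
{
        unsigned long long total = 0;

        arch_filter_addr = arch_for_each_region;     /* the set_apei_filter() step */
        if (arch_filter_addr)
                arch_filter_addr(collect_cb, &total);
        printf("arch-owned space seen by the filter: %#llx bytes\n", total);
        return 0;
}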
@@ -449,7 +449,7 @@ int apei_resources_sub(struct apei_resources *resources1,
 }
 EXPORT_SYMBOL_GPL(apei_resources_sub);
 
-static int apei_get_nvs_callback(__u64 start, __u64 size, void *data)
+static int apei_get_res_callback(__u64 start, __u64 size, void *data)
 {
         struct apei_resources *resources = data;
         return apei_res_add(&resources->iomem, start, size);
@@ -457,7 +457,15 @@ static int apei_get_nvs_callback(__u64 start, __u64 size, void *data)
 
 static int apei_get_nvs_resources(struct apei_resources *resources)
 {
-        return acpi_nvs_for_each_region(apei_get_nvs_callback, resources);
+        return acpi_nvs_for_each_region(apei_get_res_callback, resources);
+}
+
+int (*arch_apei_filter_addr)(int (*func)(__u64 start, __u64 size,
+                                    void *data), void *data);
+
+static int apei_get_arch_resources(struct apei_resources *resources)
+{
+        return arch_apei_filter_addr(apei_get_res_callback, resources);
 }
 
 /*
@@ -470,7 +478,7 @@ int apei_resources_request(struct apei_resources *resources,
 {
         struct apei_res *res, *res_bak = NULL;
         struct resource *r;
-        struct apei_resources nvs_resources;
+        struct apei_resources nvs_resources, arch_res;
         int rc;
 
         rc = apei_resources_sub(resources, &apei_resources_all);
@@ -485,10 +493,20 @@ int apei_resources_request(struct apei_resources *resources,
         apei_resources_init(&nvs_resources);
         rc = apei_get_nvs_resources(&nvs_resources);
         if (rc)
-                goto res_fini;
+                goto nvs_res_fini;
         rc = apei_resources_sub(resources, &nvs_resources);
         if (rc)
-                goto res_fini;
+                goto nvs_res_fini;
+
+        if (arch_apei_filter_addr) {
+                apei_resources_init(&arch_res);
+                rc = apei_get_arch_resources(&arch_res);
+                if (rc)
+                        goto arch_res_fini;
+                rc = apei_resources_sub(resources, &arch_res);
+                if (rc)
+                        goto arch_res_fini;
+        }
 
         rc = -EINVAL;
         list_for_each_entry(res, &resources->iomem, list) {
@@ -536,7 +554,9 @@ int apei_resources_request(struct apei_resources *resources,
                         break;
                 release_mem_region(res->start, res->end - res->start);
         }
-res_fini:
+arch_res_fini:
+        apei_resources_fini(&arch_res);
+nvs_res_fini:
         apei_resources_fini(&nvs_resources);
         return rc;
 }
...
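
The APEI hunks above generalize apei_get_nvs_callback() into apei_get_res_callback(), collect the arch-filtered regions when arch_apei_filter_addr is set, and subtract them from the set that apei_resources_request() claims, adding separate arch_res_fini/nvs_res_fini cleanup labels. The subtraction is the key step: ranges already owned elsewhere (here, MMCONFIG space) are carved out before any request_mem_region() call, matching the summary above ("allow mmcfg access to APEI error injection handlers"). A standalone sketch of such a range subtraction follows; the types and helpers are invented, since apei_resources_sub()'s implementation is not part of this diff.

/* Sketch only: remove one reserved range from a wanted range, the idea
 * behind subtracting NVS and arch-filtered resources before requesting. */
#include <stdio.h>

struct range { unsigned long long start, end; };     /* [start, end) */

/* Writes the 0, 1 or 2 surviving pieces of 'want' into out[] and
 * returns how many there are. */
static int range_sub(struct range want, struct range resv, struct range *out)
{
        int n = 0;

        if (resv.end <= want.start || resv.start >= want.end) {
                out[n++] = want;                      /* no overlap at all */
                return n;
        }
        if (want.start < resv.start)                  /* piece below the hole */
                out[n++] = (struct range){ want.start, resv.start };
        if (want.end > resv.end)                      /* piece above the hole */
                out[n++] = (struct range){ resv.end, want.end };
        return n;
}

int main(void)
{
        /* Pretend the injection driver wants this window, but part of it
         * is MMCONFIG space reported by the arch filter. */
        struct range want  = { 0x80000000ULL, 0x90000000ULL };
        struct range mmcfg = { 0x80000000ULL, 0x88000000ULL };
        struct range left[2];
        int i, n = range_sub(want, mmcfg, left);

        for (i = 0; i < n; i++)
                printf("still request %#llx-%#llx\n", left[i].start, left[i].end);
        return 0;
}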