Commit f11445ba authored by Borislav Petkov

x86/mce: Use arch atomic and bit helpers

The arch helpers do not have explicit KASAN instrumentation. Use them in
noinstr code.
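
For reference, a simplified sketch of why this matters, modeled on the
generic wrappers in include/linux/atomic/atomic-instrumented.h (this
snippet is illustrative, not part of the patch): the instrumented
atomic_read() inserts an explicit sanitizer hook before delegating to
the raw arch helper, and it is exactly that hook which must not be
reached from noinstr code.

  /* Illustrative sketch of the instrumented wrapper, simplified. */
  static __always_inline int atomic_read(const atomic_t *v)
  {
          instrument_atomic_read(v, sizeof(*v)); /* KASAN/KCSAN hook */
          return arch_atomic_read(v);            /* raw, uninstrumented op */
  }

noinstr code therefore calls arch_atomic_read() and friends directly,
skipping the wrapper entirely.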

While at it, inline a couple more functions with single call sites:

mce_severity_amd_smca() has a single call site, which is noinstr, so force
the inlining and fix:

  vmlinux.o: warning: objtool: mce_severity_amd.constprop.0()+0xca: call to \
	  mce_severity_amd_smca() leaves .noinstr.text section

Always inline mca_msr_reg():

     text    data     bss     dec     hex filename
  16065240        128031326       36405368        180501934       ac23dae vmlinux.before
  16065240        128031294       36405368        180501902       ac23d8e vmlinux.after

and mce_no_way_out(), as the latter is used only once, to fix:

  vmlinux.o: warning: objtool: mce_read_aux()+0x53: call to mca_msr_reg() leaves .noinstr.text section
  vmlinux.o: warning: objtool: do_machine_check()+0xc9: call to mce_no_way_out() leaves .noinstr.text section
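
All of the above follow the same pattern; here is a minimal sketch with
made-up names (bank_has_valid_status(), example_handler() and
example_count are hypothetical): a plain "static" helper may be emitted
out of line in .text, and a call to it from .noinstr.text is precisely
what objtool flags, so the helper is forced inline instead.

  static atomic_t example_count;        /* hypothetical counter */

  /* Single noinstr caller: force inlining so the body stays inside
   * .noinstr.text instead of being emitted as an out-of-line copy. */
  static __always_inline bool bank_has_valid_status(u64 status)
  {
          return status & MCI_STATUS_VAL;
  }

  noinstr void example_handler(u64 status)
  {
          if (bank_has_valid_status(status))       /* inlined: no CALL leaves .noinstr.text */
                  arch_atomic_inc(&example_count); /* arch helper: no KASAN hooks */
  }
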
Signed-off-by: Borislav Petkov <bp@suse.de>
Acked-by: Marco Elver <elver@google.com>
Link: https://lore.kernel.org/r/20220204083015.17317-4-bp@alien8.de
parent c0f6799d
--- a/arch/x86/kernel/cpu/mce/core.c
+++ b/arch/x86/kernel/cpu/mce/core.c
@@ -173,27 +173,6 @@ void mce_unregister_decode_chain(struct notifier_block *nb)
 }
 EXPORT_SYMBOL_GPL(mce_unregister_decode_chain);
 
-u32 mca_msr_reg(int bank, enum mca_msr reg)
-{
-	if (mce_flags.smca) {
-		switch (reg) {
-		case MCA_CTL: return MSR_AMD64_SMCA_MCx_CTL(bank);
-		case MCA_ADDR: return MSR_AMD64_SMCA_MCx_ADDR(bank);
-		case MCA_MISC: return MSR_AMD64_SMCA_MCx_MISC(bank);
-		case MCA_STATUS: return MSR_AMD64_SMCA_MCx_STATUS(bank);
-		}
-	}
-
-	switch (reg) {
-	case MCA_CTL: return MSR_IA32_MCx_CTL(bank);
-	case MCA_ADDR: return MSR_IA32_MCx_ADDR(bank);
-	case MCA_MISC: return MSR_IA32_MCx_MISC(bank);
-	case MCA_STATUS: return MSR_IA32_MCx_STATUS(bank);
-	}
-
-	return 0;
-}
-
 static void __print_mce(struct mce *m)
 {
 	pr_emerg(HW_ERR "CPU %d: Machine Check%s: %Lx Bank %d: %016Lx\n",
@@ -814,7 +793,8 @@ EXPORT_SYMBOL_GPL(machine_check_poll);
  * the severity assessment code. Pretend that EIPV was set, and take the
  * ip/cs values from the pt_regs that mce_gather_info() ignored earlier.
  */
-static void quirk_sandybridge_ifu(int bank, struct mce *m, struct pt_regs *regs)
+static __always_inline void
+quirk_sandybridge_ifu(int bank, struct mce *m, struct pt_regs *regs)
 {
 	if (bank != 0)
 		return;
@@ -838,8 +818,8 @@ static void quirk_sandybridge_ifu(int bank, struct mce *m, struct pt_regs *regs)
  * Do a quick check if any of the events requires a panic.
  * This decides if we keep the events around or clear them.
  */
-static int mce_no_way_out(struct mce *m, char **msg, unsigned long *validp,
-			  struct pt_regs *regs)
+static __always_inline int mce_no_way_out(struct mce *m, char **msg, unsigned long *validp,
+					  struct pt_regs *regs)
 {
 	char *tmp = *msg;
 	int i;
@@ -849,7 +829,7 @@ static int mce_no_way_out(struct mce *m, char **msg, unsigned long *validp,
 		if (!(m->status & MCI_STATUS_VAL))
 			continue;
 
-		__set_bit(i, validp);
+		arch___set_bit(i, validp);
 		if (mce_flags.snb_ifu_quirk)
 			quirk_sandybridge_ifu(i, m, regs);
 
@@ -1015,13 +995,13 @@ static noinstr int mce_start(int *no_way_out)
 	if (!timeout)
 		return ret;
 
-	atomic_add(*no_way_out, &global_nwo);
+	arch_atomic_add(*no_way_out, &global_nwo);
 	/*
 	 * Rely on the implied barrier below, such that global_nwo
 	 * is updated before mce_callin.
 	 */
-	order = atomic_inc_return(&mce_callin);
-	cpumask_clear_cpu(smp_processor_id(), &mce_missing_cpus);
+	order = arch_atomic_inc_return(&mce_callin);
+	arch_cpumask_clear_cpu(smp_processor_id(), &mce_missing_cpus);
 
 	/* Enable instrumentation around calls to external facilities */
 	instrumentation_begin();
@@ -1029,10 +1009,10 @@ static noinstr int mce_start(int *no_way_out)
 	/*
 	 * Wait for everyone.
 	 */
-	while (atomic_read(&mce_callin) != num_online_cpus()) {
+	while (arch_atomic_read(&mce_callin) != num_online_cpus()) {
 		if (mce_timed_out(&timeout,
 				  "Timeout: Not all CPUs entered broadcast exception handler")) {
-			atomic_set(&global_nwo, 0);
+			arch_atomic_set(&global_nwo, 0);
 			goto out;
 		}
 		ndelay(SPINUNIT);
@@ -1047,7 +1027,7 @@ static noinstr int mce_start(int *no_way_out)
 		/*
 		 * Monarch: Starts executing now, the others wait.
 		 */
-		atomic_set(&mce_executing, 1);
+		arch_atomic_set(&mce_executing, 1);
 	} else {
 		/*
 		 * Subject: Now start the scanning loop one by one in
@@ -1055,10 +1035,10 @@ static noinstr int mce_start(int *no_way_out)
 		 * This way when there are any shared banks it will be
 		 * only seen by one CPU before cleared, avoiding duplicates.
 		 */
-		while (atomic_read(&mce_executing) < order) {
+		while (arch_atomic_read(&mce_executing) < order) {
 			if (mce_timed_out(&timeout,
 					  "Timeout: Subject CPUs unable to finish machine check processing")) {
-				atomic_set(&global_nwo, 0);
+				arch_atomic_set(&global_nwo, 0);
 				goto out;
 			}
 			ndelay(SPINUNIT);
@@ -1068,7 +1048,7 @@ static noinstr int mce_start(int *no_way_out)
 	/*
 	 * Cache the global no_way_out state.
 	 */
-	*no_way_out = atomic_read(&global_nwo);
+	*no_way_out = arch_atomic_read(&global_nwo);
 
 	ret = order;
 
@@ -1153,12 +1133,12 @@ static noinstr int mce_end(int order)
 	return ret;
 }
 
-static void mce_clear_state(unsigned long *toclear)
+static __always_inline void mce_clear_state(unsigned long *toclear)
 {
 	int i;
 
 	for (i = 0; i < this_cpu_read(mce_num_banks); i++) {
-		if (test_bit(i, toclear))
+		if (arch_test_bit(i, toclear))
 			mce_wrmsrl(mca_msr_reg(i, MCA_STATUS), 0);
 	}
 }
@@ -1208,8 +1188,8 @@ __mc_scan_banks(struct mce *m, struct pt_regs *regs, struct mce *final,
 	int severity, i, taint = 0;
 
 	for (i = 0; i < this_cpu_read(mce_num_banks); i++) {
-		__clear_bit(i, toclear);
-		if (!test_bit(i, valid_banks))
+		arch___clear_bit(i, toclear);
+		if (!arch_test_bit(i, valid_banks))
 			continue;
 
 		if (!mce_banks[i].ctl)
@@ -1244,7 +1224,7 @@ __mc_scan_banks(struct mce *m, struct pt_regs *regs, struct mce *final,
 		    severity == MCE_UCNA_SEVERITY) && !no_way_out)
 			continue;
 
-		__set_bit(i, toclear);
+		arch___set_bit(i, toclear);
 
 		/* Machine check event was not enabled. Clear, but ignore. */
 		if (severity == MCE_NO_SEVERITY)
--- a/arch/x86/kernel/cpu/mce/internal.h
+++ b/arch/x86/kernel/cpu/mce/internal.h
@@ -182,8 +182,6 @@ enum mca_msr {
 	MCA_MISC,
 };
 
-u32 mca_msr_reg(int bank, enum mca_msr reg);
-
 /* Decide whether to add MCE record to MCE event pool or filter it out. */
 extern bool filter_mce(struct mce *m);
 
@@ -209,4 +207,25 @@ static inline void winchip_machine_check(struct pt_regs *regs) {}
 
 noinstr u64 mce_rdmsrl(u32 msr);
 
+static __always_inline u32 mca_msr_reg(int bank, enum mca_msr reg)
+{
+	if (mce_flags.smca) {
+		switch (reg) {
+		case MCA_CTL: return MSR_AMD64_SMCA_MCx_CTL(bank);
+		case MCA_ADDR: return MSR_AMD64_SMCA_MCx_ADDR(bank);
+		case MCA_MISC: return MSR_AMD64_SMCA_MCx_MISC(bank);
+		case MCA_STATUS: return MSR_AMD64_SMCA_MCx_STATUS(bank);
+		}
+	}
+
+	switch (reg) {
+	case MCA_CTL: return MSR_IA32_MCx_CTL(bank);
+	case MCA_ADDR: return MSR_IA32_MCx_ADDR(bank);
+	case MCA_MISC: return MSR_IA32_MCx_MISC(bank);
+	case MCA_STATUS: return MSR_IA32_MCx_STATUS(bank);
+	}
+
+	return 0;
+}
+
 #endif /* __X86_MCE_INTERNAL_H__ */
--- a/arch/x86/kernel/cpu/mce/severity.c
+++ b/arch/x86/kernel/cpu/mce/severity.c
@@ -301,7 +301,7 @@ static noinstr int error_context(struct mce *m, struct pt_regs *regs)
 	}
 }
 
-static int mce_severity_amd_smca(struct mce *m, enum context err_ctx)
+static __always_inline int mce_severity_amd_smca(struct mce *m, enum context err_ctx)
 {
 	u64 mcx_cfg;
 