Commit 07cde313 authored by Eric Dumazet, committed by Thomas Gleixner

x86/msr: Allow rdmsr_safe_on_cpu() to schedule

High latencies can be observed caused by a daemon periodically reading
various MSR on all cpus. On KASAN enabled kernels ~10ms latencies can be
observed simply reading one MSR. Even without KASAN, sending an IPI to a
CPU, which is in a deep sleep state or in a long hard IRQ disabled section,
waiting for the answer can consume hundreds of microseconds.

All usage sites are in preemptible context, convert rdmsr_safe_on_cpu() to
use a completion instead of busy polling.

Overall daemon cpu usage was reduced by 35 %, and latencies caused by
msr_read() disappeared.
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Ingo Molnar <mingo@kernel.org>
Cc: Hugh Dickins <hughd@google.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Eric Dumazet <eric.dumazet@gmail.com>
Link: https://lkml.kernel.org/r/20180323215818.127774-1-edumazet@google.com
parent 13cc36d7
...@@ -2,6 +2,7 @@ ...@@ -2,6 +2,7 @@
#include <linux/export.h> #include <linux/export.h>
#include <linux/preempt.h> #include <linux/preempt.h>
#include <linux/smp.h> #include <linux/smp.h>
#include <linux/completion.h>
#include <asm/msr.h> #include <asm/msr.h>
static void __rdmsr_on_cpu(void *info) static void __rdmsr_on_cpu(void *info)
...@@ -143,13 +144,19 @@ void wrmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs) ...@@ -143,13 +144,19 @@ void wrmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs)
} }
EXPORT_SYMBOL(wrmsr_on_cpus); EXPORT_SYMBOL(wrmsr_on_cpus);
struct msr_info_completion {
struct msr_info msr;
struct completion done;
};
/* These "safe" variants are slower and should be used when the target MSR /* These "safe" variants are slower and should be used when the target MSR
may not actually exist. */ may not actually exist. */
static void __rdmsr_safe_on_cpu(void *info) static void __rdmsr_safe_on_cpu(void *info)
{ {
struct msr_info *rv = info; struct msr_info_completion *rv = info;
rv->err = rdmsr_safe(rv->msr_no, &rv->reg.l, &rv->reg.h); rv->msr.err = rdmsr_safe(rv->msr.msr_no, &rv->msr.reg.l, &rv->msr.reg.h);
complete(&rv->done);
} }
static void __wrmsr_safe_on_cpu(void *info) static void __wrmsr_safe_on_cpu(void *info)
...@@ -161,17 +168,26 @@ static void __wrmsr_safe_on_cpu(void *info) ...@@ -161,17 +168,26 @@ static void __wrmsr_safe_on_cpu(void *info)
/*
 * rdmsr_safe_on_cpu - read an MSR on a given CPU, tolerating non-existent MSRs
 * @cpu:    CPU to execute the read on
 * @msr_no: MSR register number
 * @l:      output, low 32 bits of the MSR value
 * @h:      output, high 32 bits of the MSR value
 *
 * Uses an async cross-CPU call plus a completion so the caller may
 * schedule while waiting, rather than busy-polling for the IPI result.
 * Must be called from preemptible context.
 *
 * Returns 0 on success, a negative error code from the cross-call or
 * from the faulting MSR access otherwise.
 */
int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
{
	struct msr_info_completion rv;
	call_single_data_t csd = {
		.func	= __rdmsr_safe_on_cpu,
		.info	= &rv,
	};
	int err;

	memset(&rv, 0, sizeof(rv));
	init_completion(&rv.done);
	rv.msr.msr_no = msr_no;

	err = smp_call_function_single_async(cpu, &csd);
	if (!err) {
		/* Sleep until the remote handler calls complete(). */
		wait_for_completion(&rv.done);
		err = rv.msr.err;
	}
	*l = rv.msr.reg.l;
	*h = rv.msr.reg.h;

	return err;
}
EXPORT_SYMBOL(rdmsr_safe_on_cpu);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment