Commit 95a3d445 authored by Thomas Gleixner

x86/kvmclock: Switch kvmclock data to a PER_CPU variable

The previous removal of the memblock dependency from kvmclock introduced a
static data array sized 64 bytes * CONFIG_NR_CPUS. That's wasteful on large
systems when kvmclock is not used.

Replace it with:

 - A static page sized array of pvclock data. It's page sized because the
   pvclock data of the boot CPU is mapped into the vDSO; otherwise random
   other data sharing that page would be exposed to user space.

 - A PER_CPU variable of pvclock data pointers. This is used to access the
   pvclock data storage on each CPU. A condensed sketch of both follows.
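
For orientation, the data side of the change, condensed from the diff
below (all identifiers as in arch/x86/kernel/kvmclock.c):

	/* One page worth of pvclock slots for early boot. Page sized and
	 * page aligned because slot 0 is mapped into the vDSO. */
	#define HVC_BOOT_ARRAY_SIZE \
		(PAGE_SIZE / sizeof(struct pvclock_vsyscall_time_info))

	static struct pvclock_vsyscall_time_info
		hv_clock_boot[HVC_BOOT_ARRAY_SIZE] __aligned(PAGE_SIZE);

	/* Per CPU pointer to this CPU's pvclock data slot */
	static DEFINE_PER_CPU(struct pvclock_vsyscall_time_info *,
			      hv_clock_per_cpu);

	/* All readers go through the per CPU pointer */
	static inline struct pvclock_vcpu_time_info *this_cpu_pvti(void)
	{
		return &this_cpu_read(hv_clock_per_cpu)->pvti;
	}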

The setup is done in two stages:

 - Early boot stores the pointer to the static page for the boot CPU in
   the per cpu data.

 - In the preparatory stage of CPU hotplug, either assign an element of
   the static array (when the CPU number is in that range) or allocate
   memory, and initialize the per cpu pointer. Both stages are sketched
   below.
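
Condensed from the diff below, the two stages look like this:

	/* Stage 1, kvmclock_init() on the boot CPU: point CPU0 at slot 0
	 * of the static page before registering the clock. */
	this_cpu_write(hv_clock_per_cpu, &hv_clock_boot[0]);

	/* Stage 2, CPUHP_BP_PREPARE_DYN callback, run before a CPU is
	 * brought up: hand out static slots while they last, then
	 * allocate. */
	static int kvmclock_setup_percpu(unsigned int cpu)
	{
		struct pvclock_vsyscall_time_info *p =
			per_cpu(hv_clock_per_cpu, cpu);

		/* CPU0 is set up in init; the per cpu area setup may have
		 * replicated its pointer, so check carefully. */
		if (!cpu || (p && p != per_cpu(hv_clock_per_cpu, 0)))
			return 0;

		if (cpu < HVC_BOOT_ARRAY_SIZE)
			p = &hv_clock_boot[cpu];
		else
			p = kzalloc(sizeof(*p), GFP_KERNEL);

		per_cpu(hv_clock_per_cpu, cpu) = p;
		return p ? 0 : -ENOMEM;
	}

The callback is registered from kvmclock_init() via
cpuhp_setup_state(CPUHP_BP_PREPARE_DYN, ...), so it runs in the
preparatory stage of each CPU hotplug operation.
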
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Pavel Tatashin <pasha.tatashin@oracle.com>
Acked-by: Paolo Bonzini <pbonzini@redhat.com>
Cc: steven.sistare@oracle.com
Cc: daniel.m.jordan@oracle.com
Cc: linux@armlinux.org.uk
Cc: schwidefsky@de.ibm.com
Cc: heiko.carstens@de.ibm.com
Cc: john.stultz@linaro.org
Cc: sboyd@codeaurora.org
Cc: hpa@zytor.com
Cc: douly.fnst@cn.fujitsu.com
Cc: peterz@infradead.org
Cc: prarit@redhat.com
Cc: feng.tang@intel.com
Cc: pmladek@suse.com
Cc: gnomes@lxorguk.ukuu.org.uk
Cc: linux-s390@vger.kernel.org
Cc: boris.ostrovsky@oracle.com
Cc: jgross@suse.com
Link: https://lkml.kernel.org/r/20180719205545.16512-8-pasha.tatashin@oracle.com
parent e499a9b6
arch/x86/kernel/kvmclock.c

@@ -23,6 +23,7 @@
 #include <asm/apic.h>
 #include <linux/percpu.h>
 #include <linux/hardirq.h>
+#include <linux/cpuhotplug.h>
 #include <linux/sched.h>
 #include <linux/sched/clock.h>
 #include <linux/mm.h>
@@ -55,12 +56,23 @@ early_param("no-kvmclock-vsyscall", parse_no_kvmclock_vsyscall);
 
 /* Aligned to page sizes to match whats mapped via vsyscalls to userspace */
-#define HV_CLOCK_SIZE	(sizeof(struct pvclock_vsyscall_time_info) * NR_CPUS)
+#define HVC_BOOT_ARRAY_SIZE \
+	(PAGE_SIZE / sizeof(struct pvclock_vsyscall_time_info))
 
-static u8 hv_clock_mem[PAGE_ALIGN(HV_CLOCK_SIZE)] __aligned(PAGE_SIZE);
-
-/* The hypervisor will put information about time periodically here */
-static struct pvclock_vsyscall_time_info *hv_clock __ro_after_init;
+static struct pvclock_vsyscall_time_info
+			hv_clock_boot[HVC_BOOT_ARRAY_SIZE] __aligned(PAGE_SIZE);
 static struct pvclock_wall_clock wall_clock;
+static DEFINE_PER_CPU(struct pvclock_vsyscall_time_info *, hv_clock_per_cpu);
+
+static inline struct pvclock_vcpu_time_info *this_cpu_pvti(void)
+{
+	return &this_cpu_read(hv_clock_per_cpu)->pvti;
+}
+
+static inline struct pvclock_vsyscall_time_info *this_cpu_hvclock(void)
+{
+	return this_cpu_read(hv_clock_per_cpu);
+}
 
 /*
  * The wallclock is the time of day when we booted. Since then, some time may
@@ -69,17 +81,10 @@ static struct pvclock_wall_clock wall_clock;
  */
 static void kvm_get_wallclock(struct timespec64 *now)
 {
-	struct pvclock_vcpu_time_info *vcpu_time;
-	int cpu;
-
 	wrmsrl(msr_kvm_wall_clock, slow_virt_to_phys(&wall_clock));
 
-	cpu = get_cpu();
-
-	vcpu_time = &hv_clock[cpu].pvti;
-	pvclock_read_wallclock(&wall_clock, vcpu_time, now);
-
-	put_cpu();
+	preempt_disable();
+	pvclock_read_wallclock(&wall_clock, this_cpu_pvti(), now);
+	preempt_enable();
 }
 
 static int kvm_set_wallclock(const struct timespec64 *now)
@@ -89,14 +94,10 @@ static int kvm_set_wallclock(const struct timespec64 *now)
 
 static u64 kvm_clock_read(void)
 {
-	struct pvclock_vcpu_time_info *src;
 	u64 ret;
-	int cpu;
 
 	preempt_disable_notrace();
-	cpu = smp_processor_id();
-	src = &hv_clock[cpu].pvti;
-	ret = pvclock_clocksource_read(src);
+	ret = pvclock_clocksource_read(this_cpu_pvti());
 	preempt_enable_notrace();
 	return ret;
 }
@@ -141,7 +142,7 @@ static inline void kvm_sched_clock_init(bool stable)
 static unsigned long kvm_get_tsc_khz(void)
 {
 	setup_force_cpu_cap(X86_FEATURE_TSC_KNOWN_FREQ);
-	return pvclock_tsc_khz(&hv_clock[0].pvti);
+	return pvclock_tsc_khz(this_cpu_pvti());
 }
 
 static void kvm_get_preset_lpj(void)
@@ -158,15 +159,14 @@ static void kvm_get_preset_lpj(void)
 
 bool kvm_check_and_clear_guest_paused(void)
 {
-	struct pvclock_vcpu_time_info *src;
+	struct pvclock_vsyscall_time_info *src = this_cpu_hvclock();
 	bool ret = false;
 
-	if (!hv_clock)
+	if (!src)
 		return ret;
 
-	src = &hv_clock[smp_processor_id()].pvti;
-	if ((src->flags & PVCLOCK_GUEST_STOPPED) != 0) {
-		src->flags &= ~PVCLOCK_GUEST_STOPPED;
+	if ((src->pvti.flags & PVCLOCK_GUEST_STOPPED) != 0) {
+		src->pvti.flags &= ~PVCLOCK_GUEST_STOPPED;
 		pvclock_touch_watchdogs();
 		ret = true;
 	}
@@ -184,17 +184,15 @@ EXPORT_SYMBOL_GPL(kvm_clock);
 
 static void kvm_register_clock(char *txt)
 {
-	struct pvclock_vcpu_time_info *src;
-	int cpu = smp_processor_id();
+	struct pvclock_vsyscall_time_info *src = this_cpu_hvclock();
 	u64 pa;
 
-	if (!hv_clock)
+	if (!src)
 		return;
 
-	src = &hv_clock[cpu].pvti;
-	pa = slow_virt_to_phys(src) | 0x01ULL;
+	pa = slow_virt_to_phys(&src->pvti) | 0x01ULL;
 	wrmsrl(msr_kvm_system_time, pa);
-	pr_info("kvm-clock: cpu %d, msr %llx, %s", cpu, pa, txt);
+	pr_info("kvm-clock: cpu %d, msr %llx, %s", smp_processor_id(), pa, txt);
 }
 
 static void kvm_save_sched_clock_state(void)
@@ -242,12 +240,12 @@ static int __init kvm_setup_vsyscall_timeinfo(void)
 #ifdef CONFIG_X86_64
 	u8 flags;
 
-	if (!hv_clock || !kvmclock_vsyscall)
+	if (!per_cpu(hv_clock_per_cpu, 0) || !kvmclock_vsyscall)
 		return 0;
 
-	flags = pvclock_read_flags(&hv_clock[0].pvti);
+	flags = pvclock_read_flags(&hv_clock_boot[0].pvti);
 	if (!(flags & PVCLOCK_TSC_STABLE_BIT))
-		return 1;
+		return 0;
 
 	kvm_clock.archdata.vclock_mode = VCLOCK_PVCLOCK;
 #endif
@@ -255,6 +253,28 @@ static int __init kvm_setup_vsyscall_timeinfo(void)
 }
 early_initcall(kvm_setup_vsyscall_timeinfo);
 
+static int kvmclock_setup_percpu(unsigned int cpu)
+{
+	struct pvclock_vsyscall_time_info *p = per_cpu(hv_clock_per_cpu, cpu);
+
+	/*
+	 * The per cpu area setup replicates CPU0 data to all cpu
+	 * pointers. So carefully check. CPU0 has been set up in init
+	 * already.
+	 */
+	if (!cpu || (p && p != per_cpu(hv_clock_per_cpu, 0)))
+		return 0;
+
+	/* Use the static page for the first CPUs, allocate otherwise */
+	if (cpu < HVC_BOOT_ARRAY_SIZE)
+		p = &hv_clock_boot[cpu];
+	else
+		p = kzalloc(sizeof(*p), GFP_KERNEL);
+
+	per_cpu(hv_clock_per_cpu, cpu) = p;
+	return p ? 0 : -ENOMEM;
+}
+
 void __init kvmclock_init(void)
 {
 	u8 flags;
@@ -269,17 +289,22 @@ void __init kvmclock_init(void)
 		return;
 	}
 
+	if (cpuhp_setup_state(CPUHP_BP_PREPARE_DYN, "kvmclock:setup_percpu",
+			      kvmclock_setup_percpu, NULL) < 0) {
+		return;
+	}
+
 	pr_info("kvm-clock: Using msrs %x and %x",
 		msr_kvm_system_time, msr_kvm_wall_clock);
 
-	hv_clock = (struct pvclock_vsyscall_time_info *)hv_clock_mem;
+	this_cpu_write(hv_clock_per_cpu, &hv_clock_boot[0]);
 	kvm_register_clock("primary cpu clock");
-	pvclock_set_pvti_cpu0_va(hv_clock);
+	pvclock_set_pvti_cpu0_va(hv_clock_boot);
 
 	if (kvm_para_has_feature(KVM_FEATURE_CLOCKSOURCE_STABLE_BIT))
 		pvclock_set_flags(PVCLOCK_TSC_STABLE_BIT);
 
-	flags = pvclock_read_flags(&hv_clock[0].pvti);
+	flags = pvclock_read_flags(&hv_clock_boot[0].pvti);
 	kvm_sched_clock_init(flags & PVCLOCK_TSC_STABLE_BIT);
 
 	x86_platform.calibrate_tsc = kvm_get_tsc_khz;
...