Commit 6758034e authored by Thomas Gleixner

x86/process/64: Make save_fsgs_for_kvm() ready for FSGSBASE

save_fsgs_for_kvm() is invoked via

  vcpu_enter_guest()
    kvm_x86_ops.prepare_guest_switch(vcpu)
      vmx_prepare_switch_to_guest()
        save_fsgs_for_kvm()

with preemption disabled, but interrupts enabled.

The upcoming FSGSBASE-based GS save needs interrupts to be disabled. This
could be done in the helper function, but that function is also called from
switch_to() which has interrupts disabled already.
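
For illustration, a minimal sketch (not the code added by the series; the helper name and the inline asm are assumptions) of why an FSGSBASE-based save of the user GS base needs interrupts off: the inactive (user) GSBASE is only reachable via a SWAPGS/RDGSBASE/SWAPGS sequence, and an interrupt landing between the two SWAPGS instructions would enter the kernel with the user's GS base installed.

  /* Illustrative sketch only, not the kernel's implementation. */
  static unsigned long sketch_read_user_gsbase(void)
  {
        unsigned long gsbase;

        /*
         * Caller must have interrupts disabled: an IRQ between the
         * two SWAPGS instructions would run kernel code with the
         * user GS base and corrupt per-CPU data accesses.
         */
        asm volatile("swapgs; rdgsbase %0; swapgs"
                     : "=r" (gsbase) : : "memory");
        return gsbase;
  }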

Disable interrupts inside save_fsgs_for_kvm() and rename the function to
current_save_fsgs() so it can be invoked from other places.
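
As a rough sketch of the two call contexts after this change (fragments only; the argument name in the context-switch call is taken from process_64.c, everything else is elided):

  /* KVM guest-entry path: preemption disabled, interrupts still on,
   * so the renamed wrapper disables them around the save itself. */
  current_save_fsgs();          /* from vmx_prepare_switch_to_guest() */

  /* Context-switch path: already runs with interrupts disabled and
   * keeps calling the inner helper directly. */
  save_fsgs(prev_p);            /* from __switch_to() */
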
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Sasha Levin <sashal@kernel.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Link: https://lkml.kernel.org/r/20200528201402.1708239-7-sashal@kernel.org
parent 58edfd2e
@@ -457,10 +457,8 @@ static inline unsigned long cpu_kernelmode_gs_base(int cpu)
 DECLARE_PER_CPU(unsigned int, irq_count);
 extern asmlinkage void ignore_sysret(void);
 
-#if IS_ENABLED(CONFIG_KVM)
 /* Save actual FS/GS selectors and bases to current->thread */
-void save_fsgs_for_kvm(void);
-#endif
+void current_save_fsgs(void);
 #else /* X86_64 */
 #ifdef CONFIG_STACKPROTECTOR
 /*
...
@@ -240,18 +240,21 @@ static __always_inline void save_fsgs(struct task_struct *task)
 	save_base_legacy(task, task->thread.gsindex, GS);
 }
 
-#if IS_ENABLED(CONFIG_KVM)
 /*
  * While a process is running,current->thread.fsbase and current->thread.gsbase
- * may not match the corresponding CPU registers (see save_base_legacy()). KVM
- * wants an efficient way to save and restore FSBASE and GSBASE.
- * When FSGSBASE extensions are enabled, this will have to use RD{FS,GS}BASE.
+ * may not match the corresponding CPU registers (see save_base_legacy()).
  */
-void save_fsgs_for_kvm(void)
+void current_save_fsgs(void)
 {
+	unsigned long flags;
+
+	/* Interrupts need to be off for FSGSBASE */
+	local_irq_save(flags);
 	save_fsgs(current);
+	local_irq_restore(flags);
 }
-EXPORT_SYMBOL_GPL(save_fsgs_for_kvm);
+#if IS_ENABLED(CONFIG_KVM)
+EXPORT_SYMBOL_GPL(current_save_fsgs);
 #endif
 
 static __always_inline void loadseg(enum which_selector which,
...
@@ -1172,7 +1172,7 @@ void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
 
 	gs_base = cpu_kernelmode_gs_base(cpu);
 	if (likely(is_64bit_mm(current->mm))) {
-		save_fsgs_for_kvm();
+		current_save_fsgs();
 		fs_sel = current->thread.fsindex;
 		gs_sel = current->thread.gsindex;
 		fs_base = current->thread.fsbase;
...