Commit f10f0481 authored by Linus Torvalds

Merge tag 'for-linus-rseq' of git://git.kernel.org/pub/scm/virt/kvm/kvm

Pull rseq fixes from Paolo Bonzini:
 "A fix for a bug with restartable sequences and KVM.

  KVM's handling of TIF_NOTIFY_RESUME, e.g. for task migration, clears
  the flag without informing rseq and leads to stale data in userspace's
  rseq struct"

* tag 'for-linus-rseq' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  KVM: selftests: Remove __NR_userfaultfd syscall fallback
  KVM: selftests: Add a test for KVM_RUN+rseq to detect task migration bugs
  tools: Move x86 syscall number fallbacks to .../uapi/
  entry: rseq: Call rseq_handle_notify_resume() in tracehook_notify_resume()
  KVM: rseq: Update rseq when processing NOTIFY_RESUME on xfer to KVM guest
parents 9bc62afe 2da4a235
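For readers unfamiliar with rseq, the sketch below shows the userspace side of the contract the bug violated: a task registers a struct rseq area, and the kernel keeps its cpu_id field current across migrations via NOTIFY_RESUME. This is an illustration written for this summary, not code from the series; the program structure and signature value are arbitrary assumptions, it assumes kernel headers that provide __NR_rseq and linux/rseq.h, and on glibc 2.35+ the C library may have registered rseq already, in which case the raw syscall fails with EBUSY.

/* Illustrative only; not part of this series. */
#define _GNU_SOURCE
#include <errno.h>
#include <sched.h>
#include <stdio.h>
#include <string.h>
#include <syscall.h>
#include <unistd.h>
#include <linux/rseq.h>

#define MY_RSEQ_SIG	0x53053053	/* arbitrary signature, an assumption */

static __thread volatile struct rseq rseq_area = {
	.cpu_id = RSEQ_CPU_ID_UNINITIALIZED,
};

int main(void)
{
	/* Register the rseq area; may fail with EBUSY if libc already did. */
	if (syscall(__NR_rseq, &rseq_area, sizeof(rseq_area), 0, MY_RSEQ_SIG)) {
		fprintf(stderr, "rseq registration failed: %s\n", strerror(errno));
		return 1;
	}

	/*
	 * The kernel refreshes rseq_area.cpu_id on NOTIFY_RESUME; under the
	 * bug, a KVM vCPU task could see it disagree with sched_getcpu().
	 */
	printf("rseq cpu_id = %d, sched_getcpu() = %d\n",
	       (int)rseq_area.cpu_id, sched_getcpu());

	syscall(__NR_rseq, &rseq_area, sizeof(rseq_area),
		RSEQ_FLAG_UNREGISTER, MY_RSEQ_SIG);
	return 0;
}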
@@ -628,7 +628,6 @@ do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
 				uprobe_notify_resume(regs);
 			} else {
 				tracehook_notify_resume(regs);
-				rseq_handle_notify_resume(NULL, regs);
 			}
 		}
 		local_irq_disable();
...
@@ -940,10 +940,8 @@ void do_notify_resume(struct pt_regs *regs, unsigned long thread_flags)
 		if (thread_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL))
 			do_signal(regs);
 
-		if (thread_flags & _TIF_NOTIFY_RESUME) {
+		if (thread_flags & _TIF_NOTIFY_RESUME)
 			tracehook_notify_resume(regs);
-			rseq_handle_notify_resume(NULL, regs);
-		}
 
 		if (thread_flags & _TIF_FOREIGN_FPSTATE)
 			fpsimd_restore_current_state();
...
@@ -260,8 +260,6 @@ asmlinkage void do_notify_resume(struct pt_regs *regs,
 	if (thread_info_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL))
 		do_signal(regs);
 
-	if (thread_info_flags & _TIF_NOTIFY_RESUME) {
+	if (thread_info_flags & _TIF_NOTIFY_RESUME)
 		tracehook_notify_resume(regs);
-		rseq_handle_notify_resume(NULL, regs);
-	}
 }
@@ -906,10 +906,8 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, void *unused,
 	if (thread_info_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL))
 		do_signal(regs);
 
-	if (thread_info_flags & _TIF_NOTIFY_RESUME) {
+	if (thread_info_flags & _TIF_NOTIFY_RESUME)
 		tracehook_notify_resume(regs);
-		rseq_handle_notify_resume(NULL, regs);
-	}
 
 	user_enter();
 }
...
@@ -293,10 +293,8 @@ void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags)
 		do_signal(current);
 	}
 
-	if (thread_info_flags & _TIF_NOTIFY_RESUME) {
+	if (thread_info_flags & _TIF_NOTIFY_RESUME)
 		tracehook_notify_resume(regs);
-		rseq_handle_notify_resume(NULL, regs);
-	}
 }
 
 static unsigned long get_tm_stackpointer(struct task_struct *tsk)
...
@@ -197,6 +197,8 @@ static inline void tracehook_notify_resume(struct pt_regs *regs)
 	mem_cgroup_handle_over_high();
 	blkcg_maybe_throttle_current();
+
+	rseq_handle_notify_resume(NULL, regs);
 }
 
 /*
...
@@ -171,10 +171,8 @@ static unsigned long exit_to_user_mode_loop(struct pt_regs *regs,
 		if (ti_work & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL))
 			handle_signal_work(regs, ti_work);
 
-		if (ti_work & _TIF_NOTIFY_RESUME) {
+		if (ti_work & _TIF_NOTIFY_RESUME)
 			tracehook_notify_resume(regs);
-			rseq_handle_notify_resume(NULL, regs);
-		}
 
 		/* Architecture specific TIF work */
 		arch_exit_to_user_mode_work(regs, ti_work);
...
@@ -282,9 +282,17 @@ void __rseq_handle_notify_resume(struct ksignal *ksig, struct pt_regs *regs)
 	if (unlikely(t->flags & PF_EXITING))
 		return;
-	ret = rseq_ip_fixup(regs);
-	if (unlikely(ret < 0))
-		goto error;
+
+	/*
+	 * regs is NULL if and only if the caller is in a syscall path.  Skip
+	 * fixup and leave rseq_cs as is so that rseq_syscall() will detect and
+	 * kill a misbehaving userspace on debug kernels.
+	 */
+	if (regs) {
+		ret = rseq_ip_fixup(regs);
+		if (unlikely(ret < 0))
+			goto error;
+	}
 	if (unlikely(rseq_update_cpu_id(t)))
 		goto error;
 	return;
...
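The regs check added above is what lets callers with no user register state reuse the common NOTIFY_RESUME path, most notably KVM's transfer-to-guest loop, which is why no kernel/entry/kvm.c hunk appears in this merge view. The sketch below is paraphrased from kernel/entry/kvm.c to show that flow; it is an illustration of the resulting behavior, not a verbatim excerpt from this series.

/* Paraphrased sketch of kernel/entry/kvm.c; illustration, not verbatim. */
static int xfer_to_guest_mode_work(struct kvm_vcpu *vcpu, unsigned long ti_work)
{
	do {
		int ret;

		if (ti_work & _TIF_SIGPENDING) {
			kvm_handle_signal_exit(vcpu);
			return -EINTR;
		}

		if (ti_work & _TIF_NEED_RESCHED)
			schedule();

		/*
		 * With this series, tracehook_notify_resume() also calls
		 * rseq_handle_notify_resume(); the NULL pt_regs means the
		 * rseq CPU ID is refreshed while the IP fixup is skipped.
		 */
		if (ti_work & _TIF_NOTIFY_RESUME)
			tracehook_notify_resume(NULL);

		ret = arch_xfer_to_guest_mode_handle_work(vcpu, ti_work);
		if (ret)
			return ret;

		ti_work = READ_ONCE(current_thread_info()->flags);
	} while (ti_work & XFER_TO_GUEST_MODE_WORK || need_resched());

	return 0;
}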
 /* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __NR_userfaultfd
+#define __NR_userfaultfd 282
+#endif
 #ifndef __NR_perf_event_open
 # define __NR_perf_event_open 298
 #endif
...
@@ -48,6 +48,7 @@
 /kvm_page_table_test
 /memslot_modification_stress_test
 /memslot_perf_test
+/rseq_test
 /set_memory_region_test
 /steal_time
 /kvm_binary_stats_test
@@ -80,6 +80,7 @@ TEST_GEN_PROGS_x86_64 += kvm_create_max_vcpus
 TEST_GEN_PROGS_x86_64 += kvm_page_table_test
 TEST_GEN_PROGS_x86_64 += memslot_modification_stress_test
 TEST_GEN_PROGS_x86_64 += memslot_perf_test
+TEST_GEN_PROGS_x86_64 += rseq_test
 TEST_GEN_PROGS_x86_64 += set_memory_region_test
 TEST_GEN_PROGS_x86_64 += steal_time
 TEST_GEN_PROGS_x86_64 += kvm_binary_stats_test
@@ -93,6 +94,7 @@ TEST_GEN_PROGS_aarch64 += dirty_log_test
 TEST_GEN_PROGS_aarch64 += dirty_log_perf_test
 TEST_GEN_PROGS_aarch64 += kvm_create_max_vcpus
 TEST_GEN_PROGS_aarch64 += kvm_page_table_test
+TEST_GEN_PROGS_aarch64 += rseq_test
 TEST_GEN_PROGS_aarch64 += set_memory_region_test
 TEST_GEN_PROGS_aarch64 += steal_time
 TEST_GEN_PROGS_aarch64 += kvm_binary_stats_test
@@ -104,6 +106,7 @@ TEST_GEN_PROGS_s390x += demand_paging_test
 TEST_GEN_PROGS_s390x += dirty_log_test
 TEST_GEN_PROGS_s390x += kvm_create_max_vcpus
 TEST_GEN_PROGS_s390x += kvm_page_table_test
+TEST_GEN_PROGS_s390x += rseq_test
 TEST_GEN_PROGS_s390x += set_memory_region_test
 TEST_GEN_PROGS_s390x += kvm_binary_stats_test
...
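With the .gitignore and Makefile entries above, the new rseq_test builds alongside the other KVM selftests; following the general kselftest convention (not something this series specifies), it can be built and run from the kernel tree with make -C tools/testing/selftests TARGETS=kvm run_tests. The new test source follows.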
// SPDX-License-Identifier: GPL-2.0-only
#define _GNU_SOURCE /* for program_invocation_short_name */
#include <errno.h>
#include <fcntl.h>
#include <pthread.h>
#include <sched.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <signal.h>
#include <syscall.h>
#include <sys/ioctl.h>
#include <asm/barrier.h>
#include <linux/atomic.h>
#include <linux/rseq.h>
#include <linux/unistd.h>
#include "kvm_util.h"
#include "processor.h"
#include "test_util.h"

#define VCPU_ID 0

static __thread volatile struct rseq __rseq = {
	.cpu_id = RSEQ_CPU_ID_UNINITIALIZED,
};

/*
 * Use an arbitrary, bogus signature for configuring rseq; this test does not
 * actually enter an rseq critical section.
 */
#define RSEQ_SIG 0xdeadbeef

/*
 * Any bug related to task migration is likely to be timing-dependent; perform
 * a large number of migrations to reduce the odds of a false negative.
 */
#define NR_TASK_MIGRATIONS 100000

static pthread_t migration_thread;
static cpu_set_t possible_mask;
static bool done;

static atomic_t seq_cnt;

static void guest_code(void)
{
	for (;;)
		GUEST_SYNC(0);
}

static void sys_rseq(int flags)
{
	int r;

	r = syscall(__NR_rseq, &__rseq, sizeof(__rseq), flags, RSEQ_SIG);
	TEST_ASSERT(!r, "rseq failed, errno = %d (%s)", errno, strerror(errno));
}
static void *migration_worker(void *ign)
{
	cpu_set_t allowed_mask;
	int r, i, nr_cpus, cpu;

	CPU_ZERO(&allowed_mask);

	nr_cpus = CPU_COUNT(&possible_mask);

	for (i = 0; i < NR_TASK_MIGRATIONS; i++) {
		cpu = i % nr_cpus;
		if (!CPU_ISSET(cpu, &possible_mask))
			continue;

		CPU_SET(cpu, &allowed_mask);

		/*
		 * Bump the sequence count twice to allow the reader to detect
		 * that a migration may have occurred in between rseq and sched
		 * CPU ID reads.  An odd sequence count indicates a migration
		 * is in-progress, while a completely different count indicates
		 * a migration occurred since the count was last read.
		 */
		atomic_inc(&seq_cnt);

		/*
		 * Ensure the odd count is visible while sched_getcpu() isn't
		 * stable, i.e. while changing affinity is in-progress.
		 */
		smp_wmb();
		r = sched_setaffinity(0, sizeof(allowed_mask), &allowed_mask);
		TEST_ASSERT(!r, "sched_setaffinity failed, errno = %d (%s)",
			    errno, strerror(errno));
		smp_wmb();
		atomic_inc(&seq_cnt);

		CPU_CLR(cpu, &allowed_mask);

		/*
		 * Wait 1-10us before proceeding to the next iteration and more
		 * specifically, before bumping seq_cnt again.  A delay is
		 * needed on three fronts:
		 *
		 *  1. To allow sched_setaffinity() to prompt migration before
		 *     ioctl(KVM_RUN) enters the guest so that TIF_NOTIFY_RESUME
		 *     (or TIF_NEED_RESCHED, which indirectly leads to handling
		 *     NOTIFY_RESUME) is handled in KVM context.
		 *
		 *     If NOTIFY_RESUME/NEED_RESCHED is set after KVM enters
		 *     the guest, the guest will trigger an IO/MMIO exit all the
		 *     way to userspace and the TIF flags will be handled by
		 *     the generic "exit to userspace" logic, not by KVM.  The
		 *     exit to userspace is necessary to give the test a chance
		 *     to check the rseq CPU ID (see #2).
		 *
		 *     Alternatively, guest_code() could include an instruction
		 *     to trigger an exit that is handled by KVM, but any such
		 *     exit requires architecture specific code.
		 *
		 *  2. To let ioctl(KVM_RUN) make its way back to the test
		 *     before the next round of migration.  The test's check on
		 *     the rseq CPU ID must wait for migration to complete in
		 *     order to avoid false positives, thus any kernel rseq bug
		 *     will be missed if the next migration starts before the
		 *     check completes.
		 *
		 *  3. To ensure the read-side makes efficient forward progress,
		 *     e.g. if sched_getcpu() involves a syscall.  Stalling the
		 *     read-side means the test will spend more time waiting for
		 *     sched_getcpu() to stabilize and less time trying to hit
		 *     the timing-dependent bug.
		 *
		 * Because any bug in this area is likely to be timing-dependent,
		 * run with a range of delays at 1us intervals from 1us to 10us
		 * as a best effort to avoid tuning the test to the point where
		 * it can hit _only_ the original bug and not detect future
		 * regressions.
		 *
		 * The original bug can reproduce with a delay up to ~500us on
		 * x86-64, but starts to require more iterations to reproduce
		 * as the delay creeps above ~10us, and the average runtime of
		 * each iteration obviously increases as well.  Cap the delay
		 * at 10us to keep test runtime reasonable while minimizing
		 * potential coverage loss.
		 *
		 * The lower bound for reproducing the bug is likely below 1us,
		 * e.g. failures occur on x86-64 with nanosleep(0), but at that
		 * point the overhead of the syscall likely dominates the delay.
		 * Use usleep() for simplicity and to avoid unnecessary kernel
		 * dependencies.
		 */
		usleep((i % 10) + 1);
	}
	done = true;
	return NULL;
}
int main(int argc, char *argv[])
{
	int r, i, snapshot;
	struct kvm_vm *vm;
	u32 cpu, rseq_cpu;

	/* Tell stdout not to buffer its content */
	setbuf(stdout, NULL);

	r = sched_getaffinity(0, sizeof(possible_mask), &possible_mask);
	TEST_ASSERT(!r, "sched_getaffinity failed, errno = %d (%s)", errno,
		    strerror(errno));

	if (CPU_COUNT(&possible_mask) < 2) {
		print_skip("Only one CPU, task migration not possible");
		exit(KSFT_SKIP);
	}

	sys_rseq(0);

	/*
	 * Create and run a dummy VM that immediately exits to userspace via
	 * GUEST_SYNC, while concurrently migrating the process by setting its
	 * CPU affinity.
	 */
	vm = vm_create_default(VCPU_ID, 0, guest_code);

	pthread_create(&migration_thread, NULL, migration_worker, NULL);

	for (i = 0; !done; i++) {
		vcpu_run(vm, VCPU_ID);
		TEST_ASSERT(get_ucall(vm, VCPU_ID, NULL) == UCALL_SYNC,
			    "Guest failed?");

		/*
		 * Verify rseq's CPU matches sched's CPU.  Ensure migration
		 * doesn't occur between sched_getcpu() and reading the rseq
		 * cpu_id by rereading both if the sequence count changes, or
		 * if the count is odd (migration in-progress).
		 */
		do {
			/*
			 * Drop bit 0 to force a mismatch if the count is odd,
			 * i.e. if a migration is in-progress.
			 */
			snapshot = atomic_read(&seq_cnt) & ~1;

			/*
			 * Ensure reading sched_getcpu() and rseq.cpu_id
			 * complete in a single "no migration" window, i.e. are
			 * not reordered across the seq_cnt reads.
			 */
			smp_rmb();
			cpu = sched_getcpu();
			rseq_cpu = READ_ONCE(__rseq.cpu_id);
			smp_rmb();
		} while (snapshot != atomic_read(&seq_cnt));

		TEST_ASSERT(rseq_cpu == cpu,
			    "rseq CPU = %d, sched CPU = %d\n", rseq_cpu, cpu);
	}

	/*
	 * Sanity check that the test was able to enter the guest a reasonable
	 * number of times, e.g. didn't get stalled too often/long waiting for
	 * sched_getcpu() to stabilize.  A 2:1 migration:KVM_RUN ratio is a
	 * fairly conservative ratio on x86-64, which can do _more_ KVM_RUNs
	 * than migrations given the 1us+ delay in the migration task.
	 */
	TEST_ASSERT(i > (NR_TASK_MIGRATIONS / 2),
		    "Only performed %d KVM_RUNs, task stalled too much?\n", i);

	pthread_join(migration_thread, NULL);

	kvm_vm_free(vm);

	sys_rseq(RSEQ_FLAG_UNREGISTER);

	return 0;
}