Commit 7b0035ea authored by Sean Christopherson, committed by Paolo Bonzini

KVM: selftests: Ensure all migrations are performed when test is affined

Rework the CPU selection in the migration worker to ensure the specified
number of migrations are performed when the test itself is affined to a
subset of CPUs.  The existing logic skips iterations if the target CPU is
not in the original set of possible CPUs, which causes the test to fail
if too many iterations are skipped.

  ==== Test Assertion Failure ====
  rseq_test.c:228: i > (NR_TASK_MIGRATIONS / 2)
  pid=10127 tid=10127 errno=4 - Interrupted system call
     1  0x00000000004018e5: main at rseq_test.c:227
     2  0x00007fcc8fc66bf6: ?? ??:0
     3  0x0000000000401959: _start at ??:?
  Only performed 4 KVM_RUNs, task stalled too much?
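
For context, here is a minimal standalone sketch of the old selection logic
(not part of this patch; the 16-CPU host and the CPUs 8-15 affinity are
assumed values chosen purely for illustration).  Because "i % nr_cpus" only
ever yields 0-7, none of which are in the affinity mask, every iteration is
skipped and zero migrations are performed:

  #define _GNU_SOURCE
  #include <sched.h>
  #include <stdio.h>

  int main(void)
  {
          cpu_set_t possible_mask;
          int i, cpu, nr_cpus, performed = 0;

          /* Assumed affinity: only CPUs 8-15 of a 16-CPU host are usable. */
          CPU_ZERO(&possible_mask);
          for (cpu = 8; cpu < 16; cpu++)
                  CPU_SET(cpu, &possible_mask);

          nr_cpus = CPU_COUNT(&possible_mask);    /* 8 */

          /* Old selection logic: modulo over the *count* of usable CPUs. */
          for (i = 0; i < 100; i++) {
                  cpu = i % nr_cpus;              /* always 0-7, never 8-15 */
                  if (!CPU_ISSET(cpu, &possible_mask))
                          continue;               /* every iteration skipped */
                  performed++;
          }

          printf("performed %d of 100 migrations\n", performed);  /* prints 0 */
          return 0;
  }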

Calculate the min/max possible CPUs as a cheap "best effort" to avoid
high runtimes when the test is affined to a small percentage of CPUs.
Alternatively, a list or xarray of the possible CPUs could be used, but
even in a horrendously inefficient setup, such optimizations are not
needed because the runtime is completely dominated by the cost of
migrating the task, and the absolute runtime is well under a minute in
even truly absurd setups, e.g. running on a subset of vCPUs in a VM that
is heavily overcommitted (16 vCPUs per pCPU).
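
As a rough illustration of the "horrendously inefficient" case, here is a
standalone sketch (assumed values: an affinity mask containing only CPUs 2
and 1023, mirroring the next_cpu() walk added by this patch).  Each
wrap-around merely scans the intervening CPUs with CPU_ISSET(), which is
cheap compared to an actual sched_setaffinity() migration:

  #define _GNU_SOURCE
  #include <sched.h>
  #include <stdio.h>

  static cpu_set_t possible_mask;
  static int min_cpu = 2, max_cpu = 1023;

  /* Same walk as the patch's next_cpu(), minus the TEST_ASSERT(). */
  static int next_cpu(int cpu)
  {
          do {
                  cpu++;
                  if (cpu > max_cpu) {
                          cpu = min_cpu;
                          break;
                  }
          } while (!CPU_ISSET(cpu, &possible_mask));

          return cpu;
  }

  int main(void)
  {
          int i, cpu;

          /* Assumed sparse affinity: only CPUs 2 and 1023 are usable. */
          CPU_ZERO(&possible_mask);
          CPU_SET(2, &possible_mask);
          CPU_SET(1023, &possible_mask);

          /* Bounces between CPU 2 and CPU 1023, never picking an unusable CPU. */
          for (i = 0, cpu = min_cpu; i < 10; i++, cpu = next_cpu(cpu))
                  printf("migration %d -> CPU %d\n", i, cpu);

          return 0;
  }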

Fixes: 61e52f16 ("KVM: selftests: Add a test for KVM_RUN+rseq to detect task migration bugs")
Reported-by: Dongli Zhang <dongli.zhang@oracle.com>
Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20210929234112.1862848-1-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent e8a747d0
@@ -10,6 +10,7 @@
 #include <signal.h>
 #include <syscall.h>
 #include <sys/ioctl.h>
+#include <sys/sysinfo.h>
 #include <asm/barrier.h>
 #include <linux/atomic.h>
 #include <linux/rseq.h>
@@ -39,6 +40,7 @@ static __thread volatile struct rseq __rseq = {
 
 static pthread_t migration_thread;
 static cpu_set_t possible_mask;
+static int min_cpu, max_cpu;
 static bool done;
 
 static atomic_t seq_cnt;
@@ -57,20 +59,37 @@ static void sys_rseq(int flags)
 	TEST_ASSERT(!r, "rseq failed, errno = %d (%s)", errno, strerror(errno));
 }
 
+static int next_cpu(int cpu)
+{
+	/*
+	 * Advance to the next CPU, skipping those that weren't in the original
+	 * affinity set.  Sadly, there is no CPU_SET_FOR_EACH, and cpu_set_t's
+	 * data storage is considered opaque.  Note, if this task is pinned to
+	 * a small set of discontiguous CPUs, e.g. 2 and 1023, this loop will
+	 * burn a lot of cycles and the test will take longer than normal to
+	 * complete.
+	 */
+	do {
+		cpu++;
+		if (cpu > max_cpu) {
+			cpu = min_cpu;
+			TEST_ASSERT(CPU_ISSET(cpu, &possible_mask),
+				    "Min CPU = %d must always be usable", cpu);
+			break;
+		}
+	} while (!CPU_ISSET(cpu, &possible_mask));
+
+	return cpu;
+}
+
 static void *migration_worker(void *ign)
 {
 	cpu_set_t allowed_mask;
-	int r, i, nr_cpus, cpu;
+	int r, i, cpu;
 
 	CPU_ZERO(&allowed_mask);
 
-	nr_cpus = CPU_COUNT(&possible_mask);
-
-	for (i = 0; i < NR_TASK_MIGRATIONS; i++) {
-		cpu = i % nr_cpus;
-		if (!CPU_ISSET(cpu, &possible_mask))
-			continue;
-
+	for (i = 0, cpu = min_cpu; i < NR_TASK_MIGRATIONS; i++, cpu = next_cpu(cpu)) {
 		CPU_SET(cpu, &allowed_mask);
 
 		/*
@@ -154,6 +173,36 @@ static void *migration_worker(void *ign)
 	return NULL;
 }
 
+static int calc_min_max_cpu(void)
+{
+	int i, cnt, nproc;
+
+	if (CPU_COUNT(&possible_mask) < 2)
+		return -EINVAL;
+
+	/*
+	 * CPU_SET doesn't provide a FOR_EACH helper, get the min/max CPU that
+	 * this task is affined to in order to reduce the time spent querying
+	 * unusable CPUs, e.g. if this task is pinned to a small percentage of
+	 * total CPUs.
+	 */
+	nproc = get_nprocs_conf();
+	min_cpu = -1;
+	max_cpu = -1;
+	cnt = 0;
+
+	for (i = 0; i < nproc; i++) {
+		if (!CPU_ISSET(i, &possible_mask))
+			continue;
+		if (min_cpu == -1)
+			min_cpu = i;
+		max_cpu = i;
+		cnt++;
+	}
+
+	return (cnt < 2) ? -EINVAL : 0;
+}
+
 int main(int argc, char *argv[])
 {
 	int r, i, snapshot;
@@ -167,8 +216,8 @@ int main(int argc, char *argv[])
 	TEST_ASSERT(!r, "sched_getaffinity failed, errno = %d (%s)", errno,
 		    strerror(errno));
 
-	if (CPU_COUNT(&possible_mask) < 2) {
-		print_skip("Only one CPU, task migration not possible\n");
+	if (calc_min_max_cpu()) {
+		print_skip("Only one usable CPU, task migration not possible");
 		exit(KSFT_SKIP);
 	}