Commit 356dab4e authored by Paolo Bonzini


Merge tag 'kvm-s390-next-6.12-1' of https://git.kernel.org/pub/scm/linux/kernel/git/kvms390/linux into HEAD

* New ucontrol selftest
* Inline assembly touchups
parents 0cdcc99e f9b56b2c
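
For orientation, here is a minimal user-space sketch (illustrative only, not part of this merge) of the API path the new ucontrol selftest exercises: a user-controlled VM is created by passing KVM_VM_S390_UCONTROL to KVM_CREATE_VM (which requires CAP_SYS_ADMIN and CONFIG_KVM_S390_UCONTROL=y), and guest memory is then mapped per vcpu with KVM_S390_UCAS_MAP rather than through KVM memslots. Error handling is omitted and the memory size is arbitrary.

/*
 * Illustrative sketch only: create a ucontrol VM with one vcpu and back
 * its guest address space with anonymous, 1 MiB aligned memory.
 */
#include <fcntl.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int main(void)
{
	const unsigned long vm_size = 1UL << 20;		/* arbitrary example size */
	void *vm_mem = aligned_alloc(1UL << 20, vm_size);	/* backing memory */
	int kvm_fd = open("/dev/kvm", O_RDWR);
	int vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, KVM_VM_S390_UCONTROL);
	int vcpu_fd = ioctl(vm_fd, KVM_CREATE_VCPU, 0);
	struct kvm_s390_ucas_mapping map = {
		.user_addr = (unsigned long)vm_mem,
		.vcpu_addr = 0,			/* guest address the memory appears at */
		.length = vm_size,
	};

	/* ucontrol VMs have no KVM memslots; memory is mapped per vcpu instead */
	ioctl(vcpu_fd, KVM_S390_UCAS_MAP, &map);
	return 0;
}
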
@@ -59,6 +59,7 @@ CONFIG_CMM=m
CONFIG_APPLDATA_BASE=y
CONFIG_S390_HYPFS_FS=y
CONFIG_KVM=m
CONFIG_KVM_S390_UCONTROL=y
CONFIG_S390_UNWIND_SELFTEST=m
CONFIG_S390_KPROBES_SANITY_TEST=m
CONFIG_S390_MODULES_SANITY_TEST=m
......
@@ -348,20 +348,29 @@ static inline int plo_test_bit(unsigned char nr)
return cc == 0;
}
static __always_inline void __insn32_query(unsigned int opcode, u8 *query)
static __always_inline void __sortl_query(u8 (*query)[32])
{
asm volatile(
" lghi 0,0\n"
" lgr 1,%[query]\n"
" la 1,%[query]\n"
/* Parameter registers are ignored */
" .insn rrf,%[opc] << 16,2,4,6,0\n"
" .insn rre,0xb9380000,2,4\n"
: [query] "=R" (*query)
:
: [query] "d" ((unsigned long)query), [opc] "i" (opcode)
: "cc", "memory", "0", "1");
: "cc", "0", "1");
}
#define INSN_SORTL 0xb938
#define INSN_DFLTCC 0xb939
static __always_inline void __dfltcc_query(u8 (*query)[32])
{
asm volatile(
" lghi 0,0\n"
" la 1,%[query]\n"
/* Parameter registers are ignored */
" .insn rrf,0xb9390000,2,4,6,0\n"
: [query] "=R" (*query)
:
: "cc", "0", "1");
}
static void __init kvm_s390_cpu_feat_init(void)
{
@@ -415,10 +424,10 @@ static void __init kvm_s390_cpu_feat_init(void)
kvm_s390_available_subfunc.kdsa);
if (test_facility(150)) /* SORTL */
__insn32_query(INSN_SORTL, kvm_s390_available_subfunc.sortl);
__sortl_query(&kvm_s390_available_subfunc.sortl);
if (test_facility(151)) /* DFLTCC */
__insn32_query(INSN_DFLTCC, kvm_s390_available_subfunc.dfltcc);
__dfltcc_query(&kvm_s390_available_subfunc.dfltcc);
if (MACHINE_HAS_ESOP)
allow_cpu_feat(KVM_S390_VM_CPU_FEAT_ESOP);
......
@@ -5,3 +5,4 @@
!*.h
!*.S
!*.sh
!config
@@ -188,6 +188,7 @@ TEST_GEN_PROGS_s390x += s390x/tprot
TEST_GEN_PROGS_s390x += s390x/cmma_test
TEST_GEN_PROGS_s390x += s390x/debug_test
TEST_GEN_PROGS_s390x += s390x/shared_zeropage_test
TEST_GEN_PROGS_s390x += s390x/ucontrol_test
TEST_GEN_PROGS_s390x += demand_paging_test
TEST_GEN_PROGS_s390x += dirty_log_test
TEST_GEN_PROGS_s390x += guest_print_test
......
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Definition for kernel virtual machines on s390x
*
* Copyright IBM Corp. 2024
*
* Authors:
* Christoph Schlameuss <schlameuss@linux.ibm.com>
*/
#ifndef SELFTEST_KVM_DEBUG_PRINT_H
#define SELFTEST_KVM_DEBUG_PRINT_H
#include "asm/ptrace.h"
#include "kvm_util.h"
#include "sie.h"
static inline void print_hex_bytes(const char *name, u64 addr, size_t len)
{
u64 pos;
pr_debug("%s (%p)\n", name, (void *)addr);
pr_debug(" 0/0x00---------|");
if (len > 8)
pr_debug(" 8/0x08---------|");
if (len > 16)
pr_debug(" 16/0x10--------|");
if (len > 24)
pr_debug(" 24/0x18--------|");
for (pos = 0; pos < len; pos += 8) {
if ((pos % 32) == 0)
pr_debug("\n %3lu 0x%.3lx ", pos, pos);
pr_debug(" %16lx", *((u64 *)(addr + pos)));
}
pr_debug("\n");
}
static inline void print_hex(const char *name, u64 addr)
{
print_hex_bytes(name, addr, 512);
}
static inline void print_psw(struct kvm_run *run, struct kvm_s390_sie_block *sie_block)
{
pr_debug("flags:0x%x psw:0x%.16llx:0x%.16llx exit:%u %s\n",
run->flags,
run->psw_mask, run->psw_addr,
run->exit_reason, exit_reason_str(run->exit_reason));
pr_debug("sie_block psw:0x%.16llx:0x%.16llx\n",
sie_block->psw_mask, sie_block->psw_addr);
}
static inline void print_run(struct kvm_run *run, struct kvm_s390_sie_block *sie_block)
{
print_hex_bytes("run", (u64)run, 0x150);
print_hex("sie_block", (u64)sie_block);
print_psw(run, sie_block);
}
static inline void print_regs(struct kvm_run *run)
{
struct kvm_sync_regs *sync_regs = &run->s.regs;
print_hex_bytes("GPRS", (u64)sync_regs->gprs, 8 * NUM_GPRS);
print_hex_bytes("ACRS", (u64)sync_regs->acrs, 4 * NUM_ACRS);
print_hex_bytes("CRS", (u64)sync_regs->crs, 8 * NUM_CRS);
}
#endif /* SELFTEST_KVM_DEBUG_PRINT_H */
@@ -21,6 +21,11 @@
#define PAGE_PROTECT 0x200 /* HW read-only bit */
#define PAGE_NOEXEC 0x100 /* HW no-execute bit */
/* Page size definitions */
#define PAGE_SHIFT 12
#define PAGE_SIZE BIT_ULL(PAGE_SHIFT)
#define PAGE_MASK (~(PAGE_SIZE - 1))
/* Is there a portable way to do this? */
static inline void cpu_relax(void)
{
......
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Definition for kernel virtual machines on s390.
*
* Adapted copy of struct definition kvm_s390_sie_block from
* arch/s390/include/asm/kvm_host.h for use in userspace selftest programs.
*
* Copyright IBM Corp. 2008, 2024
*
* Authors:
* Christoph Schlameuss <schlameuss@linux.ibm.com>
* Carsten Otte <cotte@de.ibm.com>
*/
#ifndef SELFTEST_KVM_SIE_H
#define SELFTEST_KVM_SIE_H
#include <linux/types.h>
struct kvm_s390_sie_block {
#define CPUSTAT_STOPPED 0x80000000
#define CPUSTAT_WAIT 0x10000000
#define CPUSTAT_ECALL_PEND 0x08000000
#define CPUSTAT_STOP_INT 0x04000000
#define CPUSTAT_IO_INT 0x02000000
#define CPUSTAT_EXT_INT 0x01000000
#define CPUSTAT_RUNNING 0x00800000
#define CPUSTAT_RETAINED 0x00400000
#define CPUSTAT_TIMING_SUB 0x00020000
#define CPUSTAT_SIE_SUB 0x00010000
#define CPUSTAT_RRF 0x00008000
#define CPUSTAT_SLSV 0x00004000
#define CPUSTAT_SLSR 0x00002000
#define CPUSTAT_ZARCH 0x00000800
#define CPUSTAT_MCDS 0x00000100
#define CPUSTAT_KSS 0x00000200
#define CPUSTAT_SM 0x00000080
#define CPUSTAT_IBS 0x00000040
#define CPUSTAT_GED2 0x00000010
#define CPUSTAT_G 0x00000008
#define CPUSTAT_GED 0x00000004
#define CPUSTAT_J 0x00000002
#define CPUSTAT_P 0x00000001
__u32 cpuflags; /* 0x0000 */
__u32: 1; /* 0x0004 */
__u32 prefix : 18;
__u32: 1;
__u32 ibc : 12;
__u8 reserved08[4]; /* 0x0008 */
#define PROG_IN_SIE BIT(0)
__u32 prog0c; /* 0x000c */
union {
__u8 reserved10[16]; /* 0x0010 */
struct {
__u64 pv_handle_cpu;
__u64 pv_handle_config;
};
};
#define PROG_BLOCK_SIE BIT(0)
#define PROG_REQUEST BIT(1)
__u32 prog20; /* 0x0020 */
__u8 reserved24[4]; /* 0x0024 */
__u64 cputm; /* 0x0028 */
__u64 ckc; /* 0x0030 */
__u64 epoch; /* 0x0038 */
__u32 svcc; /* 0x0040 */
#define LCTL_CR0 0x8000
#define LCTL_CR6 0x0200
#define LCTL_CR9 0x0040
#define LCTL_CR10 0x0020
#define LCTL_CR11 0x0010
#define LCTL_CR14 0x0002
__u16 lctl; /* 0x0044 */
__s16 icpua; /* 0x0046 */
#define ICTL_OPEREXC 0x80000000
#define ICTL_PINT 0x20000000
#define ICTL_LPSW 0x00400000
#define ICTL_STCTL 0x00040000
#define ICTL_ISKE 0x00004000
#define ICTL_SSKE 0x00002000
#define ICTL_RRBE 0x00001000
#define ICTL_TPROT 0x00000200
__u32 ictl; /* 0x0048 */
#define ECA_CEI 0x80000000
#define ECA_IB 0x40000000
#define ECA_SIGPI 0x10000000
#define ECA_MVPGI 0x01000000
#define ECA_AIV 0x00200000
#define ECA_VX 0x00020000
#define ECA_PROTEXCI 0x00002000
#define ECA_APIE 0x00000008
#define ECA_SII 0x00000001
__u32 eca; /* 0x004c */
#define ICPT_INST 0x04
#define ICPT_PROGI 0x08
#define ICPT_INSTPROGI 0x0C
#define ICPT_EXTREQ 0x10
#define ICPT_EXTINT 0x14
#define ICPT_IOREQ 0x18
#define ICPT_WAIT 0x1c
#define ICPT_VALIDITY 0x20
#define ICPT_STOP 0x28
#define ICPT_OPEREXC 0x2C
#define ICPT_PARTEXEC 0x38
#define ICPT_IOINST 0x40
#define ICPT_KSS 0x5c
#define ICPT_MCHKREQ 0x60
#define ICPT_INT_ENABLE 0x64
#define ICPT_PV_INSTR 0x68
#define ICPT_PV_NOTIFY 0x6c
#define ICPT_PV_PREF 0x70
__u8 icptcode; /* 0x0050 */
__u8 icptstatus; /* 0x0051 */
__u16 ihcpu; /* 0x0052 */
__u8 reserved54; /* 0x0054 */
#define IICTL_CODE_NONE 0x00
#define IICTL_CODE_MCHK 0x01
#define IICTL_CODE_EXT 0x02
#define IICTL_CODE_IO 0x03
#define IICTL_CODE_RESTART 0x04
#define IICTL_CODE_SPECIFICATION 0x10
#define IICTL_CODE_OPERAND 0x11
__u8 iictl; /* 0x0055 */
__u16 ipa; /* 0x0056 */
__u32 ipb; /* 0x0058 */
__u32 scaoh; /* 0x005c */
#define FPF_BPBC 0x20
__u8 fpf; /* 0x0060 */
#define ECB_GS 0x40
#define ECB_TE 0x10
#define ECB_SPECI 0x08
#define ECB_SRSI 0x04
#define ECB_HOSTPROTINT 0x02
#define ECB_PTF 0x01
__u8 ecb; /* 0x0061 */
#define ECB2_CMMA 0x80
#define ECB2_IEP 0x20
#define ECB2_PFMFI 0x08
#define ECB2_ESCA 0x04
#define ECB2_ZPCI_LSI 0x02
__u8 ecb2; /* 0x0062 */
#define ECB3_AISI 0x20
#define ECB3_AISII 0x10
#define ECB3_DEA 0x08
#define ECB3_AES 0x04
#define ECB3_RI 0x01
__u8 ecb3; /* 0x0063 */
#define ESCA_SCAOL_MASK ~0x3fU
__u32 scaol; /* 0x0064 */
__u8 sdf; /* 0x0068 */
__u8 epdx; /* 0x0069 */
__u8 cpnc; /* 0x006a */
__u8 reserved6b; /* 0x006b */
__u32 todpr; /* 0x006c */
#define GISA_FORMAT1 0x00000001
__u32 gd; /* 0x0070 */
__u8 reserved74[12]; /* 0x0074 */
__u64 mso; /* 0x0080 */
__u64 msl; /* 0x0088 */
__u64 psw_mask; /* 0x0090 */
__u64 psw_addr; /* 0x0098 */
__u64 gg14; /* 0x00a0 */
__u64 gg15; /* 0x00a8 */
__u8 reservedb0[8]; /* 0x00b0 */
#define HPID_KVM 0x4
#define HPID_VSIE 0x5
__u8 hpid; /* 0x00b8 */
__u8 reservedb9[7]; /* 0x00b9 */
union {
struct {
__u32 eiparams; /* 0x00c0 */
__u16 extcpuaddr; /* 0x00c4 */
__u16 eic; /* 0x00c6 */
};
__u64 mcic; /* 0x00c0 */
} __packed;
__u32 reservedc8; /* 0x00c8 */
union {
struct {
__u16 pgmilc; /* 0x00cc */
__u16 iprcc; /* 0x00ce */
};
__u32 edc; /* 0x00cc */
} __packed;
union {
struct {
__u32 dxc; /* 0x00d0 */
__u16 mcn; /* 0x00d4 */
__u8 perc; /* 0x00d6 */
__u8 peratmid; /* 0x00d7 */
};
__u64 faddr; /* 0x00d0 */
} __packed;
__u64 peraddr; /* 0x00d8 */
__u8 eai; /* 0x00e0 */
__u8 peraid; /* 0x00e1 */
__u8 oai; /* 0x00e2 */
__u8 armid; /* 0x00e3 */
__u8 reservede4[4]; /* 0x00e4 */
union {
__u64 tecmc; /* 0x00e8 */
struct {
__u16 subchannel_id; /* 0x00e8 */
__u16 subchannel_nr; /* 0x00ea */
__u32 io_int_parm; /* 0x00ec */
__u32 io_int_word; /* 0x00f0 */
};
} __packed;
__u8 reservedf4[8]; /* 0x00f4 */
#define CRYCB_FORMAT_MASK 0x00000003
#define CRYCB_FORMAT0 0x00000000
#define CRYCB_FORMAT1 0x00000001
#define CRYCB_FORMAT2 0x00000003
__u32 crycbd; /* 0x00fc */
__u64 gcr[16]; /* 0x0100 */
union {
__u64 gbea; /* 0x0180 */
__u64 sidad;
};
__u8 reserved188[8]; /* 0x0188 */
__u64 sdnxo; /* 0x0190 */
__u8 reserved198[8]; /* 0x0198 */
__u32 fac; /* 0x01a0 */
__u8 reserved1a4[20]; /* 0x01a4 */
__u64 cbrlo; /* 0x01b8 */
__u8 reserved1c0[8]; /* 0x01c0 */
#define ECD_HOSTREGMGMT 0x20000000
#define ECD_MEF 0x08000000
#define ECD_ETOKENF 0x02000000
#define ECD_ECC 0x00200000
__u32 ecd; /* 0x01c8 */
__u8 reserved1cc[18]; /* 0x01cc */
__u64 pp; /* 0x01de */
__u8 reserved1e6[2]; /* 0x01e6 */
__u64 itdba; /* 0x01e8 */
__u64 riccbd; /* 0x01f0 */
__u64 gvrd; /* 0x01f8 */
} __packed __aligned(512);
#endif /* SELFTEST_KVM_SIE_H */
@@ -14,7 +14,7 @@ void virt_arch_pgd_alloc(struct kvm_vm *vm)
{
vm_paddr_t paddr;
TEST_ASSERT(vm->page_size == 4096, "Unsupported page size: 0x%x",
TEST_ASSERT(vm->page_size == PAGE_SIZE, "Unsupported page size: 0x%x",
vm->page_size);
if (vm->pgd_created)
@@ -79,7 +79,7 @@ void virt_arch_pg_map(struct kvm_vm *vm, uint64_t gva, uint64_t gpa)
}
/* Fill in page table entry */
idx = (gva >> 12) & 0x0ffu; /* page index */
idx = (gva >> PAGE_SHIFT) & 0x0ffu; /* page index */
if (!(entry[idx] & PAGE_INVALID))
fprintf(stderr,
"WARNING: PTE for gpa=0x%"PRIx64" already set!\n", gpa);
@@ -91,7 +91,7 @@ vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
int ri, idx;
uint64_t *entry;
TEST_ASSERT(vm->page_size == 4096, "Unsupported page size: 0x%x",
TEST_ASSERT(vm->page_size == PAGE_SIZE, "Unsupported page size: 0x%x",
vm->page_size);
entry = addr_gpa2hva(vm, vm->pgd);
@@ -103,7 +103,7 @@ vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
entry = addr_gpa2hva(vm, entry[idx] & REGION_ENTRY_ORIGIN);
}
idx = (gva >> 12) & 0x0ffu; /* page index */
idx = (gva >> PAGE_SHIFT) & 0x0ffu; /* page index */
TEST_ASSERT(!(entry[idx] & PAGE_INVALID),
"No page mapping for vm virtual address 0x%lx", gva);
@@ -168,7 +168,7 @@ struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id)
struct kvm_sregs sregs;
struct kvm_vcpu *vcpu;
TEST_ASSERT(vm->page_size == 4096, "Unsupported page size: 0x%x",
TEST_ASSERT(vm->page_size == PAGE_SIZE, "Unsupported page size: 0x%x",
vm->page_size);
stack_vaddr = __vm_vaddr_alloc(vm, stack_size,
......
@@ -17,16 +17,17 @@
#include "kvm_util.h"
#include "kselftest.h"
#include "ucall_common.h"
#include "processor.h"
#define MAIN_PAGE_COUNT 512
#define TEST_DATA_PAGE_COUNT 512
#define TEST_DATA_MEMSLOT 1
#define TEST_DATA_START_GFN 4096
#define TEST_DATA_START_GFN PAGE_SIZE
#define TEST_DATA_TWO_PAGE_COUNT 256
#define TEST_DATA_TWO_MEMSLOT 2
#define TEST_DATA_TWO_START_GFN 8192
#define TEST_DATA_TWO_START_GFN (2 * PAGE_SIZE)
static char cmma_value_buf[MAIN_PAGE_COUNT + TEST_DATA_PAGE_COUNT];
@@ -66,7 +67,7 @@ static void guest_dirty_test_data(void)
" lghi 5,%[page_count]\n"
/* r5 += r1 */
"2: agfr 5,1\n"
/* r2 = r1 << 12 */
/* r2 = r1 << PAGE_SHIFT */
"1: sllg 2,1,12(0)\n"
/* essa(r4, r2, SET_STABLE) */
" .insn rrf,0xb9ab0000,4,2,1,0\n"
......
CONFIG_KVM=y
CONFIG_KVM_S390_UCONTROL=y
@@ -2,12 +2,12 @@
/* Test KVM debugging features. */
#include "kvm_util.h"
#include "test_util.h"
#include "sie.h"
#include <linux/kvm.h>
#define __LC_SVC_NEW_PSW 0x1c0
#define __LC_PGM_NEW_PSW 0x1d0
#define ICPT_INSTRUCTION 0x04
#define IPA0_DIAG 0x8300
#define PGM_SPECIFICATION 0x06
@@ -85,7 +85,7 @@ static void test_step_pgm_diag(void)
vm = test_step_int_1(&vcpu, test_step_pgm_diag_guest_code,
__LC_PGM_NEW_PSW, new_psw);
TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_S390_SIEIC);
TEST_ASSERT_EQ(vcpu->run->s390_sieic.icptcode, ICPT_INSTRUCTION);
TEST_ASSERT_EQ(vcpu->run->s390_sieic.icptcode, ICPT_INST);
TEST_ASSERT_EQ(vcpu->run->s390_sieic.ipa & 0xff00, IPA0_DIAG);
vcpu_ioctl(vcpu, KVM_S390_IRQ, &irq);
vcpu_run(vcpu);
......
@@ -16,6 +16,7 @@
#include "kvm_util.h"
#include "kselftest.h"
#include "ucall_common.h"
#include "processor.h"
enum mop_target {
LOGICAL,
@@ -226,9 +227,6 @@ static void memop_ioctl(struct test_info info, struct kvm_s390_mem_op *ksmo,
#define CHECK_N_DO(f, ...) ({ f(__VA_ARGS__, CHECK_ONLY); f(__VA_ARGS__); })
#define PAGE_SHIFT 12
#define PAGE_SIZE (1ULL << PAGE_SHIFT)
#define PAGE_MASK (~(PAGE_SIZE - 1))
#define CR0_FETCH_PROTECTION_OVERRIDE (1UL << (63 - 38))
#define CR0_STORAGE_PROTECTION_OVERRIDE (1UL << (63 - 39))
......
@@ -9,9 +9,8 @@
#include "kvm_util.h"
#include "kselftest.h"
#include "ucall_common.h"
#include "processor.h"
#define PAGE_SHIFT 12
#define PAGE_SIZE (1 << PAGE_SHIFT)
#define CR0_FETCH_PROTECTION_OVERRIDE (1UL << (63 - 38))
#define CR0_STORAGE_PROTECTION_OVERRIDE (1UL << (63 - 39))
@@ -151,7 +150,7 @@ static enum stage perform_next_stage(int *i, bool mapped_0)
* instead.
* In order to skip these tests we detect this inside the guest
*/
skip = tests[*i].addr < (void *)4096 &&
skip = tests[*i].addr < (void *)PAGE_SIZE &&
tests[*i].expected != TRANSL_UNAVAIL &&
!mapped_0;
if (!skip) {
......
// SPDX-License-Identifier: GPL-2.0-only
/*
* Test code for the s390x kvm ucontrol interface
*
* Copyright IBM Corp. 2024
*
* Authors:
* Christoph Schlameuss <schlameuss@linux.ibm.com>
*/
#include "debug_print.h"
#include "kselftest_harness.h"
#include "kvm_util.h"
#include "processor.h"
#include "sie.h"
#include <linux/capability.h>
#include <linux/sizes.h>
#define VM_MEM_SIZE (4 * SZ_1M)
/* declare capget() directly so capabilities can be checked without libcap */
int capget(cap_user_header_t header, cap_user_data_t data);
/**
* To create user-controlled virtual machines on s390, check
* KVM_CAP_S390_UCONTROL and pass the KVM_VM_S390_UCONTROL flag to
* KVM_CREATE_VM as a privileged user (CAP_SYS_ADMIN).
*/
void require_ucontrol_admin(void)
{
struct __user_cap_data_struct data[_LINUX_CAPABILITY_U32S_3];
struct __user_cap_header_struct hdr = {
.version = _LINUX_CAPABILITY_VERSION_3,
};
int rc;
rc = capget(&hdr, data);
TEST_ASSERT_EQ(0, rc);
TEST_REQUIRE((data->effective & CAP_TO_MASK(CAP_SYS_ADMIN)) > 0);
TEST_REQUIRE(kvm_has_cap(KVM_CAP_S390_UCONTROL));
}
/* Test program setting some registers and looping */
extern char test_gprs_asm[];
asm("test_gprs_asm:\n"
"xgr %r0, %r0\n"
"lgfi %r1,1\n"
"lgfi %r2,2\n"
"lgfi %r3,3\n"
"lgfi %r4,4\n"
"lgfi %r5,5\n"
"lgfi %r6,6\n"
"lgfi %r7,7\n"
"0:\n"
" diag 0,0,0x44\n"
" ahi %r0,1\n"
" j 0b\n"
);
FIXTURE(uc_kvm)
{
struct kvm_s390_sie_block *sie_block;
struct kvm_run *run;
uintptr_t base_gpa;
uintptr_t code_gpa;
uintptr_t base_hva;
uintptr_t code_hva;
int kvm_run_size;
void *vm_mem;
int vcpu_fd;
int kvm_fd;
int vm_fd;
};
/**
* Create a VM with a single vcpu; map kvm_run and the SIE control block for easy access
*/
FIXTURE_SETUP(uc_kvm)
{
struct kvm_s390_vm_cpu_processor info;
int rc;
require_ucontrol_admin();
self->kvm_fd = open_kvm_dev_path_or_exit();
self->vm_fd = ioctl(self->kvm_fd, KVM_CREATE_VM, KVM_VM_S390_UCONTROL);
ASSERT_GE(self->vm_fd, 0);
kvm_device_attr_get(self->vm_fd, KVM_S390_VM_CPU_MODEL,
KVM_S390_VM_CPU_PROCESSOR, &info);
TH_LOG("create VM 0x%llx", info.cpuid);
self->vcpu_fd = ioctl(self->vm_fd, KVM_CREATE_VCPU, 0);
ASSERT_GE(self->vcpu_fd, 0);
self->kvm_run_size = ioctl(self->kvm_fd, KVM_GET_VCPU_MMAP_SIZE, NULL);
ASSERT_GE(self->kvm_run_size, sizeof(struct kvm_run))
TH_LOG(KVM_IOCTL_ERROR(KVM_GET_VCPU_MMAP_SIZE, self->kvm_run_size));
self->run = (struct kvm_run *)mmap(NULL, self->kvm_run_size,
PROT_READ | PROT_WRITE, MAP_SHARED, self->vcpu_fd, 0);
ASSERT_NE(self->run, MAP_FAILED);
/**
* For virtual cpus that have been created with S390 user controlled
* virtual machines, the resulting vcpu fd can be memory mapped at page
* offset KVM_S390_SIE_PAGE_OFFSET in order to obtain a memory map of
* the virtual cpu's hardware control block.
*/
self->sie_block = (struct kvm_s390_sie_block *)mmap(NULL, PAGE_SIZE,
PROT_READ | PROT_WRITE, MAP_SHARED,
self->vcpu_fd, KVM_S390_SIE_PAGE_OFFSET << PAGE_SHIFT);
ASSERT_NE(self->sie_block, MAP_FAILED);
TH_LOG("VM created %p %p", self->run, self->sie_block);
self->base_gpa = 0;
self->code_gpa = self->base_gpa + (3 * SZ_1M);
self->vm_mem = aligned_alloc(SZ_1M, VM_MEM_SIZE);
ASSERT_NE(NULL, self->vm_mem) TH_LOG("malloc failed %u", errno);
self->base_hva = (uintptr_t)self->vm_mem;
self->code_hva = self->base_hva - self->base_gpa + self->code_gpa;
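/* ucontrol VMs have no KVM memslots; back the guest address space per vcpu via KVM_S390_UCAS_MAP */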
struct kvm_s390_ucas_mapping map = {
.user_addr = self->base_hva,
.vcpu_addr = self->base_gpa,
.length = VM_MEM_SIZE,
};
TH_LOG("ucas map %p %p 0x%llx",
(void *)map.user_addr, (void *)map.vcpu_addr, map.length);
rc = ioctl(self->vcpu_fd, KVM_S390_UCAS_MAP, &map);
ASSERT_EQ(0, rc) TH_LOG("ucas map result %d not expected, %s",
rc, strerror(errno));
TH_LOG("page in %p", (void *)self->base_gpa);
rc = ioctl(self->vcpu_fd, KVM_S390_VCPU_FAULT, self->base_gpa);
ASSERT_EQ(0, rc) TH_LOG("vcpu fault (%p) result %d not expected, %s",
(void *)self->base_hva, rc, strerror(errno));
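/* the vcpu is created in the stopped state; clear CPUSTAT_STOPPED so SIE will execute guest instructions */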
self->sie_block->cpuflags &= ~CPUSTAT_STOPPED;
}
FIXTURE_TEARDOWN(uc_kvm)
{
munmap(self->sie_block, PAGE_SIZE);
munmap(self->run, self->kvm_run_size);
close(self->vcpu_fd);
close(self->vm_fd);
close(self->kvm_fd);
free(self->vm_mem);
}
TEST_F(uc_kvm, uc_sie_assertions)
{
/* assert interception of Code 08 (Program Interruption) is set */
EXPECT_EQ(0, self->sie_block->ecb & ECB_SPECI);
}
TEST_F(uc_kvm, uc_attr_mem_limit)
{
u64 limit;
struct kvm_device_attr attr = {
.group = KVM_S390_VM_MEM_CTRL,
.attr = KVM_S390_VM_MEM_LIMIT_SIZE,
.addr = (unsigned long)&limit,
};
int rc;
rc = ioctl(self->vm_fd, KVM_GET_DEVICE_ATTR, &attr);
EXPECT_EQ(0, rc);
EXPECT_EQ(~0UL, limit);
/* assert set not supported */
rc = ioctl(self->vm_fd, KVM_SET_DEVICE_ATTR, &attr);
EXPECT_EQ(-1, rc);
EXPECT_EQ(EINVAL, errno);
}
TEST_F(uc_kvm, uc_no_dirty_log)
{
struct kvm_dirty_log dlog;
int rc;
rc = ioctl(self->vm_fd, KVM_GET_DIRTY_LOG, &dlog);
EXPECT_EQ(-1, rc);
EXPECT_EQ(EINVAL, errno);
}
/**
* Assert HPAGE CAP cannot be enabled on UCONTROL VM
*/
TEST(uc_cap_hpage)
{
int rc, kvm_fd, vm_fd, vcpu_fd;
struct kvm_enable_cap cap = {
.cap = KVM_CAP_S390_HPAGE_1M,
};
require_ucontrol_admin();
kvm_fd = open_kvm_dev_path_or_exit();
vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, KVM_VM_S390_UCONTROL);
ASSERT_GE(vm_fd, 0);
/* assert hpages are not supported on ucontrol vm */
rc = ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_S390_HPAGE_1M);
EXPECT_EQ(0, rc);
/* Test that KVM_CAP_S390_HPAGE_1M can't be enabled for a ucontrol vm */
rc = ioctl(vm_fd, KVM_ENABLE_CAP, cap);
EXPECT_EQ(-1, rc);
EXPECT_EQ(EINVAL, errno);
/* assert HPAGE CAP is rejected after vCPU creation */
vcpu_fd = ioctl(vm_fd, KVM_CREATE_VCPU, 0);
ASSERT_GE(vcpu_fd, 0);
rc = ioctl(vm_fd, KVM_ENABLE_CAP, cap);
EXPECT_EQ(-1, rc);
EXPECT_EQ(EBUSY, errno);
close(vcpu_fd);
close(vm_fd);
close(kvm_fd);
}
/* verify SIEIC exit;
 * fail on interception codes not expected by the test cases
*/
static bool uc_handle_sieic(FIXTURE_DATA(uc_kvm) * self)
{
struct kvm_s390_sie_block *sie_block = self->sie_block;
struct kvm_run *run = self->run;
/* check SIE interception code */
pr_info("sieic: 0x%.2x 0x%.4x 0x%.4x\n",
run->s390_sieic.icptcode,
run->s390_sieic.ipa,
run->s390_sieic.ipb);
switch (run->s390_sieic.icptcode) {
case ICPT_INST:
/* end execution in caller on intercepted instruction */
pr_info("sie instruction interception\n");
return false;
case ICPT_OPEREXC:
/* operation exception */
TEST_FAIL("sie exception on %.4x%.8x", sie_block->ipa, sie_block->ipb);
default:
TEST_FAIL("UNEXPECTED SIEIC CODE %d", run->s390_sieic.icptcode);
}
return true;
}
/* verify VM state on exit */
static bool uc_handle_exit(FIXTURE_DATA(uc_kvm) * self)
{
struct kvm_run *run = self->run;
switch (run->exit_reason) {
case KVM_EXIT_S390_SIEIC:
return uc_handle_sieic(self);
default:
pr_info("exit_reason %2d not handled\n", run->exit_reason);
}
return true;
}
/* run the VM until interrupted */
static int uc_run_once(FIXTURE_DATA(uc_kvm) * self)
{
int rc;
rc = ioctl(self->vcpu_fd, KVM_RUN, NULL);
print_run(self->run, self->sie_block);
print_regs(self->run);
pr_debug("run %d / %d %s\n", rc, errno, strerror(errno));
return rc;
}
static void uc_assert_diag44(FIXTURE_DATA(uc_kvm) * self)
{
struct kvm_s390_sie_block *sie_block = self->sie_block;
/* assert vm was interrupted by diag 0x0044 */
TEST_ASSERT_EQ(KVM_EXIT_S390_SIEIC, self->run->exit_reason);
TEST_ASSERT_EQ(ICPT_INST, sie_block->icptcode);
TEST_ASSERT_EQ(0x8300, sie_block->ipa);
TEST_ASSERT_EQ(0x440000, sie_block->ipb);
}
TEST_F(uc_kvm, uc_gprs)
{
struct kvm_sync_regs *sync_regs = &self->run->s.regs;
struct kvm_run *run = self->run;
struct kvm_regs regs = {};
/* Set registers to values that are different from the ones that we expect below */
for (int i = 0; i < 8; i++)
sync_regs->gprs[i] = 8;
run->kvm_dirty_regs |= KVM_SYNC_GPRS;
/* copy test_gprs_asm to code_hva / code_gpa */
TH_LOG("copy code %p to vm mapped memory %p / %p",
&test_gprs_asm, (void *)self->code_hva, (void *)self->code_gpa);
memcpy((void *)self->code_hva, &test_gprs_asm, PAGE_SIZE);
/* DAT disabled + 64 bit mode */
run->psw_mask = 0x0000000180000000ULL;
run->psw_addr = self->code_gpa;
/* run and expect interception of diag 44 */
ASSERT_EQ(0, uc_run_once(self));
ASSERT_EQ(false, uc_handle_exit(self));
uc_assert_diag44(self);
/* Retrieve and check guest register values */
ASSERT_EQ(0, ioctl(self->vcpu_fd, KVM_GET_REGS, &regs));
for (int i = 0; i < 8; i++) {
ASSERT_EQ(i, regs.gprs[i]);
ASSERT_EQ(i, sync_regs->gprs[i]);
}
/* run and expect interception of diag 44 again */
ASSERT_EQ(0, uc_run_once(self));
ASSERT_EQ(false, uc_handle_exit(self));
uc_assert_diag44(self);
/* check continued increment of register 0 value */
ASSERT_EQ(0, ioctl(self->vcpu_fd, KVM_GET_REGS, &regs));
ASSERT_EQ(1, regs.gprs[0]);
ASSERT_EQ(1, sync_regs->gprs[0]);
}
TEST_HARNESS_MAIN