Commit 8d6b029e authored by Linus Torvalds

Merge tag 's390-6.10-3' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux

Pull s390 fixes from Alexander Gordeev:

 - Do not create a PT_LOAD program header for the kernel image when the
   virtual memory information in OS_INFO data is not available. That
   fixes stand-alone dump failures against kernels that do not provide
   the virtual memory information

 - Add KVM s390 shared zeropage selftest

* tag 's390-6.10-3' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux:
  KVM: s390x: selftests: Add shared zeropage test
  s390/crash: Do not use VM info if os_info does not have it
parents 8d437867 01c51a32
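For quick orientation before the full diff: the crash fix makes the kernel-image PT_LOAD program header optional, so the ELF core header count and layout now depend on whether the old kernel's OS_INFO carries virtual memory data. The userspace sketch below is illustrative only and is not part of the commit; the names mem_chunk_cnt, phdr_text_cnt, phdr_notes, phdr_text and phdr_loads mirror the diff that follows, and the sample values are invented.

#include <elf.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
        int mem_chunk_cnt = 4;  /* one PT_LOAD per old-memory chunk (made-up value) */
        int phdr_text_cnt = 0;  /* 1 only when OS_INFO provides the VM info */
        Elf64_Ehdr ehdr;
        Elf64_Phdr *phdrs, *phdr_notes, *phdr_text = NULL, *phdr_loads;

        memset(&ehdr, 0, sizeof(ehdr));
        memcpy(ehdr.e_ident, ELFMAG, SELFMAG);
        ehdr.e_phentsize = sizeof(Elf64_Phdr);
        /* One PT_NOTE + optional kernel-image PT_LOAD + one PT_LOAD per memory chunk */
        ehdr.e_phnum = mem_chunk_cnt + phdr_text_cnt + 1;

        /* Same layout decision as the fixed elfcorehdr_alloc() in the diff below */
        phdrs = calloc(ehdr.e_phnum, sizeof(*phdrs));
        if (!phdrs)
                return 1;
        phdr_notes = phdrs;
        if (phdr_text_cnt) {
                phdr_text = phdr_notes + 1;
                phdr_loads = phdr_text + 1;
        } else {
                phdr_loads = phdr_notes + 1;
        }
        printf("e_phnum=%u, memory-chunk PT_LOADs start at index %td\n",
               (unsigned)ehdr.e_phnum, phdr_loads - phdrs);
        free(phdrs);
        return 0;
}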
arch/s390/kernel/crash_dump.c
@@ -451,7 +451,7 @@ static void *nt_final(void *ptr)
 /*
  * Initialize ELF header (new kernel)
  */
-static void *ehdr_init(Elf64_Ehdr *ehdr, int mem_chunk_cnt)
+static void *ehdr_init(Elf64_Ehdr *ehdr, int phdr_count)
 {
         memset(ehdr, 0, sizeof(*ehdr));
         memcpy(ehdr->e_ident, ELFMAG, SELFMAG);
@@ -465,11 +465,8 @@ static void *ehdr_init(Elf64_Ehdr *ehdr, int mem_chunk_cnt)
         ehdr->e_phoff = sizeof(Elf64_Ehdr);
         ehdr->e_ehsize = sizeof(Elf64_Ehdr);
         ehdr->e_phentsize = sizeof(Elf64_Phdr);
-        /*
-         * Number of memory chunk PT_LOAD program headers plus one kernel
-         * image PT_LOAD program header plus one PT_NOTE program header.
-         */
-        ehdr->e_phnum = mem_chunk_cnt + 1 + 1;
+        /* Number of PT_LOAD program headers plus PT_NOTE program header */
+        ehdr->e_phnum = phdr_count + 1;
         return ehdr + 1;
 }
@@ -503,12 +500,14 @@ static int get_mem_chunk_cnt(void)
 /*
  * Initialize ELF loads (new kernel)
  */
-static void loads_init(Elf64_Phdr *phdr)
+static void loads_init(Elf64_Phdr *phdr, bool os_info_has_vm)
 {
-        unsigned long old_identity_base = os_info_old_value(OS_INFO_IDENTITY_BASE);
+        unsigned long old_identity_base = 0;
         phys_addr_t start, end;
         u64 idx;

+        if (os_info_has_vm)
+                old_identity_base = os_info_old_value(OS_INFO_IDENTITY_BASE);
         for_each_physmem_range(idx, &oldmem_type, &start, &end) {
                 phdr->p_type = PT_LOAD;
                 phdr->p_vaddr = old_identity_base + start;
@@ -522,6 +521,11 @@ static void loads_init(Elf64_Phdr *phdr)
         }
 }

+static bool os_info_has_vm(void)
+{
+        return os_info_old_value(OS_INFO_KASLR_OFFSET);
+}
+
 /*
  * Prepare PT_LOAD type program header for kernel image region
  */
@@ -566,7 +570,7 @@ static void *notes_init(Elf64_Phdr *phdr, void *ptr, u64 notes_offset)
         return ptr;
 }

-static size_t get_elfcorehdr_size(int mem_chunk_cnt)
+static size_t get_elfcorehdr_size(int phdr_count)
 {
         size_t size;
@@ -581,10 +585,8 @@ static size_t get_elfcorehdr_size(int mem_chunk_cnt)
         size += nt_vmcoreinfo_size();
         /* nt_final */
         size += sizeof(Elf64_Nhdr);
-        /* PT_LOAD type program header for kernel text region */
-        size += sizeof(Elf64_Phdr);
         /* PT_LOADS */
-        size += mem_chunk_cnt * sizeof(Elf64_Phdr);
+        size += phdr_count * sizeof(Elf64_Phdr);

         return size;
 }
@@ -595,8 +597,8 @@ static size_t get_elfcorehdr_size(int mem_chunk_cnt)
 int elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size)
 {
         Elf64_Phdr *phdr_notes, *phdr_loads, *phdr_text;
+        int mem_chunk_cnt, phdr_text_cnt;
         size_t alloc_size;
-        int mem_chunk_cnt;
         void *ptr, *hdr;
         u64 hdr_off;
@@ -615,12 +617,14 @@ int elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size)
         }

         mem_chunk_cnt = get_mem_chunk_cnt();
+        phdr_text_cnt = os_info_has_vm() ? 1 : 0;

-        alloc_size = get_elfcorehdr_size(mem_chunk_cnt);
+        alloc_size = get_elfcorehdr_size(mem_chunk_cnt + phdr_text_cnt);

         hdr = kzalloc(alloc_size, GFP_KERNEL);

-        /* Without elfcorehdr /proc/vmcore cannot be created. Thus creating
+        /*
+         * Without elfcorehdr /proc/vmcore cannot be created. Thus creating
          * a dump with this crash kernel will fail. Panic now to allow other
          * dump mechanisms to take over.
          */
@@ -628,21 +632,23 @@ int elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size)
                 panic("s390 kdump allocating elfcorehdr failed");

         /* Init elf header */
-        ptr = ehdr_init(hdr, mem_chunk_cnt);
+        phdr_notes = ehdr_init(hdr, mem_chunk_cnt + phdr_text_cnt);
         /* Init program headers */
-        phdr_notes = ptr;
-        ptr = PTR_ADD(ptr, sizeof(Elf64_Phdr));
-        phdr_text = ptr;
-        ptr = PTR_ADD(ptr, sizeof(Elf64_Phdr));
-        phdr_loads = ptr;
-        ptr = PTR_ADD(ptr, sizeof(Elf64_Phdr) * mem_chunk_cnt);
+        if (phdr_text_cnt) {
+                phdr_text = phdr_notes + 1;
+                phdr_loads = phdr_text + 1;
+        } else {
+                phdr_loads = phdr_notes + 1;
+        }
+        ptr = PTR_ADD(phdr_loads, sizeof(Elf64_Phdr) * mem_chunk_cnt);
         /* Init notes */
         hdr_off = PTR_DIFF(ptr, hdr);
         ptr = notes_init(phdr_notes, ptr, ((unsigned long) hdr) + hdr_off);
         /* Init kernel text program header */
-        text_init(phdr_text);
+        if (phdr_text_cnt)
+                text_init(phdr_text);
         /* Init loads */
-        loads_init(phdr_loads);
+        loads_init(phdr_loads, phdr_text_cnt);
         /* Finalize program headers */
         hdr_off = PTR_DIFF(ptr, hdr);
         *addr = (unsigned long long) hdr;
tools/testing/selftests/kvm/Makefile
@@ -183,6 +183,7 @@ TEST_GEN_PROGS_s390x += s390x/sync_regs_test
 TEST_GEN_PROGS_s390x += s390x/tprot
 TEST_GEN_PROGS_s390x += s390x/cmma_test
 TEST_GEN_PROGS_s390x += s390x/debug_test
+TEST_GEN_PROGS_s390x += s390x/shared_zeropage_test
 TEST_GEN_PROGS_s390x += demand_paging_test
 TEST_GEN_PROGS_s390x += dirty_log_test
 TEST_GEN_PROGS_s390x += guest_print_test
tools/testing/selftests/kvm/s390x/shared_zeropage_test.c (new file)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Test shared zeropage handling (with/without storage keys)
 *
 * Copyright (C) 2024, Red Hat, Inc.
 */
#include <sys/mman.h>
#include <linux/fs.h>

#include "test_util.h"
#include "kvm_util.h"
#include "kselftest.h"
#include "ucall_common.h"

static void set_storage_key(void *addr, uint8_t skey)
{
        asm volatile("sske %0,%1" : : "d" (skey), "a" (addr));
}
static void guest_code(void)
{
        /* Issue some storage key instruction. */
        set_storage_key((void *)0, 0x98);
        GUEST_DONE();
}

/*
 * Returns 1 if the shared zeropage is mapped, 0 if something else is mapped.
 * Returns < 0 on error or if nothing is mapped.
 */
static int maps_shared_zeropage(int pagemap_fd, void *addr)
{
        struct page_region region;
        struct pm_scan_arg arg = {
                .start = (uintptr_t)addr,
                .end = (uintptr_t)addr + 4096,
                .vec = (uintptr_t)&region,
                .vec_len = 1,
                .size = sizeof(struct pm_scan_arg),
                .category_mask = PAGE_IS_PFNZERO,
                .category_anyof_mask = PAGE_IS_PRESENT,
                .return_mask = PAGE_IS_PFNZERO,
        };

        return ioctl(pagemap_fd, PAGEMAP_SCAN, &arg);
}
int main(int argc, char *argv[])
{
        char *mem, *page0, *page1, *page2, tmp;
        const size_t pagesize = getpagesize();
        struct kvm_vcpu *vcpu;
        struct kvm_vm *vm;
        struct ucall uc;
        int pagemap_fd;

        ksft_print_header();
        ksft_set_plan(3);

        /*
         * We'll use memory that is not mapped into the VM for simplicity.
         * Shared zeropages are enabled/disabled per-process.
         */
        mem = mmap(0, 3 * pagesize, PROT_READ, MAP_PRIVATE | MAP_ANON, -1, 0);
        TEST_ASSERT(mem != MAP_FAILED, "mmap() failed");

        /* Disable THP. Ignore errors on older kernels. */
        madvise(mem, 3 * pagesize, MADV_NOHUGEPAGE);

        page0 = mem;
        page1 = page0 + pagesize;
        page2 = page1 + pagesize;

        /* Can we even detect shared zeropages? */
        pagemap_fd = open("/proc/self/pagemap", O_RDONLY);
        TEST_REQUIRE(pagemap_fd >= 0);

        tmp = *page0;
        asm volatile("" : "+r" (tmp));
        TEST_REQUIRE(maps_shared_zeropage(pagemap_fd, page0) == 1);

        vm = vm_create_with_one_vcpu(&vcpu, guest_code);

        /* Verify that we get the shared zeropage after VM creation. */
        tmp = *page1;
        asm volatile("" : "+r" (tmp));
        ksft_test_result(maps_shared_zeropage(pagemap_fd, page1) == 1,
                         "Shared zeropages should be enabled\n");

        /*
         * Let our VM execute a storage key instruction that should
         * unshare all shared zeropages.
         */
        vcpu_run(vcpu);
        get_ucall(vcpu, &uc);
        TEST_ASSERT_EQ(uc.cmd, UCALL_DONE);

        /* Verify that we don't have a shared zeropage anymore. */
        ksft_test_result(!maps_shared_zeropage(pagemap_fd, page1),
                         "Shared zeropage should be gone\n");

        /* Verify that we don't get any new shared zeropages. */
        tmp = *page2;
        asm volatile("" : "+r" (tmp));
        ksft_test_result(!maps_shared_zeropage(pagemap_fd, page2),
                         "Shared zeropages should be disabled\n");

        kvm_vm_free(vm);

        ksft_finished();
}
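The maps_shared_zeropage() helper above is the one piece that is not KVM-specific: it probes a page via the PAGEMAP_SCAN ioctl on /proc/self/pagemap. The following standalone sketch shows the same probe outside the selftest harness. It is not part of the commit; it assumes a kernel that provides PAGEMAP_SCAN and struct pm_scan_arg via <linux/fs.h> (exactly what the test itself relies on), and the ret == 1 check follows the same convention as the helper above.

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <linux/fs.h>

int main(void)
{
        const size_t pagesize = getpagesize();
        struct page_region region;
        char *page, tmp;
        int fd, ret;

        fd = open("/proc/self/pagemap", O_RDONLY);
        if (fd < 0)
                return 1;

        /* Touch a fresh anonymous page read-only so it maps the shared zeropage. */
        page = mmap(NULL, pagesize, PROT_READ, MAP_PRIVATE | MAP_ANON, -1, 0);
        if (page == MAP_FAILED)
                return 1;
        tmp = *page;
        asm volatile("" : "+r" (tmp)); /* keep the read from being optimized away */

        struct pm_scan_arg arg = {
                .start = (uintptr_t)page,
                .end = (uintptr_t)page + pagesize,
                .vec = (uintptr_t)&region,
                .vec_len = 1,
                .size = sizeof(struct pm_scan_arg),
                .category_mask = PAGE_IS_PFNZERO,
                .category_anyof_mask = PAGE_IS_PRESENT,
                .return_mask = PAGE_IS_PFNZERO,
        };
        ret = ioctl(fd, PAGEMAP_SCAN, &arg);
        printf("shared zeropage mapped: %s (ret=%d)\n", ret == 1 ? "yes" : "no", ret);

        munmap(page, pagesize);
        close(fd);
        return 0;
}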