Commit 847d4287 authored by Linus Torvalds

Merge tag 's390-5.10-1' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux

Pull s390 updates from Vasily Gorbik:

 - Remove address space overrides using set_fs()

 - Convert to generic vDSO

 - Convert to generic page table dumper

 - Add ARCH_HAS_DEBUG_WX support

 - Add leap seconds handling support

 - Add NVMe firmware-assisted kernel dump support

 - Extend NVMe boot support with memory clearing control and addition of
   kernel parameters

 - AP bus and zcrypt API code rework. Add an adapter configure/deconfigure
   interface. Extend debug features. Add failure injection support

 - Add ECC secure private keys support

 - Add KASan support for running protected virtualization host with
   4-level paging

 - Utilize the destroy page ultravisor call to speed up secure guest
   shutdown

 - Implement ioremap_wc() and ioremap_prot() with MIO in PCI code

 - Various checksum improvements

 - Various other small fixes and improvements all over the code

* tag 's390-5.10-1' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux: (85 commits)
  s390/uaccess: fix indentation
  s390/uaccess: add default cases for __put_user_fn()/__get_user_fn()
  s390/zcrypt: fix wrong format specifications
  s390/kprobes: move insn_page to text segment
  s390/sie: fix typo in SIGP code description
  s390/lib: fix kernel doc for memcmp()
  s390/zcrypt: Introduce Failure Injection feature
  s390/zcrypt: move ap_msg param one level up the call chain
  s390/ap/zcrypt: revisit ap and zcrypt error handling
  s390/ap: Support AP card SCLP config and deconfig operations
  s390/sclp: Add support for SCLP AP adapter config/deconfig
  s390/ap: add card/queue deconfig state
  s390/ap: add error response code field for ap queue devices
  s390/ap: split ap queue state machine state from device state
  s390/zcrypt: New config switch CONFIG_ZCRYPT_DEBUG
  s390/zcrypt: introduce msg tracking in zcrypt functions
  s390/startup: correct early pgm check info formatting
  s390: remove orphaned extern variables declarations
  s390/kasan: make sure int handler always run with DAT on
  s390/ipl: add support to control memory clearing for nvme re-IPL
  ...
parents 96685f86 10e5afb3
@@ -24,7 +24,7 @@
 | parisc: | TODO |
 | powerpc: | ok |
 | riscv: | TODO |
-| s390: | TODO |
+| s390: | ok |
 | sh: | ok |
 | sparc: | TODO |
 | um: | TODO |
......
@@ -60,6 +60,7 @@ config S390
 	def_bool y
 	select ARCH_BINFMT_ELF_STATE
 	select ARCH_HAS_DEBUG_VM_PGTABLE
+	select ARCH_HAS_DEBUG_WX
 	select ARCH_HAS_DEVMEM_IS_ALLOWED
 	select ARCH_HAS_ELF_RANDOMIZE
 	select ARCH_HAS_FORTIFY_SOURCE
@@ -73,6 +74,7 @@ config S390
 	select ARCH_HAS_STRICT_MODULE_RWX
 	select ARCH_HAS_SYSCALL_WRAPPER
 	select ARCH_HAS_UBSAN_SANITIZE_ALL
+	select ARCH_HAS_VDSO_DATA
 	select ARCH_HAVE_NMI_SAFE_CMPXCHG
 	select ARCH_INLINE_READ_LOCK
 	select ARCH_INLINE_READ_LOCK_BH
@@ -118,6 +120,8 @@ config S390
 	select GENERIC_CPU_AUTOPROBE
 	select GENERIC_CPU_VULNERABILITIES
 	select GENERIC_FIND_FIRST_BIT
+	select GENERIC_GETTIMEOFDAY
+	select GENERIC_PTDUMP
 	select GENERIC_SMP_IDLE_THREAD
 	select GENERIC_TIME_VSYSCALL
 	select HAVE_ALIGNED_STRUCT_PAGE if SLUB
@@ -149,6 +153,7 @@ config S390
 	select HAVE_FUNCTION_TRACER
 	select HAVE_FUTEX_CMPXCHG if FUTEX
 	select HAVE_GCC_PLUGINS
+	select HAVE_GENERIC_VDSO
 	select HAVE_KERNEL_BZIP2
 	select HAVE_KERNEL_GZIP
 	select HAVE_KERNEL_LZ4
......
@@ -3,17 +3,5 @@
 config TRACE_IRQFLAGS_SUPPORT
 	def_bool y

-config S390_PTDUMP
-	bool "Export kernel pagetable layout to userspace via debugfs"
-	depends on DEBUG_KERNEL
-	select DEBUG_FS
-	help
-	  Say Y here if you want to show the kernel pagetable layout in a
-	  debugfs file. This information is only useful for kernel developers
-	  who are working in architecture specific areas of the kernel.
-	  It is probably not a good idea to enable this feature in a production
-	  kernel.
-	  If in doubt, say "N"
-
 config EARLY_PRINTK
 	def_bool y
@@ -73,7 +73,3 @@ $(obj)/startup.a: $(OBJECTS) FORCE
 install:
 	sh -x $(srctree)/$(obj)/install.sh $(KERNELRELEASE) $(obj)/bzImage \
 	System.map "$(INSTALL_PATH)"
-
-chkbss := $(obj-y)
-chkbss-target := startup.a
-include $(srctree)/arch/s390/scripts/Makefile.chkbss
@@ -62,7 +62,3 @@ $(obj)/vmlinux.bin.xz: $(vmlinux.bin.all-y) FORCE
 OBJCOPYFLAGS_piggy.o := -I binary -O elf64-s390 -B s390:64-bit --rename-section .data=.vmlinux.bin.compressed
 $(obj)/piggy.o: $(obj)/vmlinux.bin$(suffix-y) FORCE
 	$(call if_changed,objcopy)
-
-chkbss := $(filter-out piggy.o info.o, $(obj-y))
-chkbss-target := vmlinux.bin
-include $(srctree)/arch/s390/scripts/Makefile.chkbss
@@ -16,7 +16,6 @@
  * gzip declarations
  */
 #define STATIC static
-#define STATIC_RW_DATA static __section(.data)

 #undef memset
 #undef memcpy
......
@@ -58,6 +58,19 @@ SECTIONS
 	BOOT_DATA
 	BOOT_DATA_PRESERVED

+	/*
+	 * This is the BSS section of the decompressor, not of the
+	 * decompressed Linux kernel. It takes up space in the
+	 * decompressor's image.
+	 */
+	. = ALIGN(8);
+	.bss : {
+		_bss = . ;
+		*(.bss)
+		*(.bss.*)
+		*(COMMON)
+		_ebss = .;
+	}
+
 	/*
 	 * uncompressed image info used by the decompressor; it should match
 	 * struct vmlinux_info. It comes from .vmlinux.info section of
@@ -81,15 +94,6 @@ SECTIONS
 		FILL(0xff);
 		. = ALIGN(4096);
 	}
-	. = ALIGN(256);
-	.bss : {
-		_bss = . ;
-		*(.bss)
-		*(.bss.*)
-		*(COMMON)
-		. = ALIGN(8);	/* For convenience during zeroing */
-		_ebss = .;
-	}
 	_end = .;

 	/* Sections to be discarded */
......
@@ -360,22 +360,23 @@ ENTRY(startup_kdump)
 # the save area and does disabled wait with a faulty address.
 #
 ENTRY(startup_pgm_check_handler)
-	stmg	%r0,%r15,__LC_SAVE_AREA_SYNC
-	la	%r1,4095
-	stctg	%c0,%c15,__LC_CREGS_SAVE_AREA-4095(%r1)
-	mvc	__LC_GPREGS_SAVE_AREA-4095(128,%r1),__LC_SAVE_AREA_SYNC
-	mvc	__LC_PSW_SAVE_AREA-4095(16,%r1),__LC_PGM_OLD_PSW
+	stmg	%r8,%r15,__LC_SAVE_AREA_SYNC
+	la	%r8,4095
+	stctg	%c0,%c15,__LC_CREGS_SAVE_AREA-4095(%r8)
+	stmg	%r0,%r7,__LC_GPREGS_SAVE_AREA-4095(%r8)
+	mvc	__LC_GPREGS_SAVE_AREA-4095+64(64,%r8),__LC_SAVE_AREA_SYNC
+	mvc	__LC_PSW_SAVE_AREA-4095(16,%r8),__LC_PGM_OLD_PSW
 	mvc	__LC_RETURN_PSW(16),__LC_PGM_OLD_PSW
 	ni	__LC_RETURN_PSW,0xfc	# remove IO and EX bits
 	ni	__LC_RETURN_PSW+1,0xfb	# remove MCHK bit
 	oi	__LC_RETURN_PSW+1,0x2	# set wait state bit
-	larl	%r2,.Lold_psw_disabled_wait
-	stg	%r2,__LC_PGM_NEW_PSW+8
-	l	%r15,.Ldump_info_stack-.Lold_psw_disabled_wait(%r2)
+	larl	%r9,.Lold_psw_disabled_wait
+	stg	%r9,__LC_PGM_NEW_PSW+8
+	l	%r15,.Ldump_info_stack-.Lold_psw_disabled_wait(%r9)
 	brasl	%r14,print_pgm_check_info
 .Lold_psw_disabled_wait:
-	la	%r1,4095
-	lmg	%r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)
+	la	%r8,4095
+	lmg	%r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r8)
 	lpswe	__LC_RETURN_PSW		# disabled wait
.Ldump_info_stack:
 	.long	0x5000 + PAGE_SIZE - STACK_FRAME_OVERHEAD
......
@@ -21,7 +21,7 @@ unsigned long __bootdata(memory_end);
 int __bootdata(memory_end_set);
 int __bootdata(noexec_disabled);

-int kaslr_enabled __section(.data);
+int kaslr_enabled;

 static inline int __diag308(unsigned long subcode, void *addr)
 {
@@ -70,30 +70,44 @@ static size_t scpdata_length(const u8 *buf, size_t count)
 static size_t ipl_block_get_ascii_scpdata(char *dest, size_t size,
 					  const struct ipl_parameter_block *ipb)
 {
-	size_t count;
-	size_t i;
+	const __u8 *scp_data;
+	__u32 scp_data_len;
 	int has_lowercase;
+	size_t count = 0;
+	size_t i;
+
+	switch (ipb->pb0_hdr.pbt) {
+	case IPL_PBT_FCP:
+		scp_data_len = ipb->fcp.scp_data_len;
+		scp_data = ipb->fcp.scp_data;
+		break;
+	case IPL_PBT_NVME:
+		scp_data_len = ipb->nvme.scp_data_len;
+		scp_data = ipb->nvme.scp_data;
+		break;
+	default:
+		goto out;
+	}

-	count = min(size - 1, scpdata_length(ipb->fcp.scp_data,
-					     ipb->fcp.scp_data_len));
+	count = min(size - 1, scpdata_length(scp_data, scp_data_len));
 	if (!count)
 		goto out;

 	has_lowercase = 0;
 	for (i = 0; i < count; i++) {
-		if (!isascii(ipb->fcp.scp_data[i])) {
+		if (!isascii(scp_data[i])) {
 			count = 0;
 			goto out;
 		}
-		if (!has_lowercase && islower(ipb->fcp.scp_data[i]))
+		if (!has_lowercase && islower(scp_data[i]))
 			has_lowercase = 1;
 	}

 	if (has_lowercase)
-		memcpy(dest, ipb->fcp.scp_data, count);
+		memcpy(dest, scp_data, count);
 	else
 		for (i = 0; i < count; i++)
-			dest[i] = tolower(ipb->fcp.scp_data[i]);
+			dest[i] = tolower(scp_data[i]);
 out:
 	dest[count] = '\0';
 	return count;
@@ -115,6 +129,7 @@ static void append_ipl_block_parm(void)
 			parm, COMMAND_LINE_SIZE - len - 1, &ipl_block);
 		break;
 	case IPL_PBT_FCP:
+	case IPL_PBT_NVME:
 		rc = ipl_block_get_ascii_scpdata(
 			parm, COMMAND_LINE_SIZE - len - 1, &ipl_block);
 		break;
@@ -209,7 +224,7 @@ static void modify_fac_list(char *str)
 	check_cleared_facilities();
 }

-static char command_line_buf[COMMAND_LINE_SIZE] __section(.data);
+static char command_line_buf[COMMAND_LINE_SIZE];

 void parse_boot_command_line(void)
 {
 	char *param, *val;
@@ -230,7 +245,7 @@ void parse_boot_command_line(void)
 		if (!strcmp(param, "vmalloc") && val)
 			vmalloc_size = round_up(memparse(val, NULL), PAGE_SIZE);

-		if (!strcmp(param, "dfltcc")) {
+		if (!strcmp(param, "dfltcc") && val) {
 			if (!strcmp(val, "off"))
 				zlib_dfltcc_support = ZLIB_DFLTCC_DISABLED;
 			else if (!strcmp(val, "on"))
@@ -254,17 +269,34 @@ void parse_boot_command_line(void)
 		if (!strcmp(param, "nokaslr"))
 			kaslr_enabled = 0;

+#if IS_ENABLED(CONFIG_KVM)
+		if (!strcmp(param, "prot_virt")) {
+			rc = kstrtobool(val, &enabled);
+			if (!rc && enabled)
+				prot_virt_host = 1;
+		}
+#endif
 	}
 }

+static inline bool is_ipl_block_dump(void)
+{
+	if (ipl_block.pb0_hdr.pbt == IPL_PBT_FCP &&
+	    ipl_block.fcp.opt == IPL_PB0_FCP_OPT_DUMP)
+		return true;
+	if (ipl_block.pb0_hdr.pbt == IPL_PBT_NVME &&
+	    ipl_block.nvme.opt == IPL_PB0_NVME_OPT_DUMP)
+		return true;
+	return false;
+}
+
 void setup_memory_end(void)
 {
 #ifdef CONFIG_CRASH_DUMP
 	if (OLDMEM_BASE) {
 		kaslr_enabled = 0;
-	} else if (ipl_block_valid &&
-		   ipl_block.pb0_hdr.pbt == IPL_PBT_FCP &&
-		   ipl_block.fcp.opt == IPL_PB0_FCP_OPT_DUMP) {
+	} else if (ipl_block_valid && is_ipl_block_dump()) {
 		kaslr_enabled = 0;
 		if (!sclp_early_get_hsa_size(&memory_end) && memory_end)
 			memory_end_set = 1;
......
@@ -42,7 +42,7 @@ static int check_prng(void)
 	return PRNG_MODE_TDES;
 }

-static unsigned long get_random(unsigned long limit)
+static int get_random(unsigned long limit, unsigned long *value)
 {
 	struct prng_parm prng = {
 		/* initial parameter block for tdes mode, copied from libica */
@@ -84,19 +84,101 @@ static unsigned long get_random(unsigned long limit)
 				  (u8 *) &random, sizeof(random));
 		break;
 	default:
-		random = 0;
+		return -1;
 	}
-	return random % limit;
+	*value = random % limit;
+	return 0;
+}
/*
 * To randomize the kernel base address we have to consider several facts:
 * 1. physical online memory might not be contiguous and can have holes;
 *    mem_detect info contains the list of online memory ranges we should
 *    consider.
 * 2. several memory regions are already occupied and must not be
 *    overwritten. Currently safe_addr tells us the border below which all
 *    those occupied regions lie; we are safe to use anything above
 *    safe_addr.
 * 3. an upper limit might apply as well, even if memory above that limit is
 *    online. Currently those limitations are:
 *    3.1. the limit set by the "mem=" kernel command line option
 *    3.2. memory reserved at the end for kasan initialization.
 * 4. the kernel base address must be aligned to THREAD_SIZE (the kernel
 *    stack size), which is required for CONFIG_CHECK_STACK. Currently
 *    THREAD_SIZE is 4 pages (16 pages when the kernel is built with kasan
 *    enabled).
 * Assumptions:
 * 1. kernel size (including .bss size) and the upper memory limit are page
 *    aligned.
 * 2. mem_detect memory region start is THREAD_SIZE aligned / end is
 *    PAGE_SIZE aligned (in practice memory configuration granularity on
 *    z/VM and LPAR is 1 MB).
 *
 * To guarantee uniform distribution of the kernel base address among all
 * suitable addresses we generate a random value just once. For that we need
 * to build a continuous range in which every value would be suitable. We
 * can build this range by simply counting all suitable addresses (let's
 * call them positions) which would be valid as a kernel base address. To
 * count positions we iterate over online memory ranges. For each range
 * which is big enough for the kernel image we count all suitable addresses
 * the kernel image could be put at, that is
 *   (end - start - kernel_size) / THREAD_SIZE + 1
 * The two functions count_valid_kernel_positions and position_to_address
 * count the positions in a given memory range and convert a position back
 * to an address.
 */
static unsigned long count_valid_kernel_positions(unsigned long kernel_size,
unsigned long _min,
unsigned long _max)
{
unsigned long start, end, pos = 0;
int i;
for_each_mem_detect_block(i, &start, &end) {
if (_min >= end)
continue;
if (start >= _max)
break;
start = max(_min, start);
end = min(_max, end);
if (end - start < kernel_size)
continue;
pos += (end - start - kernel_size) / THREAD_SIZE + 1;
}
return pos;
}
static unsigned long position_to_address(unsigned long pos, unsigned long kernel_size,
unsigned long _min, unsigned long _max)
{
unsigned long start, end;
int i;
for_each_mem_detect_block(i, &start, &end) {
if (_min >= end)
continue;
if (start >= _max)
break;
start = max(_min, start);
end = min(_max, end);
if (end - start < kernel_size)
continue;
if ((end - start - kernel_size) / THREAD_SIZE + 1 >= pos)
return start + (pos - 1) * THREAD_SIZE;
pos -= (end - start - kernel_size) / THREAD_SIZE + 1;
}
return 0;
} }
 unsigned long get_random_base(unsigned long safe_addr)
 {
-	unsigned long memory_limit = memory_end_set ? memory_end : 0;
-	unsigned long base, start, end, kernel_size;
-	unsigned long block_sum, offset;
+	unsigned long memory_limit = get_mem_detect_end();
+	unsigned long base_pos, max_pos, kernel_size;
 	unsigned long kasan_needs;
 	int i;

+	if (memory_end_set)
+		memory_limit = min(memory_limit, memory_end);
+
 	if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && INITRD_START && INITRD_SIZE) {
 		if (safe_addr < INITRD_START + INITRD_SIZE)
 			safe_addr = INITRD_START + INITRD_SIZE;
@@ -126,45 +208,17 @@ unsigned long get_random_base(unsigned long safe_addr)
 	}
 	kernel_size = vmlinux.image_size + vmlinux.bss_size;
-	block_sum = 0;
-	for_each_mem_detect_block(i, &start, &end) {
-		if (memory_limit) {
-			if (start >= memory_limit)
-				break;
-			if (end > memory_limit)
-				end = memory_limit;
-		}
-		if (end - start < kernel_size)
-			continue;
-		block_sum += end - start - kernel_size;
-	}
-	if (!block_sum) {
+	if (safe_addr + kernel_size > memory_limit)
+		return 0;
+
+	max_pos = count_valid_kernel_positions(kernel_size, safe_addr, memory_limit);
+	if (!max_pos) {
 		sclp_early_printk("KASLR disabled: not enough memory\n");
 		return 0;
 	}
-	base = get_random(block_sum);
-	if (base == 0)
-		return 0;
-	if (base < safe_addr)
-		base = safe_addr;
-	block_sum = offset = 0;
-	for_each_mem_detect_block(i, &start, &end) {
-		if (memory_limit) {
-			if (start >= memory_limit)
-				break;
-			if (end > memory_limit)
-				end = memory_limit;
-		}
-		if (end - start < kernel_size)
-			continue;
-		block_sum += end - start - kernel_size;
-		if (base <= block_sum) {
-			base = start + base - offset;
-			base = ALIGN_DOWN(base, THREAD_SIZE);
-			break;
-		}
-		offset = block_sum;
-	}
-	return base;
+
+	/* we need a value in the range [1, max_pos] inclusive */
+	if (get_random(max_pos, &base_pos))
+		return 0;
+
+	return position_to_address(base_pos + 1, kernel_size, safe_addr, memory_limit);
 }
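
The two-pass scheme described in the comment above is easy to check in isolation. The following stand-alone C sketch mirrors only the counting arithmetic: the memory ranges, kernel size, alignment value, and the use of rand() in place of the boot PRNG are all invented for illustration.

#include <stdio.h>
#include <stdlib.h>

#define ALIGN_STEP 0x10000UL	/* stands in for THREAD_SIZE; value is made up */

struct range { unsigned long start, end; };

/* example online-memory ranges with a hole between them */
static const struct range mem[] = {
	{ 0x0100000, 0x4000000 },
	{ 0x8000000, 0x9000000 },
};
#define NR_RANGES (sizeof(mem) / sizeof(mem[0]))

/* pass 1: count every aligned load position across all usable ranges */
static unsigned long count_positions(unsigned long kernel_size)
{
	unsigned long pos = 0;

	for (size_t i = 0; i < NR_RANGES; i++) {
		if (mem[i].end - mem[i].start < kernel_size)
			continue;
		pos += (mem[i].end - mem[i].start - kernel_size) / ALIGN_STEP + 1;
	}
	return pos;
}

/* pass 2: map a 1-based position index back to a base address */
static unsigned long position_to_addr(unsigned long pos, unsigned long kernel_size)
{
	for (size_t i = 0; i < NR_RANGES; i++) {
		unsigned long n;

		if (mem[i].end - mem[i].start < kernel_size)
			continue;
		n = (mem[i].end - mem[i].start - kernel_size) / ALIGN_STEP + 1;
		if (pos <= n)
			return mem[i].start + (pos - 1) * ALIGN_STEP;
		pos -= n;
	}
	return 0;
}

int main(void)
{
	unsigned long kernel_size = 0x800000;	/* 8 MiB, made up */
	unsigned long max = count_positions(kernel_size);
	unsigned long pick = (unsigned long)rand() % max + 1;	/* 1..max */

	printf("%lu positions, picked %lu -> base 0x%lx\n",
	       max, pick, position_to_addr(pick, kernel_size));
	return 0;
}

Because every position is counted exactly once before the single random draw, each candidate base address is equally likely, and the returned base can never straddle the hole between the two ranges.
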
@@ -2,6 +2,7 @@
 #include <linux/kernel.h>
 #include <linux/string.h>
 #include <asm/lowcore.h>
+#include <asm/setup.h>
 #include <asm/sclp.h>
 #include "boot.h"
@@ -32,7 +33,8 @@ void print_pgm_check_info(void)
 	char *p;

 	add_str(buf, "Linux version ");
-	strlcat(buf, kernel_version, sizeof(buf));
+	strlcat(buf, kernel_version, sizeof(buf) - 1);
+	strlcat(buf, "\n", sizeof(buf));
 	sclp_early_printk(buf);

 	p = add_str(buf, "Kernel fault: interruption code ");
@@ -42,6 +44,13 @@ void print_pgm_check_info(void)
 	add_str(p, "\n");
 	sclp_early_printk(buf);

+	if (kaslr_enabled) {
+		p = add_str(buf, "Kernel random base: ");
+		p = add_val_as_hex(p, __kaslr_offset);
+		add_str(p, "\n");
+		sclp_early_printk(buf);
+	}
+
 	p = add_str(buf, "PSW : ");
 	p = add_val_as_hex(p, S390_lowcore.psw_save_area.mask);
 	p = add_str(p, " ");
......
@@ -48,8 +48,6 @@ struct diag_ops __bootdata_preserved(diag_dma_ops) = {
 };
 static struct diag210 _diag210_tmp_dma __section(.dma.data);
 struct diag210 *__bootdata_preserved(__diag210_tmp_dma) = &_diag210_tmp_dma;
-void _swsusp_reset_dma(void);
-unsigned long __bootdata_preserved(__swsusp_reset_dma) = __pa(_swsusp_reset_dma);

 void error(char *x)
 {
@@ -120,6 +118,9 @@ static void handle_relocs(unsigned long offset)
 	}
 }

+/*
+ * This function clears the BSS section of the decompressed Linux kernel and NOT the decompressor's.
+ */
 static void clear_bss_section(void)
 {
 	memset((void *)vmlinux.default_lma + vmlinux.image_size, 0, vmlinux.bss_size);
......
@@ -96,23 +96,6 @@ ENTRY(_diag0c_dma)
 	BR_EX_DMA_r14
 ENDPROC(_diag0c_dma)

-/*
- * void _swsusp_reset_dma(void)
- */
-ENTRY(_swsusp_reset_dma)
-	larl	%r1,restart_entry
-	larl	%r2,.Lrestart_diag308_psw
-	og	%r1,0(%r2)
-	stg	%r1,0(%r0)
-	lghi	%r0,0
-	diag	%r0,%r0,0x308
-restart_entry:
-	lhi	%r1,1
-	sigp	%r1,%r0,SIGP_SET_ARCHITECTURE
-	sam64
-	BR_EX_DMA_r14
-ENDPROC(_swsusp_reset_dma)
-
 /*
  * void _diag308_reset_dma(void)
  *
......
@@ -7,6 +7,9 @@
 #ifdef CONFIG_PROTECTED_VIRTUALIZATION_GUEST
 int __bootdata_preserved(prot_virt_guest);
 #endif
+#if IS_ENABLED(CONFIG_KVM)
+int __bootdata_preserved(prot_virt_host);
+#endif

 struct uv_info __bootdata_preserved(uv_info);

 void uv_query_info(void)
......
@@ -775,6 +775,8 @@ CONFIG_MAGIC_SYSRQ=y
 CONFIG_DEBUG_PAGEALLOC=y
 CONFIG_PAGE_OWNER=y
 CONFIG_DEBUG_RODATA_TEST=y
+CONFIG_DEBUG_WX=y
+CONFIG_PTDUMP_DEBUGFS=y
 CONFIG_DEBUG_OBJECTS=y
 CONFIG_DEBUG_OBJECTS_SELFTEST=y
 CONFIG_DEBUG_OBJECTS_FREE=y
@@ -822,7 +824,6 @@ CONFIG_FTRACE_SYSCALLS=y
 CONFIG_BLK_DEV_IO_TRACE=y
 CONFIG_BPF_KPROBE_OVERRIDE=y
 CONFIG_HIST_TRIGGERS=y
-CONFIG_S390_PTDUMP=y
 CONFIG_NOTIFIER_ERROR_INJECTION=m
 CONFIG_NETDEV_NOTIFIER_ERROR_INJECT=m
 CONFIG_FAULT_INJECTION=y
......
@@ -759,6 +759,8 @@ CONFIG_GDB_SCRIPTS=y
 CONFIG_FRAME_WARN=1024
 CONFIG_DEBUG_SECTION_MISMATCH=y
 CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_WX=y
+CONFIG_PTDUMP_DEBUGFS=y
 CONFIG_DEBUG_MEMORY_INIT=y
 CONFIG_PANIC_ON_OOPS=y
 CONFIG_TEST_LOCKUP=m
@@ -775,7 +777,6 @@ CONFIG_FTRACE_SYSCALLS=y
 CONFIG_BLK_DEV_IO_TRACE=y
 CONFIG_BPF_KPROBE_OVERRIDE=y
 CONFIG_HIST_TRIGGERS=y
-CONFIG_S390_PTDUMP=y
 CONFIG_LKDTM=m
 CONFIG_PERCPU_TEST=m
 CONFIG_ATOMIC64_SELFTEST=y
......
@@ -13,21 +13,21 @@
 #define _S390_CHECKSUM_H

 #include <linux/uaccess.h>
+#include <linux/in6.h>

 /*
- * computes the checksum of a memory block at buff, length len,
- * and adds in "sum" (32-bit)
+ * Computes the checksum of a memory block at buff, length len,
+ * and adds in "sum" (32-bit).
  *
- * returns a 32-bit number suitable for feeding into itself
- * or csum_tcpudp_magic
+ * Returns a 32-bit number suitable for feeding into itself
+ * or csum_tcpudp_magic.
  *
- * this function must be called with even lengths, except
- * for the last fragment, which may be odd
+ * This function must be called with even lengths, except
+ * for the last fragment, which may be odd.
  *
- * it's best to have buff aligned on a 32-bit boundary
+ * It's best to have buff aligned on a 32-bit boundary.
  */
-static inline __wsum
-csum_partial(const void *buff, int len, __wsum sum)
+static inline __wsum csum_partial(const void *buff, int len, __wsum sum)
 {
 	register unsigned long reg2 asm("2") = (unsigned long) buff;
 	register unsigned long reg3 asm("3") = (unsigned long) len;
@@ -40,74 +40,91 @@ csum_partial(const void *buff, int len, __wsum sum)
 }

 /*
- * Fold a partial checksum without adding pseudo headers
+ * Fold a partial checksum without adding pseudo headers.
  */
 static inline __sum16 csum_fold(__wsum sum)
 {
 	u32 csum = (__force u32) sum;

-	csum += (csum >> 16) + (csum << 16);
+	csum += (csum >> 16) | (csum << 16);
 	csum >>= 16;
 	return (__force __sum16) ~csum;
 }

 /*
  * This is a version of ip_compute_csum() optimized for IP headers,
- * which always checksum on 4 octet boundaries.
- *
+ * which always checksums on 4 octet boundaries.
  */
 static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
 {
-	return csum_fold(csum_partial(iph, ihl*4, 0));
+	__u64 csum = 0;
+	__u32 *ptr = (u32 *)iph;
+
+	csum += *ptr++;
+	csum += *ptr++;
+	csum += *ptr++;
+	csum += *ptr++;
+	ihl -= 4;
+	while (ihl--)
+		csum += *ptr++;
+	csum += (csum >> 32) | (csum << 32);
+	return csum_fold((__force __wsum)(csum >> 32));
 }

 /*
- * computes the checksum of the TCP/UDP pseudo-header
- * returns a 32-bit checksum
+ * Computes the checksum of the TCP/UDP pseudo-header.
+ * Returns a 32-bit checksum.
  */
-static inline __wsum
-csum_tcpudp_nofold(__be32 saddr, __be32 daddr, __u32 len, __u8 proto,
-		   __wsum sum)
+static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, __u32 len,
+					__u8 proto, __wsum sum)
 {
-	__u32 csum = (__force __u32)sum;
+	__u64 csum = (__force __u64)sum;

 	csum += (__force __u32)saddr;
-	if (csum < (__force __u32)saddr)
-		csum++;
 	csum += (__force __u32)daddr;
-	if (csum < (__force __u32)daddr)
-		csum++;
-	csum += len + proto;
-	if (csum < len + proto)
-		csum++;
-	return (__force __wsum)csum;
+	csum += len;
+	csum += proto;
+	csum += (csum >> 32) | (csum << 32);
+	return (__force __wsum)(csum >> 32);
 }

 /*
- * computes the checksum of the TCP/UDP pseudo-header
- * returns a 16-bit checksum, already complemented
+ * Computes the checksum of the TCP/UDP pseudo-header.
+ * Returns a 16-bit checksum, already complemented.
  */
-static inline __sum16
-csum_tcpudp_magic(__be32 saddr, __be32 daddr, __u32 len, __u8 proto,
-		  __wsum sum)
+static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr, __u32 len,
					__u8 proto, __wsum sum)
 {
-	return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum));
+	return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
 }

 /*
- * this routine is used for miscellaneous IP-like checksums, mainly
- * in icmp.c
+ * Used for miscellaneous IP-like checksums, mainly icmp.
  */
 static inline __sum16 ip_compute_csum(const void *buff, int len)
 {
 	return csum_fold(csum_partial(buff, len, 0));
 }

+#define _HAVE_ARCH_IPV6_CSUM
+static inline __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
+				      const struct in6_addr *daddr,
+				      __u32 len, __u8 proto, __wsum csum)
+{
+	__u64 sum = (__force __u64)csum;
+
+	sum += (__force __u32)saddr->s6_addr32[0];
+	sum += (__force __u32)saddr->s6_addr32[1];
+	sum += (__force __u32)saddr->s6_addr32[2];
+	sum += (__force __u32)saddr->s6_addr32[3];
+	sum += (__force __u32)daddr->s6_addr32[0];
+	sum += (__force __u32)daddr->s6_addr32[1];
+	sum += (__force __u32)daddr->s6_addr32[2];
+	sum += (__force __u32)daddr->s6_addr32[3];
+	sum += len;
+	sum += proto;
+	sum += (sum >> 32) | (sum << 32);
+	return csum_fold((__force __wsum)(sum >> 32));
+}
+
 #endif /* _S390_CHECKSUM_H */
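
The rewritten pseudo-header helpers above rely on one identity: accumulating 32-bit quantities into a 64-bit register and folding once with csum += (csum >> 32) | (csum << 32) gives the same result as chaining 32-bit adds with explicit end-around carries. A small host-side sketch checking the two against each other; the helper names and test values are invented for the example:

#include <stdint.h>
#include <stdio.h>

/* old style: 32-bit adds with end-around carry after each addition */
static uint32_t add32_carry(uint32_t sum, uint32_t v)
{
	sum += v;
	return sum + (sum < v);	/* fold the carry back in */
}

/* new style: accumulate in 64 bits, fold once at the end */
static uint32_t fold64(uint64_t csum)
{
	csum += (csum >> 32) | (csum << 32);
	return (uint32_t)(csum >> 32);
}

int main(void)
{
	uint32_t saddr = 0xc0a80001, daddr = 0xc0a80002, len = 1480, proto = 6;

	uint32_t a = add32_carry(add32_carry(add32_carry(0, saddr), daddr),
				 len + proto);
	uint64_t c = (uint64_t)saddr + daddr + len + proto;
	uint32_t b = fold64(c);

	printf("carry chain: %08x, 64-bit fold: %08x\n", a, b);
	return a == b ? 0 : 1;
}

For these inputs both paths produce 0x815005d2; the 64-bit variant simply defers all carry handling to a single fold, which is why the rewritten functions need no conditional increments.
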
@@ -356,7 +356,6 @@ static inline u8 pathmask_to_pos(u8 mask)
 	return 8 - ffs(mask);
 }

-void channel_subsystem_reinit(void);
 extern void css_schedule_reprobe(void);

 extern void *cio_dma_zalloc(size_t size);
@@ -372,6 +371,7 @@ struct gen_pool *cio_gp_dma_create(struct device *dma_dev, int nr_pages);
 /* Function from drivers/s390/cio/chsc.c */
 int chsc_sstpc(void *page, unsigned int op, u16 ctrl, u64 *clock_delta);
 int chsc_sstpi(void *page, void *result, size_t size);
+int chsc_stzi(void *page, void *result, size_t size);
 int chsc_sgib(u32 origin);
 #endif
/* SPDX-License-Identifier: GPL-2.0 */
/* s390-specific clocksource additions */
#ifndef _ASM_S390_CLOCKSOURCE_H
#define _ASM_S390_CLOCKSOURCE_H
#endif /* _ASM_S390_CLOCKSOURCE_H */
@@ -5,6 +5,9 @@
 /* CLP common request & response block size */
 #define CLP_BLK_SIZE		PAGE_SIZE

+/* Call Logical Processor - Command Code */
+#define CLP_SLPC		0x0001
+
 #define CLP_LPS_BASE	0
 #define CLP_LPS_PCI	2
......
@@ -140,8 +140,6 @@ int gmap_shadow_page(struct gmap *sg, unsigned long saddr, pte_t pte);
 void gmap_register_pte_notifier(struct gmap_notifier *);
 void gmap_unregister_pte_notifier(struct gmap_notifier *);
-void gmap_pte_notify(struct mm_struct *, unsigned long addr, pte_t *,
-		     unsigned long bits);

 int gmap_mprotect_notify(struct gmap *, unsigned long start,
 			 unsigned long len, int prot);
......
@@ -12,6 +12,7 @@
 #include <linux/kernel.h>
 #include <asm/page.h>
+#include <asm/pgtable.h>
 #include <asm/pci_io.h>

 #define xlate_dev_mem_ptr xlate_dev_mem_ptr
@@ -26,7 +27,10 @@ void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr);

 #define IO_SPACE_LIMIT 0

+void __iomem *ioremap_prot(phys_addr_t addr, size_t size, unsigned long prot);
 void __iomem *ioremap(phys_addr_t addr, size_t size);
+void __iomem *ioremap_wc(phys_addr_t addr, size_t size);
+void __iomem *ioremap_wt(phys_addr_t addr, size_t size);
 void iounmap(volatile void __iomem *addr);

 static inline void __iomem *ioport_map(unsigned long port, unsigned int nr)
@@ -52,6 +56,10 @@ static inline void ioport_unmap(void __iomem *p)
 #define pci_iomap_wc pci_iomap_wc
 #define pci_iomap_wc_range pci_iomap_wc_range

+#define ioremap ioremap
+#define ioremap_wt ioremap_wt
+#define ioremap_wc ioremap_wc
+
 #define memcpy_fromio(dst, src, count)	zpci_memcpy_fromio(dst, src, count)
 #define memcpy_toio(dst, src, count)	zpci_memcpy_toio(dst, src, count)
 #define memset_io(dst, val, count)	zpci_memset_io(dst, val, count)
......
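
For context, a hypothetical driver-side view of the mappings declared above. The device, BAR number, and helper function are invented; only ioremap_wc() itself comes from this series, which per the merge description implements it via MIO and the new pgprot_writecombine():

#include <linux/io.h>
#include <linux/pci.h>

/* map a (made-up) frame-buffer BAR with write-combining semantics */
static void __iomem *map_frame_buffer(struct pci_dev *pdev, int bar)
{
	resource_size_t start = pci_resource_start(pdev, bar);
	resource_size_t len = pci_resource_len(pdev, bar);

	return ioremap_wc(start, len);
}
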
@@ -66,6 +66,7 @@ enum ipl_type {
 	IPL_TYPE_FCP_DUMP	= 8,
 	IPL_TYPE_NSS		= 16,
 	IPL_TYPE_NVME		= 32,
+	IPL_TYPE_NVME_DUMP	= 64,
 };

 struct ipl_info
@@ -94,6 +95,12 @@ extern struct ipl_info ipl_info;
 extern void setup_ipl(void);
 extern void set_os_info_reipl_block(void);

+static inline bool is_ipl_type_dump(void)
+{
+	return (ipl_info.type == IPL_TYPE_FCP_DUMP) ||
+		(ipl_info.type == IPL_TYPE_NVME_DUMP);
+}
+
 struct ipl_report {
 	struct ipl_parameter_block *ipib;
 	struct list_head components;
......
@@ -19,6 +19,7 @@
 extern void kasan_early_init(void);
 extern void kasan_copy_shadow(pgd_t *dst);
 extern void kasan_free_early_identity(void);
+extern unsigned long kasan_vmax;
 #else
 static inline void kasan_early_init(void) { }
 static inline void kasan_copy_shadow(pgd_t *dst) { }
......
@@ -208,9 +208,8 @@ int zpci_unregister_ioat(struct zpci_dev *, u8);
 void zpci_remove_reserved_devices(void);

 /* CLP */
+int clp_setup_writeback_mio(void);
 int clp_scan_pci_devices(void);
-int clp_rescan_pci_devices(void);
-int clp_rescan_pci_devices_simple(u32 *fid);
 int clp_add_pci_device(u32, u32, int);
 int clp_enable_fh(struct zpci_dev *, u8);
 int clp_disable_fh(struct zpci_dev *);
@@ -232,12 +231,10 @@ static inline bool zpci_use_mio(struct zpci_dev *zdev)
 /* Error handling and recovery */
 void zpci_event_error(void *);
 void zpci_event_availability(void *);
-void zpci_rescan(void);
 bool zpci_is_enabled(void);

 #else /* CONFIG_PCI */
 static inline void zpci_event_error(void *e) {}
 static inline void zpci_event_availability(void *e) {}
-static inline void zpci_rescan(void) {}
 #endif /* CONFIG_PCI */

 #ifdef CONFIG_HOTPLUG_PCI_S390
@@ -282,7 +279,6 @@ int zpci_debug_init(void);
 void zpci_debug_exit(void);
 void zpci_debug_init_device(struct zpci_dev *, const char *);
 void zpci_debug_exit_device(struct zpci_dev *);
-void zpci_debug_info(struct zpci_dev *, struct seq_file *);

 /* Error reporting */
 int zpci_report_error(struct pci_dev *, struct zpci_report_error_header *);
......
@@ -7,6 +7,7 @@
 /*
  * Call Logical Processor - Command Codes
  */
+#define CLP_SLPC		0x0001
 #define CLP_LIST_PCI		0x0002
 #define CLP_QUERY_PCI_FN	0x0003
 #define CLP_QUERY_PCI_FNGRP	0x0004
@@ -51,6 +52,19 @@ struct clp_fh_list_entry {

 extern bool zpci_unique_uid;

+struct clp_rsp_slpc_pci {
+	struct clp_rsp_hdr hdr;
+	u32 reserved2[4];
+	u32 lpif[8];
+	u32 reserved3[4];
+	u32 vwb		:  1;
+	u32		:  1;
+	u32 mio_wb	:  6;
+	u32		: 24;
+	u32 reserved5[3];
+	u32 lpic[8];
+} __packed;
+
 /* List PCI functions request */
 struct clp_req_list_pci {
 	struct clp_req_hdr hdr;
@@ -172,6 +186,11 @@ struct clp_rsp_set_pci {
 } __packed;

 /* Combined request/response block structures used by clp insn */
+struct clp_req_rsp_slpc_pci {
+	struct clp_req_slpc request;
+	struct clp_rsp_slpc_pci response;
+} __packed;
+
 struct clp_req_rsp_list_pci {
 	struct clp_req_list_pci request;
 	struct clp_rsp_list_pci response;
......
@@ -146,8 +146,6 @@ static inline void pmd_populate(struct mm_struct *mm,
 #define pte_free_kernel(mm, pte) page_table_free(mm, (unsigned long *) pte)
 #define pte_free(mm, pte) page_table_free(mm, (unsigned long *) pte)

-extern void rcu_table_freelist_finish(void);
-
 void vmem_map_init(void);
 void *vmem_crst_alloc(unsigned long val);
 pte_t *vmem_pte_alloc(void);
......
@@ -89,6 +89,7 @@ extern unsigned long VMALLOC_START;
 extern unsigned long VMALLOC_END;
 #define VMALLOC_DEFAULT_SIZE	((128UL << 30) - MODULES_LEN)
 extern struct page *vmemmap;
+extern unsigned long vmemmap_size;

 #define VMEM_MAX_PHYS ((unsigned long) vmemmap)
@@ -1186,6 +1187,12 @@ void gmap_pmdp_invalidate(struct mm_struct *mm, unsigned long vmaddr);
 void gmap_pmdp_idte_local(struct mm_struct *mm, unsigned long vmaddr);
 void gmap_pmdp_idte_global(struct mm_struct *mm, unsigned long vmaddr);

+#define pgprot_writecombine	pgprot_writecombine
+pgprot_t pgprot_writecombine(pgprot_t prot);
+
+#define pgprot_writethrough	pgprot_writethrough
+pgprot_t pgprot_writethrough(pgprot_t prot);
+
 /*
  * Certain architectures need to do special things when PTEs
  * within a page table are directly modified. Thus, the following
@@ -1209,7 +1216,8 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
 static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
 {
 	pte_t __pte;
-	pte_val(__pte) = physpage + pgprot_val(pgprot);
+
+	pte_val(__pte) = physpage | pgprot_val(pgprot);
 	if (!MACHINE_HAS_NX)
 		pte_val(__pte) &= ~_PAGE_NOEXEC;
 	return pte_mkyoung(__pte);
......
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_S390_PTDUMP_H
#define _ASM_S390_PTDUMP_H
void ptdump_check_wx(void);
static inline void debug_checkwx(void)
{
if (IS_ENABLED(CONFIG_DEBUG_WX))
ptdump_check_wx();
}
#endif /* _ASM_S390_PTDUMP_H */
@@ -26,9 +26,9 @@
 /**
  * struct qdesfmt0 - queue descriptor, format 0
- * @sliba: storage list information block address
- * @sla: storage list address
- * @slsba: storage list state block address
+ * @sliba: absolute address of storage list information block
+ * @sla: absolute address of storage list
+ * @slsba: absolute address of storage list state block
  * @akey: access key for SLIB
  * @bkey: access key for SL
  * @ckey: access key for SBALs
@@ -56,7 +56,7 @@ struct qdesfmt0 {
  * @oqdcnt: output queue descriptor count
  * @iqdsz: input queue descriptor size
  * @oqdsz: output queue descriptor size
- * @qiba: queue information block address
+ * @qiba: absolute address of queue information block
  * @qkey: queue information block key
  * @qdf0: queue descriptions
  */
@@ -327,7 +327,6 @@ typedef void qdio_handler_t(struct ccw_device *, unsigned int, int,
  * struct qdio_initialize - qdio initialization data
  * @q_format: queue format
  * @qdr_ac: feature flags to set
- * @adapter_name: name for the adapter
  * @qib_param_field_format: format for qib_parm_field
  * @qib_param_field: pointer to 128 bytes or NULL, if no param field
  * @qib_rflags: rflags to set
@@ -347,7 +346,6 @@ typedef void qdio_handler_t(struct ccw_device *, unsigned int, int,
 struct qdio_initialize {
 	unsigned char q_format;
 	unsigned char qdr_ac;
-	unsigned char adapter_name[8];
 	unsigned int qib_param_field_format;
 	unsigned char *qib_param_field;
 	unsigned char qib_rflags;
......
@@ -114,8 +114,7 @@ int sclp_early_get_core_info(struct sclp_core_info *info);
 void sclp_early_get_ipl_info(struct sclp_ipl_info *info);
 void sclp_early_detect(void);
 void sclp_early_printk(const char *s);
-void sclp_early_printk_force(const char *s);
-void __sclp_early_printk(const char *s, unsigned int len, unsigned int force);
+void __sclp_early_printk(const char *s, unsigned int len);

 int sclp_early_get_memsize(unsigned long *mem);
 int sclp_early_get_hsa_size(unsigned long *hsa_size);
@@ -129,6 +128,8 @@ int sclp_chp_deconfigure(struct chp_id chpid);
 int sclp_chp_read_info(struct sclp_chp_info *info);
 int sclp_pci_configure(u32 fid);
 int sclp_pci_deconfigure(u32 fid);
+int sclp_ap_configure(u32 apid);
+int sclp_ap_deconfigure(u32 apid);
 int sclp_pci_report(struct zpci_report_error_header *report, u32 fh, u32 fid);
 int memcpy_hsa_kernel(void *dest, unsigned long src, size_t count);
 int memcpy_hsa_user(void __user *dest, unsigned long src, size_t count);
......
@@ -2,6 +2,10 @@
 #ifndef _ASMS390_SET_MEMORY_H
 #define _ASMS390_SET_MEMORY_H

+#include <linux/mutex.h>
+
+extern struct mutex cpa_mutex;
+
 #define SET_MEMORY_RO	1UL
 #define SET_MEMORY_RW	2UL
 #define SET_MEMORY_NX	4UL
......
@@ -92,7 +92,9 @@ extern int memory_end_set;
 extern unsigned long memory_end;
 extern unsigned long vmalloc_size;
 extern unsigned long max_physmem_end;
-extern unsigned long __swsusp_reset_dma;
+
+/* The Write Back bit position in the physaddr is given by the SLPC PCI */
+extern unsigned long mio_wb_bit_mask;

 #define MACHINE_IS_VM		(S390_lowcore.machine_flags & MACHINE_FLAG_VM)
 #define MACHINE_IS_KVM		(S390_lowcore.machine_flags & MACHINE_FLAG_KVM)
@@ -119,9 +121,6 @@ extern unsigned int console_mode;
 extern unsigned int console_devno;
 extern unsigned int console_irq;

-extern char vmhalt_cmd[];
-extern char vmpoff_cmd[];
-
 #define CONSOLE_IS_UNDEFINED	(console_mode == 0)
 #define CONSOLE_IS_SCLP		(console_mode == 1)
 #define CONSOLE_IS_3215		(console_mode == 2)
......
@@ -31,7 +31,6 @@ extern void smp_emergency_stop(void);
 extern int smp_find_processor_id(u16 address);
 extern int smp_store_status(int cpu);
 extern void smp_save_dump_cpus(void);
-extern int smp_vcpu_scheduled(int cpu);
 extern void smp_yield_cpu(int cpu);
 extern void smp_cpu_set_polarization(int cpu, int val);
 extern int smp_cpu_get_polarization(int cpu);
......
@@ -6,43 +6,89 @@
 #ifndef __S390_STP_H
 #define __S390_STP_H

+#include <linux/compiler.h>
+
 /* notifier for syncs */
 extern struct atomic_notifier_head s390_epoch_delta_notifier;

 /* STP interruption parameter */
 struct stp_irq_parm {
-	unsigned int _pad0	: 14;
-	unsigned int tsc	: 1;	/* Timing status change */
-	unsigned int lac	: 1;	/* Link availability change */
-	unsigned int tcpc	: 1;	/* Time control parameter change */
-	unsigned int _pad2	: 15;
-} __attribute__ ((packed));
+	u32		: 14;
+	u32 tsc		:  1;	/* Timing status change */
+	u32 lac		:  1;	/* Link availability change */
+	u32 tcpc	:  1;	/* Time control parameter change */
+	u32		: 15;
+} __packed;

 #define STP_OP_SYNC	1
 #define STP_OP_CTRL	3

 struct stp_sstpi {
-	unsigned int rsvd0;
-	unsigned int rsvd1 : 8;
-	unsigned int stratum : 8;
-	unsigned int vbits : 16;
-	unsigned int leaps : 16;
-	unsigned int tmd : 4;
-	unsigned int ctn : 4;
-	unsigned int rsvd2 : 3;
-	unsigned int c : 1;
-	unsigned int tst : 4;
-	unsigned int tzo : 16;
-	unsigned int dsto : 16;
-	unsigned int ctrl : 16;
-	unsigned int rsvd3 : 16;
-	unsigned int tto;
-	unsigned int rsvd4;
-	unsigned int ctnid[3];
-	unsigned int rsvd5;
-	unsigned int todoff[4];
-	unsigned int rsvd6[48];
-} __attribute__ ((packed));
+	u32		: 32;
+	u32 tu		:  1;
+	u32 lu		:  1;
+	u32		:  6;
+	u32 stratum	:  8;
+	u32 vbits	: 16;
+	u32 leaps	: 16;
+	u32 tmd		:  4;
+	u32 ctn		:  4;
+	u32		:  3;
+	u32 c		:  1;
+	u32 tst		:  4;
+	u32 tzo		: 16;
+	u32 dsto	: 16;
+	u32 ctrl	: 16;
+	u32		: 16;
+	u32 tto;
+	u32		: 32;
+	u32 ctnid[3];
+	u32		: 32;
+	u32 todoff[4];
+	u32 rsvd[48];
+} __packed;
struct stp_tzib {
u32 tzan : 16;
u32 : 16;
u32 tzo : 16;
u32 dsto : 16;
u32 stn;
u32 dstn;
u64 dst_on_alg;
u64 dst_off_alg;
} __packed;
struct stp_tcpib {
u32 atcode : 4;
u32 ntcode : 4;
u32 d : 1;
u32 : 23;
s32 tto;
struct stp_tzib atzib;
struct stp_tzib ntzib;
s32 adst_offset : 16;
s32 ndst_offset : 16;
u32 rsvd1;
u64 ntzib_update;
u64 ndsto_update;
} __packed;
struct stp_lsoib {
u32 p : 1;
u32 : 31;
s32 also : 16;
s32 nlso : 16;
u64 nlsout;
} __packed;
struct stp_stzi {
u32 rsvd0[3];
u64 data_ts;
u32 rsvd1[22];
struct stp_tcpib tcpib;
struct stp_lsoib lsoib;
} __packed;
/* Functions needed by the machine check handler */
int stp_sync_check(void);
......
@@ -30,8 +30,6 @@ static inline void __tlb_flush_idte(unsigned long asce)
 		: : "a" (opt), "a" (asce) : "cc");
 }

-void smp_ptlb_all(void);
-
 /*
  * Flush all TLB entries on all CPUs.
  */
......
@@ -60,6 +60,9 @@ raw_copy_to_user(void __user *to, const void *from, unsigned long n);
 #define INLINE_COPY_TO_USER
 #endif

+int __put_user_bad(void) __attribute__((noreturn));
+int __get_user_bad(void) __attribute__((noreturn));
+
 #ifdef CONFIG_HAVE_MARCH_Z10_FEATURES

 #define __put_get_user_asm(to, from, size, spec)		\
@@ -109,6 +112,9 @@ static __always_inline int __put_user_fn(void *x, void __user *ptr, unsigned lon
 			      (unsigned long *)x,
 			      size, spec);
 		break;
+	default:
+		__put_user_bad();
+		break;
 	}
 	return rc;
 }
@@ -139,6 +145,9 @@ static __always_inline int __get_user_fn(void *x, const void __user *ptr, unsign
 			      (unsigned long __user *)ptr,
 			      size, spec);
 		break;
+	default:
+		__get_user_bad();
+		break;
 	}
 	return rc;
 }
@@ -179,7 +188,7 @@ static inline int __get_user_fn(void *x, const void __user *ptr, unsigned long s
 	default:						\
 		__put_user_bad();				\
 		break;						\
-	}							\
+	}								\
 	__builtin_expect(__pu_err, 0);				\
 })
@@ -190,8 +199,6 @@ static inline int __get_user_fn(void *x, const void __user *ptr, unsigned long s
 })

-int __put_user_bad(void) __attribute__((noreturn));
-
 #define __get_user(x, ptr)					\
 ({								\
 	int __gu_err = -EFAULT;					\
@@ -238,8 +245,6 @@ int __put_user_bad(void) __attribute__((noreturn));
 	__get_user(x, ptr);					\
 })

-int __get_user_bad(void) __attribute__((noreturn));
-
 unsigned long __must_check
 raw_copy_in_user(void __user *to, const void __user *from, unsigned long n);
@@ -278,4 +283,115 @@ static inline unsigned long __must_check clear_user(void __user *to, unsigned lo
 int copy_to_user_real(void __user *dest, void *src, unsigned long count);
 void *s390_kernel_write(void *dst, const void *src, size_t size);
#define HAVE_GET_KERNEL_NOFAULT
int __noreturn __put_kernel_bad(void);
#define __put_kernel_asm(val, to, insn) \
({ \
int __rc; \
\
asm volatile( \
"0: " insn " %2,%1\n" \
"1: xr %0,%0\n" \
"2:\n" \
".pushsection .fixup, \"ax\"\n" \
"3: lhi %0,%3\n" \
" jg 2b\n" \
".popsection\n" \
EX_TABLE(0b,3b) EX_TABLE(1b,3b) \
: "=d" (__rc), "+Q" (*(to)) \
: "d" (val), "K" (-EFAULT) \
: "cc"); \
__rc; \
})
#define __put_kernel_nofault(dst, src, type, err_label) \
do { \
u64 __x = (u64)(*((type *)(src))); \
int __pk_err; \
\
switch (sizeof(type)) { \
case 1: \
__pk_err = __put_kernel_asm(__x, (type *)(dst), "stc"); \
break; \
case 2: \
__pk_err = __put_kernel_asm(__x, (type *)(dst), "sth"); \
break; \
case 4: \
__pk_err = __put_kernel_asm(__x, (type *)(dst), "st"); \
break; \
case 8: \
__pk_err = __put_kernel_asm(__x, (type *)(dst), "stg"); \
break; \
default: \
__pk_err = __put_kernel_bad(); \
break; \
} \
if (unlikely(__pk_err)) \
goto err_label; \
} while (0)
int __noreturn __get_kernel_bad(void);
#define __get_kernel_asm(val, from, insn) \
({ \
int __rc; \
\
asm volatile( \
"0: " insn " %1,%2\n" \
"1: xr %0,%0\n" \
"2:\n" \
".pushsection .fixup, \"ax\"\n" \
"3: lhi %0,%3\n" \
" jg 2b\n" \
".popsection\n" \
EX_TABLE(0b,3b) EX_TABLE(1b,3b) \
: "=d" (__rc), "+d" (val) \
: "Q" (*(from)), "K" (-EFAULT) \
: "cc"); \
__rc; \
})
#define __get_kernel_nofault(dst, src, type, err_label) \
do { \
int __gk_err; \
\
switch (sizeof(type)) { \
case 1: { \
u8 __x = 0; \
\
__gk_err = __get_kernel_asm(__x, (type *)(src), "ic"); \
*((type *)(dst)) = (type)__x; \
break; \
}; \
case 2: { \
u16 __x = 0; \
\
__gk_err = __get_kernel_asm(__x, (type *)(src), "lh"); \
*((type *)(dst)) = (type)__x; \
break; \
}; \
case 4: { \
u32 __x = 0; \
\
__gk_err = __get_kernel_asm(__x, (type *)(src), "l"); \
*((type *)(dst)) = (type)__x; \
break; \
}; \
case 8: { \
u64 __x = 0; \
\
__gk_err = __get_kernel_asm(__x, (type *)(src), "lg"); \
*((type *)(dst)) = (type)__x; \
break; \
}; \
default: \
__gk_err = __get_kernel_bad(); \
break; \
} \
if (unlikely(__gk_err)) \
goto err_label; \
} while (0)
#endif /* __S390_UACCESS_H */
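
For context, a sketch of how a caller consumes the nofault macros above: with HAVE_GET_KERNEL_NOFAULT defined, the generic helpers in mm/maccess.c expand __get_kernel_nofault() with a local error label, in essentially the shape below. The probe function itself is invented for illustration.

#include <linux/uaccess.h>

/* read a long from a possibly-invalid kernel address without oopsing */
static int probe_kernel_long(const void *addr, long *val)
{
	long v;

	/* jumps to Efault if the "lg" in __get_kernel_asm() faults */
	__get_kernel_nofault(&v, addr, long, Efault);
	*val = v;
	return 0;
Efault:
	return -EFAULT;
}
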
...@@ -33,6 +33,7 @@ ...@@ -33,6 +33,7 @@
#define UVC_CMD_DESTROY_SEC_CPU 0x0121 #define UVC_CMD_DESTROY_SEC_CPU 0x0121
#define UVC_CMD_CONV_TO_SEC_STOR 0x0200 #define UVC_CMD_CONV_TO_SEC_STOR 0x0200
#define UVC_CMD_CONV_FROM_SEC_STOR 0x0201 #define UVC_CMD_CONV_FROM_SEC_STOR 0x0201
#define UVC_CMD_DESTR_SEC_STOR 0x0202
#define UVC_CMD_SET_SEC_CONF_PARAMS 0x0300
#define UVC_CMD_UNPACK_IMG 0x0301
#define UVC_CMD_VERIFY_IMG 0x0302
@@ -344,6 +345,7 @@ static inline int is_prot_virt_host(void)
}
int gmap_make_secure(struct gmap *gmap, unsigned long gaddr, void *uvcb);
int uv_destroy_page(unsigned long paddr);
int uv_convert_from_secure(unsigned long paddr);
int gmap_convert_to_secure(struct gmap *gmap, unsigned long gaddr);
@@ -354,6 +356,11 @@ void adjust_to_uv_max(unsigned long *vmax);
static inline void setup_uv(void) {}
static inline void adjust_to_uv_max(unsigned long *vmax) {}
static inline int uv_destroy_page(unsigned long paddr)
{
return 0;
}
static inline int uv_convert_from_secure(unsigned long paddr)
{
return 0;
......
@@ -2,6 +2,8 @@
#ifndef __S390_VDSO_H__
#define __S390_VDSO_H__
#include <vdso/datapage.h>
/* Default link addresses for the vDSOs */
#define VDSO32_LBASE 0
#define VDSO64_LBASE 0
@@ -18,30 +20,7 @@
* itself and may change without notice.
*/
-struct vdso_data {
-__u64 tb_update_count; /* Timebase atomicity ctr 0x00 */
-__u64 xtime_tod_stamp; /* TOD clock for xtime 0x08 */
-__u64 xtime_clock_sec; /* Kernel time 0x10 */
-__u64 xtime_clock_nsec; /* 0x18 */
-__u64 xtime_coarse_sec; /* Coarse kernel time 0x20 */
-__u64 xtime_coarse_nsec; /* 0x28 */
-__u64 wtom_clock_sec; /* Wall to monotonic clock 0x30 */
-__u64 wtom_clock_nsec; /* 0x38 */
-__u64 wtom_coarse_sec; /* Coarse wall to monotonic 0x40 */
-__u64 wtom_coarse_nsec; /* 0x48 */
-__u32 tz_minuteswest; /* Minutes west of Greenwich 0x50 */
-__u32 tz_dsttime; /* Type of dst correction 0x54 */
-__u32 ectg_available; /* ECTG instruction present 0x58 */
-__u32 tk_mult; /* Mult. used for xtime_nsec 0x5c */
-__u32 tk_shift; /* Shift used for xtime_nsec 0x60 */
-__u32 ts_dir; /* TOD steering direction 0x64 */
-__u64 ts_end; /* TOD steering end 0x68 */
-__u32 hrtimer_res; /* hrtimer resolution 0x70 */
-};
struct vdso_per_cpu_data {
-__u64 ectg_timer_base;
-__u64 ectg_user_time;
/*
* Note: node_id and cpu_nr must be at adjacent memory locations.
* VDSO userspace must read both values with a single instruction.
@@ -56,9 +35,7 @@ struct vdso_per_cpu_data {
};
extern struct vdso_data *vdso_data;
-extern struct vdso_data boot_vdso_data;
-void vdso_alloc_boot_cpu(struct lowcore *lowcore);
int vdso_alloc_per_cpu(struct lowcore *lowcore);
void vdso_free_per_cpu(struct lowcore *lowcore);
......
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_VDSO_CLOCKSOURCE_H
#define __ASM_VDSO_CLOCKSOURCE_H
#define VDSO_ARCH_CLOCKMODES \
VDSO_CLOCKMODE_TOD
#endif /* __ASM_VDSO_CLOCKSOURCE_H */
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __S390_ASM_VDSO_DATA_H
#define __S390_ASM_VDSO_DATA_H
#include <linux/types.h>
#include <vdso/datapage.h>
struct arch_vdso_data {
__u64 tod_steering_delta;
__u64 tod_steering_end;
};
#endif /* __S390_ASM_VDSO_DATA_H */
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ASM_VDSO_GETTIMEOFDAY_H
#define ASM_VDSO_GETTIMEOFDAY_H
#define VDSO_HAS_TIME 1
#define VDSO_HAS_CLOCK_GETRES 1
#include <asm/timex.h>
#include <asm/unistd.h>
#include <asm/vdso.h>
#include <linux/compiler.h>
#define vdso_calc_delta __arch_vdso_calc_delta
static __always_inline u64 __arch_vdso_calc_delta(u64 cycles, u64 last, u64 mask, u32 mult)
{
return (cycles - last) * mult;
}
static __always_inline const struct vdso_data *__arch_get_vdso_data(void)
{
return _vdso_data;
}
static inline u64 __arch_get_hw_counter(s32 clock_mode, const struct vdso_data *vd)
{
const struct vdso_data *vdso = __arch_get_vdso_data();
u64 adj, now;
now = get_tod_clock();
adj = vdso->arch_data.tod_steering_end - now;
if (unlikely((s64) adj > 0))
now += (vdso->arch_data.tod_steering_delta < 0) ? (adj >> 15) : -(adj >> 15);
return now;
}
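To make the steering arithmetic concrete, an illustration with invented values: while steering is active, tod_steering_end lies in the future, so adj is positive and the raw TOD value is bent by adj >> 15, i.e. one clock unit per 32768 units of remaining steering interval, decaying to zero as now approaches tod_steering_end:

/* illustration only, invented values */
u64 end = 0x20000, now = 0x10000;
s64 delta = -1;                  /* sign picks the correction direction */
u64 adj = end - now;             /* 0x10000 units of steering left */
now += (delta < 0) ? (adj >> 15) : -(adj >> 15); /* here: now += 2 */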
static __always_inline
long clock_gettime_fallback(clockid_t clkid, struct __kernel_timespec *ts)
{
register unsigned long r1 __asm__("r1") = __NR_clock_gettime;
register unsigned long r2 __asm__("r2") = (unsigned long)clkid;
register void *r3 __asm__("r3") = ts;
asm ("svc 0\n" : "+d" (r2) : "d" (r1), "d" (r3) : "cc", "memory");
return r2;
}
static __always_inline
long gettimeofday_fallback(register struct __kernel_old_timeval *tv,
register struct timezone *tz)
{
register unsigned long r1 __asm__("r1") = __NR_gettimeofday;
register unsigned long r2 __asm__("r2") = (unsigned long)tv;
register void *r3 __asm__("r3") = tz;
asm ("svc 0\n" : "+d" (r2) : "d" (r1), "d" (r3) : "cc", "memory");
return r2;
}
static __always_inline
long clock_getres_fallback(clockid_t clkid, struct __kernel_timespec *ts)
{
register unsigned long r1 __asm__("r1") = __NR_clock_getres;
register unsigned long r2 __asm__("r2") = (unsigned long)clkid;
register void *r3 __asm__("r3") = ts;
asm ("svc 0\n" : "+d" (r2) : "d" (r1), "d" (r3) : "cc", "memory");
return r2;
}
#endif
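A userspace sketch of what this header ultimately serves; the syscall fallbacks above only run when the vDSO path bails out (e.g. an unsupported clock), otherwise clock_gettime() is resolved entirely in the vDSO:

#include <stdio.h>
#include <time.h>

int main(void)
{
        struct timespec ts;

        if (clock_gettime(CLOCK_MONOTONIC, &ts)) /* vDSO fast path */
                return 1;
        printf("%lld.%09ld\n", (long long)ts.tv_sec, ts.tv_nsec);
        return 0;
}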
/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef __ASM_VDSO_PROCESSOR_H
#define __ASM_VDSO_PROCESSOR_H
#define cpu_relax() barrier()
#endif /* __ASM_VDSO_PROCESSOR_H */
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_VDSO_VSYSCALL_H
#define __ASM_VDSO_VSYSCALL_H
#ifndef __ASSEMBLY__
#include <linux/hrtimer.h>
#include <linux/timekeeper_internal.h>
#include <vdso/datapage.h>
#include <asm/vdso.h>
/*
* Update the vDSO data page to keep in sync with kernel timekeeping.
*/
static __always_inline struct vdso_data *__s390_get_k_vdso_data(void)
{
return vdso_data;
}
#define __arch_get_k_vdso_data __s390_get_k_vdso_data
/* The asm-generic header needs to be included after the definitions above */
#include <asm-generic/vdso/vsyscall.h>
#endif /* !__ASSEMBLY__ */
#endif /* __ASM_VDSO_VSYSCALL_H */
@@ -25,8 +25,6 @@ extern void add_virt_timer_periodic(struct vtimer_list *timer);
extern int mod_virt_timer(struct vtimer_list *timer, u64 expires);
extern int mod_virt_timer_periodic(struct vtimer_list *timer, u64 expires);
extern int del_virt_timer(struct vtimer_list *timer);
-extern void init_cpu_vtimer(void);
extern void vtime_init(void);
#endif /* _ASM_S390_TIMER_H */
@@ -35,12 +35,16 @@
#define PKEY_KEYTYPE_AES_128 1
#define PKEY_KEYTYPE_AES_192 2
#define PKEY_KEYTYPE_AES_256 3
#define PKEY_KEYTYPE_ECC 4
/* the newer ioctls use a pkey_key_type enum for type information */
enum pkey_key_type {
PKEY_TYPE_CCA_DATA = (__u32) 1,
PKEY_TYPE_CCA_CIPHER = (__u32) 2,
PKEY_TYPE_EP11 = (__u32) 3,
PKEY_TYPE_CCA_ECC = (__u32) 0x1f,
PKEY_TYPE_EP11_AES = (__u32) 6,
PKEY_TYPE_EP11_ECC = (__u32) 7,
};
/* the newer ioctls use a pkey_key_size enum for key size information */
@@ -88,6 +92,20 @@ struct pkey_clrkey {
__u8 clrkey[MAXCLRKEYSIZE]; /* 16, 24, or 32 byte clear key value */
};
/*
* EP11 key blobs of type PKEY_TYPE_EP11_AES and PKEY_TYPE_EP11_ECC
* are ep11 blobs prepended by this header:
*/
struct ep11kblob_header {
__u8 type; /* always 0x00 */
__u8 hver; /* header version, currently needs to be 0x00 */
__u16 len; /* total length in bytes (including this header) */
__u8 version; /* PKEY_TYPE_EP11_AES or PKEY_TYPE_EP11_ECC */
__u8 res0; /* unused */
__u16 bitlen; /* clear key bit len, 0 for unknown */
__u8 res1[8]; /* unused */
} __packed;
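A hypothetical sketch (not part of the patch) of a plausibility check a consumer could run on such a blob before handing it to an ioctl:

static int ep11_hdr_plausible(const struct ep11kblob_header *hdr, size_t buflen)
{
        if (buflen < sizeof(*hdr) || hdr->type != 0x00 || hdr->hver != 0x00)
                return 0;
        if (hdr->len < sizeof(*hdr) || hdr->len > buflen)
                return 0; /* total length must fit the buffer */
        return hdr->version == PKEY_TYPE_EP11_AES ||
               hdr->version == PKEY_TYPE_EP11_ECC;
}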
/*
* Generate CCA AES secure key.
*/
@@ -304,7 +322,7 @@ struct pkey_verifykey2 {
#define PKEY_VERIFYKEY2 _IOWR(PKEY_IOCTL_MAGIC, 0x17, struct pkey_verifykey2)
/*
-* Transform a key blob (of any type) into a protected key, version 2.
+* Transform a key blob into a protected key, version 2.
* There needs to be a list of apqns given with at least one entry in there.
* All apqns in the list need to be exact apqns, 0xFFFF as ANY card or domain
* is not supported. The implementation walks through the list of apqns and
@@ -313,6 +331,8 @@ struct pkey_verifykey2 {
* list is tried until success (return 0) or the end of the list is reached
* (return -1 with errno ENODEV). You may use the PKEY_APQNS4K ioctl to
* generate a list of apqns based on the key.
* Deriving ECC protected keys from ECC secure keys is not supported with
* this ioctl, use PKEY_KBLOB2PROTK3 for this purpose.
*/
struct pkey_kblob2pkey2 {
__u8 __user *key; /* in: pointer to key blob */
@@ -326,17 +346,17 @@ struct pkey_kblob2pkey2 {
/*
* Build a list of APQNs based on a key blob given.
* Is able to find out which type of secure key is given (CCA AES secure
-* key, CCA AES cipher key or EP11 AES key) and tries to find all matching
-* crypto cards based on the MKVP and maybe other criterias (like CCA AES
-* cipher keys need a CEX5C or higher, EP11 keys with BLOB_PKEY_EXTRACTABLE
-* need a CEX7 and EP11 api version 4). The list of APQNs is further filtered
-* by the key's mkvp which needs to match to either the current mkvp (CCA and
-* EP11) or the alternate mkvp (old mkvp, CCA adapters only) of the apqns. The
-* flags argument may be used to limit the matching apqns. If the
-* PKEY_FLAGS_MATCH_CUR_MKVP is given, only the current mkvp of each apqn is
-* compared. Likewise with the PKEY_FLAGS_MATCH_ALT_MKVP. If both are given, it
-* is assumed to return apqns where either the current or the alternate mkvp
-* matches. At least one of the matching flags needs to be given.
+* key, CCA AES cipher key, CCA ECC private key, EP11 AES key, EP11 ECC private
+* key) and tries to find all matching crypto cards based on the MKVP and maybe
+* other criteria (like CCA AES cipher keys need a CEX5C or higher, EP11 keys
+* with BLOB_PKEY_EXTRACTABLE need a CEX7 and EP11 api version 4). The list of
+* APQNs is further filtered by the key's mkvp which needs to match to either
+* the current mkvp (CCA and EP11) or the alternate mkvp (old mkvp, CCA adapters
+* only) of the apqns. The flags argument may be used to limit the matching
+* apqns. If the PKEY_FLAGS_MATCH_CUR_MKVP is given, only the current mkvp of
+* each apqn is compared. Likewise with the PKEY_FLAGS_MATCH_ALT_MKVP. If both
+* are given, it is assumed to return apqns where either the current or the
+* alternate mkvp matches. At least one of the matching flags needs to be given.
* The flags argument for EP11 keys has no further action and is currently
* ignored (but needs to be given as PKEY_FLAGS_MATCH_CUR_MKVP) as there is only
* the wkvp from the key to match against the apqn's wkvp.
@@ -365,9 +385,10 @@ struct pkey_apqns4key {
* restrict the list by given master key verification patterns.
* For different key types there may be different ways to match the
* master key verification patterns. For CCA keys (CCA data key and CCA
-* cipher key) the first 8 bytes of cur_mkvp refer to the current mkvp value
-* of the apqn and the first 8 bytes of the alt_mkvp refer to the old mkvp.
-* The flags argument controls if the apqns current and/or alternate mkvp
+* cipher key) the first 8 bytes of cur_mkvp refer to the current AES mkvp value
+* of the apqn and the first 8 bytes of the alt_mkvp refer to the old AES mkvp.
+* For CCA ECC keys it is similar but the match is against the APKA current/old
+* mkvp. The flags argument controls if the apqns current and/or alternate mkvp
* should match. If the PKEY_FLAGS_MATCH_CUR_MKVP is given, only the current
* mkvp of each apqn is compared. Likewise with the PKEY_FLAGS_MATCH_ALT_MKVP.
* If both are given, it is assumed to return apqns where either the
@@ -397,4 +418,30 @@ struct pkey_apqns4keytype {
};
#define PKEY_APQNS4KT _IOWR(PKEY_IOCTL_MAGIC, 0x1C, struct pkey_apqns4keytype)
/*
* Transform a key blob into a protected key, version 3.
* The difference from version 2 of this ioctl is that the protected key
* buffer is now passed explicitly and is no longer embedded in a struct pkey_protkey.
* So this ioctl is also able to handle EP11 and CCA ECC secure keys and
* provide ECC protected keys.
* There needs to be a list of apqns given with at least one entry in there.
* All apqns in the list need to be exact apqns, 0xFFFF as ANY card or domain
* is not supported. The implementation walks through the list of apqns and
* tries to send the request to each apqn without any further checking (like
* card type or online state). If the apqn fails, simply the next one in the
* list is tried until success (return 0) or the end of the list is reached
* (return -1 with errno ENODEV). You may use the PKEY_APQNS4K ioctl to
* generate a list of apqns based on the key.
*/
struct pkey_kblob2pkey3 {
__u8 __user *key; /* in: pointer to key blob */
__u32 keylen; /* in: key blob size */
struct pkey_apqn __user *apqns; /* in: ptr to list of apqn targets */
__u32 apqn_entries; /* in: # of apqn target list entries */
__u32 pkeytype; /* out: prot key type (enum pkey_key_type) */
__u32 pkeylen; /* in/out: size of pkey buffer/actual len of pkey */
__u8 __user *pkey; /* in: pkey blob buffer space ptr */
};
#define PKEY_KBLOB2PROTK3 _IOWR(PKEY_IOCTL_MAGIC, 0x1D, struct pkey_kblob2pkey3)
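A hypothetical userspace sketch of the new ioctl (error handling trimmed; the fd comes from opening the /dev/pkey misc device):

#include <string.h>
#include <sys/ioctl.h>
#include <asm/pkey.h>

static int kblob_to_protkey3(int pkeyfd, __u8 *key, __u32 keylen,
                             struct pkey_apqn *apqns, __u32 nr_apqns,
                             __u8 *pkey, __u32 *pkeylen, __u32 *pkeytype)
{
        struct pkey_kblob2pkey3 kb2p;

        memset(&kb2p, 0, sizeof(kb2p));
        kb2p.key = key;
        kb2p.keylen = keylen;
        kb2p.apqns = apqns;             /* at least one exact APQN */
        kb2p.apqn_entries = nr_apqns;
        kb2p.pkey = pkey;
        kb2p.pkeylen = *pkeylen;        /* in: buffer size */
        if (ioctl(pkeyfd, PKEY_KBLOB2PROTK3, &kb2p))
                return -1;              /* errno ENODEV if no APQN worked */
        *pkeylen = kb2p.pkeylen;        /* out: actual protected key length */
        *pkeytype = kb2p.pkeytype;
        return 0;
}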
#endif /* _UAPI_PKEY_H */
@@ -29,7 +29,7 @@
{ 0x13, "SIGP conditional emergency signal" }, \
{ 0x15, "SIGP sense running" }, \
{ 0x16, "SIGP set multithreading"}, \
-{ 0x17, "SIGP store additional status ait address"}
+{ 0x17, "SIGP store additional status at address"}
#define icpt_prog_codes \
{ 0x0001, "Prog Operation" }, \
......
@@ -57,6 +57,7 @@ obj-$(CONFIG_COMPAT) += $(compat-obj-y)
obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
obj-$(CONFIG_STACKTRACE) += stacktrace.o
obj-$(CONFIG_KPROBES) += kprobes.o
obj-$(CONFIG_KPROBES) += kprobes_insn_page.o
obj-$(CONFIG_FUNCTION_TRACER) += mcount.o ftrace.o
obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
obj-$(CONFIG_UPROBES) += uprobes.o
......
@@ -59,26 +59,6 @@ int main(void)
OFFSET(__SF_SIE_REASON, stack_frame, empty1[2]);
OFFSET(__SF_SIE_FLAGS, stack_frame, empty1[3]);
BLANK();
-/* timeval/timezone offsets for use by vdso */
-OFFSET(__VDSO_UPD_COUNT, vdso_data, tb_update_count);
-OFFSET(__VDSO_XTIME_STAMP, vdso_data, xtime_tod_stamp);
-OFFSET(__VDSO_XTIME_SEC, vdso_data, xtime_clock_sec);
-OFFSET(__VDSO_XTIME_NSEC, vdso_data, xtime_clock_nsec);
-OFFSET(__VDSO_XTIME_CRS_SEC, vdso_data, xtime_coarse_sec);
-OFFSET(__VDSO_XTIME_CRS_NSEC, vdso_data, xtime_coarse_nsec);
-OFFSET(__VDSO_WTOM_SEC, vdso_data, wtom_clock_sec);
-OFFSET(__VDSO_WTOM_NSEC, vdso_data, wtom_clock_nsec);
-OFFSET(__VDSO_WTOM_CRS_SEC, vdso_data, wtom_coarse_sec);
-OFFSET(__VDSO_WTOM_CRS_NSEC, vdso_data, wtom_coarse_nsec);
-OFFSET(__VDSO_TIMEZONE, vdso_data, tz_minuteswest);
-OFFSET(__VDSO_ECTG_OK, vdso_data, ectg_available);
-OFFSET(__VDSO_TK_MULT, vdso_data, tk_mult);
-OFFSET(__VDSO_TK_SHIFT, vdso_data, tk_shift);
-OFFSET(__VDSO_TS_DIR, vdso_data, ts_dir);
-OFFSET(__VDSO_TS_END, vdso_data, ts_end);
-OFFSET(__VDSO_CLOCK_REALTIME_RES, vdso_data, hrtimer_res);
-OFFSET(__VDSO_ECTG_BASE, vdso_per_cpu_data, ectg_timer_base);
-OFFSET(__VDSO_ECTG_USER, vdso_per_cpu_data, ectg_user_time);
OFFSET(__VDSO_GETCPU_VAL, vdso_per_cpu_data, getcpu_val);
BLANK();
/* constants used by the vdso */
......
@@ -141,7 +141,7 @@ int copy_oldmem_kernel(void *dst, void *src, size_t count)
while (count) {
from = __pa(src);
if (!OLDMEM_BASE && from < sclp.hsa_size) {
-/* Copy from zfcpdump HSA area */
+/* Copy from zfcp/nvme dump HSA area */
len = min(count, sclp.hsa_size - from);
rc = memcpy_hsa_kernel(dst, from, len);
if (rc)
@@ -184,7 +184,7 @@ static int copy_oldmem_user(void __user *dst, void *src, size_t count)
while (count) {
from = __pa(src);
if (!OLDMEM_BASE && from < sclp.hsa_size) {
-/* Copy from zfcpdump HSA area */
+/* Copy from zfcp/nvme dump HSA area */
len = min(count, sclp.hsa_size - from);
rc = memcpy_hsa_user(dst, from, len);
if (rc)
@@ -258,7 +258,7 @@ static int remap_oldmem_pfn_range_kdump(struct vm_area_struct *vma,
}
/*
-* Remap "oldmem" for zfcpdump
+* Remap "oldmem" for zfcp/nvme dump
*
* We only map available memory above HSA size. Memory below HSA size
* is read on demand using the copy_oldmem_page() function.
@@ -283,7 +283,7 @@ static int remap_oldmem_pfn_range_zfcpdump(struct vm_area_struct *vma,
}
/*
-* Remap "oldmem" for kdump or zfcpdump
+* Remap "oldmem" for kdump or zfcp/nvme dump
*/
int remap_oldmem_pfn_range(struct vm_area_struct *vma, unsigned long from,
unsigned long pfn, unsigned long size, pgprot_t prot)
@@ -632,11 +632,11 @@ int elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size)
u32 alloc_size;
u64 hdr_off;
-/* If we are not in kdump or zfcpdump mode return */
-if (!OLDMEM_BASE && ipl_info.type != IPL_TYPE_FCP_DUMP)
+/* If we are not in kdump or zfcp/nvme dump mode return */
+if (!OLDMEM_BASE && !is_ipl_type_dump())
return 0;
-/* If we cannot get HSA size for zfcpdump return error */
-if (ipl_info.type == IPL_TYPE_FCP_DUMP && !sclp.hsa_size)
+/* If we cannot get HSA size for zfcp/nvme dump return error */
+if (is_ipl_type_dump() && !sclp.hsa_size)
return -ENODEV;
/* For kdump, exclude previous crashkernel memory */
......
@@ -104,18 +104,7 @@ static const struct seq_operations show_diag_stat_sops = {
.show = show_diag_stat,
};
-static int show_diag_stat_open(struct inode *inode, struct file *file)
-{
-return seq_open(file, &show_diag_stat_sops);
-}
-static const struct file_operations show_diag_stat_fops = {
-.open = show_diag_stat_open,
-.read = seq_read,
-.llseek = seq_lseek,
-.release = seq_release,
-};
+DEFINE_SEQ_ATTRIBUTE(show_diag_stat);
static int __init show_diag_stat_init(void)
{
......
@@ -482,31 +482,37 @@ static int print_insn(char *buffer, unsigned char *code, unsigned long addr)
return (int) (ptr - buffer);
}
static int copy_from_regs(struct pt_regs *regs, void *dst, void *src, int len)
{
if (user_mode(regs)) {
if (copy_from_user(dst, (char __user *)src, len))
return -EFAULT;
} else {
if (copy_from_kernel_nofault(dst, src, len))
return -EFAULT;
}
return 0;
}
void show_code(struct pt_regs *regs)
{
char *mode = user_mode(regs) ? "User" : "Krnl";
unsigned char code[64];
char buffer[128], *ptr;
-mm_segment_t old_fs;
unsigned long addr;
int start, end, opsize, hops, i;
/* Get a snapshot of the 64 bytes surrounding the fault address. */
-old_fs = get_fs();
-set_fs(user_mode(regs) ? USER_DS : KERNEL_DS);
for (start = 32; start && regs->psw.addr >= 34 - start; start -= 2) {
addr = regs->psw.addr - 34 + start;
-if (__copy_from_user(code + start - 2,
-(char __user *) addr, 2))
+if (copy_from_regs(regs, code + start - 2, (void *)addr, 2))
break;
}
for (end = 32; end < 64; end += 2) {
addr = regs->psw.addr + end - 32;
-if (__copy_from_user(code + end,
-(char __user *) addr, 2))
+if (copy_from_regs(regs, code + end, (void *)addr, 2))
break;
}
-set_fs(old_fs);
/* Code snapshot useable ? */
if ((regs->psw.addr & 1) || start >= end) {
printk("%s Code: Bad PSW.\n", mode);
......
@@ -274,19 +274,6 @@ static int __init disable_vector_extension(char *str)
}
early_param("novx", disable_vector_extension);
-static int __init cad_setup(char *str)
-{
-bool enabled;
-int rc;
-rc = kstrtobool(str, &enabled);
-if (!rc && enabled && test_facility(128))
-/* Enable problem state CAD. */
-__ctl_set_bit(2, 3);
-return rc;
-}
-early_param("cad", cad_setup);
char __bootdata(early_command_line)[COMMAND_LINE_SIZE];
static void __init setup_boot_command_line(void)
{
......
@@ -10,7 +10,7 @@
static void sclp_early_write(struct console *con, const char *s, unsigned int len)
{
-__sclp_early_printk(s, len, 0);
+__sclp_early_printk(s, len);
}
static struct console sclp_early_console = {
......
@@ -435,10 +435,8 @@ ENTRY(system_call)
jz .Lsysc_skip_fpu
brasl %r14,load_fpu_regs
.Lsysc_skip_fpu:
-lg %r14,__LC_VDSO_PER_CPU
mvc __LC_RETURN_PSW(16),__PT_PSW(%r11)
stpt __LC_EXIT_TIMER
-mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
lmg %r0,%r15,__PT_R0(%r11)
b __LC_RETURN_LPSWE
@@ -797,13 +795,11 @@ ENTRY(io_int_handler)
TRACE_IRQS_ON
0:
#endif
-lg %r14,__LC_VDSO_PER_CPU
mvc __LC_RETURN_PSW(16),__PT_PSW(%r11)
tm __PT_PSW+1(%r11),0x01 # returning to user ?
jno .Lio_exit_kernel
BPEXIT __TI_flags(%r12),_TIF_ISOLATE_BP
stpt __LC_EXIT_TIMER
-mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
.Lio_exit_kernel:
lmg %r0,%r15,__PT_R0(%r11)
b __LC_RETURN_LPSWE
@@ -1213,14 +1209,12 @@ ENTRY(mcck_int_handler)
brasl %r14,s390_handle_mcck
TRACE_IRQS_ON
.Lmcck_return:
-lg %r14,__LC_VDSO_PER_CPU
lmg %r0,%r10,__PT_R0(%r11)
mvc __LC_RETURN_MCCK_PSW(16),__PT_PSW(%r11) # move return PSW
tm __LC_RETURN_MCCK_PSW+1,0x01 # returning to user ?
jno 0f
BPEXIT __TI_flags(%r12),_TIF_ISOLATE_BP
stpt __LC_EXIT_TIMER
-mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
0: lmg %r11,%r15,__PT_R11(%r11)
b __LC_RETURN_MCCK_LPSWE
......
@@ -9,7 +9,6 @@
#include <asm/idle.h>
extern void *restart_stack;
-extern unsigned long suspend_zero_pages;
void system_call(void);
void pgm_check_handler(void);
@@ -17,7 +16,6 @@ void ext_int_handler(void);
void io_int_handler(void);
void mcck_int_handler(void);
void restart_int_handler(void);
-void restart_call_handler(void);
asmlinkage long do_syscall_trace_enter(struct pt_regs *regs);
asmlinkage void do_syscall_trace_exit(struct pt_regs *regs);
@@ -62,12 +60,10 @@ void do_notify_resume(struct pt_regs *regs);
void __init init_IRQ(void);
void do_IRQ(struct pt_regs *regs, int irq);
void do_restart(void);
-void __init startup_init_nobss(void);
void __init startup_init(void);
void die(struct pt_regs *regs, const char *str);
int setup_profiling_timer(unsigned int multiplier);
void __init time_init(void);
-void s390_early_resume(void);
unsigned long prepare_ftrace_return(unsigned long parent, unsigned long sp, unsigned long ip);
struct s390_mmap_arg_struct;
@@ -92,4 +88,6 @@ void set_fs_fixup(void);
unsigned long stack_alloc(void);
void stack_free(unsigned long stack);
extern char kprobes_insn_page[];
#endif /* _ENTRY_H */
@@ -40,10 +40,12 @@
#define IPL_FCP_STR "fcp"
#define IPL_FCP_DUMP_STR "fcp_dump"
#define IPL_NVME_STR "nvme"
#define IPL_NVME_DUMP_STR "nvme_dump"
#define IPL_NSS_STR "nss"
#define DUMP_CCW_STR "ccw"
#define DUMP_FCP_STR "fcp"
#define DUMP_NVME_STR "nvme"
#define DUMP_NONE_STR "none"
/*
@@ -96,6 +98,8 @@ static char *ipl_type_str(enum ipl_type type)
return IPL_NSS_STR;
case IPL_TYPE_NVME:
return IPL_NVME_STR;
case IPL_TYPE_NVME_DUMP:
return IPL_NVME_DUMP_STR;
case IPL_TYPE_UNKNOWN:
default:
return IPL_UNKNOWN_STR;
@@ -106,6 +110,7 @@ enum dump_type {
DUMP_TYPE_NONE = 1,
DUMP_TYPE_CCW = 2,
DUMP_TYPE_FCP = 4,
DUMP_TYPE_NVME = 8,
};
static char *dump_type_str(enum dump_type type)
@@ -117,6 +122,8 @@ static char *dump_type_str(enum dump_type type)
return DUMP_CCW_STR;
case DUMP_TYPE_FCP:
return DUMP_FCP_STR;
case DUMP_TYPE_NVME:
return DUMP_NVME_STR;
default:
return NULL;
}
@@ -144,10 +151,12 @@ static struct ipl_parameter_block *reipl_block_actual;
static int dump_capabilities = DUMP_TYPE_NONE;
static enum dump_type dump_type = DUMP_TYPE_NONE;
static struct ipl_parameter_block *dump_block_fcp;
static struct ipl_parameter_block *dump_block_nvme;
static struct ipl_parameter_block *dump_block_ccw;
static struct sclp_ipl_info sclp_ipl_info;
static bool reipl_nvme_clear;
static bool reipl_fcp_clear;
static bool reipl_ccw_clear;
@@ -266,7 +275,10 @@ static __init enum ipl_type get_ipl_type(void)
else
return IPL_TYPE_FCP;
case IPL_PBT_NVME:
-return IPL_TYPE_NVME;
+if (ipl_block.nvme.opt == IPL_PB0_NVME_OPT_DUMP)
+return IPL_TYPE_NVME_DUMP;
+else
+return IPL_TYPE_NVME;
}
return IPL_TYPE_UNKNOWN;
}
@@ -324,6 +336,7 @@ static ssize_t sys_ipl_device_show(struct kobject *kobj,
case IPL_TYPE_FCP_DUMP:
return sprintf(page, "0.0.%04x\n", ipl_block.fcp.devno);
case IPL_TYPE_NVME:
case IPL_TYPE_NVME_DUMP:
return sprintf(page, "%08ux\n", ipl_block.nvme.fid); return sprintf(page, "%08ux\n", ipl_block.nvme.fid);
default: default:
return 0; return 0;
@@ -531,6 +544,7 @@ static int __init ipl_init(void)
rc = sysfs_create_group(&ipl_kset->kobj, &ipl_fcp_attr_group);
break;
case IPL_TYPE_NVME:
case IPL_TYPE_NVME_DUMP:
rc = sysfs_create_group(&ipl_kset->kobj, &ipl_nvme_attr_group);
break;
default:
@@ -873,6 +887,24 @@ static struct attribute_group reipl_nvme_attr_group = {
.bin_attrs = reipl_nvme_bin_attrs
};
static ssize_t reipl_nvme_clear_show(struct kobject *kobj,
struct kobj_attribute *attr, char *page)
{
return sprintf(page, "%u\n", reipl_nvme_clear);
}
static ssize_t reipl_nvme_clear_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t len)
{
if (strtobool(buf, &reipl_nvme_clear) < 0)
return -EINVAL;
return len;
}
static struct kobj_attribute sys_reipl_nvme_clear_attr =
__ATTR(clear, 0644, reipl_nvme_clear_show, reipl_nvme_clear_store);
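A hedged usage sketch; the sysfs path is assumed from the kset layout (the attribute should surface as /sys/firmware/reipl/nvme/clear when facility 141 is installed):

#include <fcntl.h>
#include <unistd.h>

static int set_reipl_nvme_clear(int on)
{
        int fd = open("/sys/firmware/reipl/nvme/clear", O_WRONLY);

        if (fd < 0)
                return -1;
        /* strtobool() in the store handler accepts "1"/"0" (and y/n) */
        if (write(fd, on ? "1" : "0", 1) != 1) {
                close(fd);
                return -1;
        }
        return close(fd);
}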
/* CCW reipl device attributes */
DEFINE_IPL_CCW_ATTR_RW(reipl_ccw, device, reipl_block_ccw->ccw);
@@ -1099,7 +1131,10 @@ static void __reipl_run(void *unused)
break;
case IPL_TYPE_NVME:
diag308(DIAG308_SET, reipl_block_nvme);
-diag308(DIAG308_LOAD_CLEAR, NULL);
+if (reipl_nvme_clear)
+diag308(DIAG308_LOAD_CLEAR, NULL);
+else
+diag308(DIAG308_LOAD_NORMAL, NULL);
break;
case IPL_TYPE_NSS:
diag308(DIAG308_SET, reipl_block_nss);
@@ -1109,6 +1144,7 @@ static void __reipl_run(void *unused)
diag308(DIAG308_LOAD_CLEAR, NULL);
break;
case IPL_TYPE_FCP_DUMP:
case IPL_TYPE_NVME_DUMP:
break;
}
disabled_wait();
@@ -1219,8 +1255,9 @@ static int __init reipl_fcp_init(void)
&sys_reipl_fcp_clear_attr.attr);
if (rc)
goto out2;
-} else
+} else {
reipl_fcp_clear = true;
+}
if (ipl_info.type == IPL_TYPE_FCP) {
memcpy(reipl_block_fcp, &ipl_block, sizeof(ipl_block));
@@ -1266,10 +1303,16 @@ static int __init reipl_nvme_init(void)
}
rc = sysfs_create_group(&reipl_nvme_kset->kobj, &reipl_nvme_attr_group);
-if (rc) {
-kset_unregister(reipl_nvme_kset);
-free_page((unsigned long) reipl_block_nvme);
-return rc;
+if (rc)
+goto out1;
+if (test_facility(141)) {
+rc = sysfs_create_file(&reipl_nvme_kset->kobj,
+&sys_reipl_nvme_clear_attr.attr);
+if (rc)
+goto out2;
+} else {
+reipl_nvme_clear = true;
}
if (ipl_info.type == IPL_TYPE_NVME) {
@@ -1290,6 +1333,13 @@ static int __init reipl_nvme_init(void)
}
reipl_capabilities |= IPL_TYPE_NVME;
return 0;
out2:
sysfs_remove_group(&reipl_nvme_kset->kobj, &reipl_nvme_attr_group);
out1:
kset_unregister(reipl_nvme_kset);
free_page((unsigned long) reipl_block_nvme);
return rc;
}
static int __init reipl_type_init(void)
@@ -1382,6 +1432,29 @@ static struct attribute_group dump_fcp_attr_group = {
.attrs = dump_fcp_attrs,
};
/* NVME dump device attributes */
DEFINE_IPL_ATTR_RW(dump_nvme, fid, "0x%08llx\n", "%llx\n",
dump_block_nvme->nvme.fid);
DEFINE_IPL_ATTR_RW(dump_nvme, nsid, "0x%08llx\n", "%llx\n",
dump_block_nvme->nvme.nsid);
DEFINE_IPL_ATTR_RW(dump_nvme, bootprog, "%lld\n", "%llx\n",
dump_block_nvme->nvme.bootprog);
DEFINE_IPL_ATTR_RW(dump_nvme, br_lba, "%lld\n", "%llx\n",
dump_block_nvme->nvme.br_lba);
static struct attribute *dump_nvme_attrs[] = {
&sys_dump_nvme_fid_attr.attr,
&sys_dump_nvme_nsid_attr.attr,
&sys_dump_nvme_bootprog_attr.attr,
&sys_dump_nvme_br_lba_attr.attr,
NULL,
};
static struct attribute_group dump_nvme_attr_group = {
.name = IPL_NVME_STR,
.attrs = dump_nvme_attrs,
};
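A hedged sketch of selecting an NVMe dump device from userspace; the paths are assumed from the kset layout (/sys/firmware/dump/nvme/* plus the dump_type attribute handled below):

#include <stdio.h>

static int write_str(const char *path, const char *val)
{
        FILE *f = fopen(path, "w");

        if (!f)
                return -1;
        fprintf(f, "%s\n", val);
        return fclose(f);
}

static int select_nvme_dump(const char *fid, const char *nsid)
{
        if (write_str("/sys/firmware/dump/nvme/fid", fid) ||
            write_str("/sys/firmware/dump/nvme/nsid", nsid))
                return -1;
        return write_str("/sys/firmware/dump/dump_type", "nvme");
}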
/* CCW dump device attributes */
DEFINE_IPL_CCW_ATTR_RW(dump_ccw, device, dump_block_ccw->ccw);
@@ -1423,6 +1496,8 @@ static ssize_t dump_type_store(struct kobject *kobj,
rc = dump_set_type(DUMP_TYPE_CCW);
else if (strncmp(buf, DUMP_FCP_STR, strlen(DUMP_FCP_STR)) == 0)
rc = dump_set_type(DUMP_TYPE_FCP);
else if (strncmp(buf, DUMP_NVME_STR, strlen(DUMP_NVME_STR)) == 0)
rc = dump_set_type(DUMP_TYPE_NVME);
return (rc != 0) ? rc : len;
}
@@ -1450,6 +1525,9 @@ static void __dump_run(void *unused)
case DUMP_TYPE_FCP:
diag308_dump(dump_block_fcp);
break;
case DUMP_TYPE_NVME:
diag308_dump(dump_block_nvme);
break;
default:
break;
}
@@ -1506,6 +1584,29 @@ static int __init dump_fcp_init(void)
return 0;
}
static int __init dump_nvme_init(void)
{
int rc;
if (!sclp_ipl_info.has_dump)
return 0; /* LDIPL DUMP is not installed */
dump_block_nvme = (void *) get_zeroed_page(GFP_KERNEL);
if (!dump_block_nvme)
return -ENOMEM;
rc = sysfs_create_group(&dump_kset->kobj, &dump_nvme_attr_group);
if (rc) {
free_page((unsigned long)dump_block_nvme);
return rc;
}
dump_block_nvme->hdr.len = IPL_BP_NVME_LEN;
dump_block_nvme->hdr.version = IPL_PARM_BLOCK_VERSION;
dump_block_nvme->fcp.len = IPL_BP0_NVME_LEN;
dump_block_nvme->fcp.pbt = IPL_PBT_NVME;
dump_block_nvme->fcp.opt = IPL_PB0_NVME_OPT_DUMP;
dump_capabilities |= DUMP_TYPE_NVME;
return 0;
}
static int __init dump_init(void)
{
int rc;
@@ -1522,6 +1623,9 @@ static int __init dump_init(void)
if (rc)
return rc;
rc = dump_fcp_init();
if (rc)
return rc;
rc = dump_nvme_init();
if (rc)
return rc;
dump_set_type(DUMP_TYPE_NONE);
@@ -1956,6 +2060,7 @@ void __init setup_ipl(void)
ipl_info.data.fcp.lun = ipl_block.fcp.lun;
break;
case IPL_TYPE_NVME:
case IPL_TYPE_NVME_DUMP:
ipl_info.data.nvme.fid = ipl_block.nvme.fid;
ipl_info.data.nvme.nsid = ipl_block.nvme.nsid;
break;
......
@@ -7,6 +7,7 @@
* s390 port, used ppc64 as template. Mike Grundy <grundym@us.ibm.com>
*/
#include <linux/moduleloader.h>
#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/preempt.h>
@@ -21,6 +22,7 @@
#include <asm/set_memory.h>
#include <asm/sections.h>
#include <asm/dis.h>
#include "entry.h"
DEFINE_PER_CPU(struct kprobe *, current_kprobe);
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
@@ -30,19 +32,32 @@ struct kretprobe_blackpoint kretprobe_blacklist[] = { };
DEFINE_INSN_CACHE_OPS(s390_insn);
static int insn_page_in_use;
-static char insn_page[PAGE_SIZE] __aligned(PAGE_SIZE);
void *alloc_insn_page(void)
{
void *page;
page = module_alloc(PAGE_SIZE);
if (!page)
return NULL;
__set_memory((unsigned long) page, 1, SET_MEMORY_RO | SET_MEMORY_X);
return page;
}
void free_insn_page(void *page)
{
module_memfree(page);
}
static void *alloc_s390_insn_page(void)
{
if (xchg(&insn_page_in_use, 1) == 1)
return NULL;
-set_memory_x((unsigned long) &insn_page, 1);
-return &insn_page;
+return &kprobes_insn_page;
}
static void free_s390_insn_page(void *page)
{
-set_memory_nx((unsigned long) page, 1);
xchg(&insn_page_in_use, 0);
}
@@ -56,25 +71,29 @@ struct kprobe_insn_cache kprobe_s390_insn_slots = {
static void copy_instruction(struct kprobe *p)
{
+kprobe_opcode_t insn[MAX_INSN_SIZE];
s64 disp, new_disp;
u64 addr, new_addr;
+unsigned int len;
-memcpy(p->ainsn.insn, p->addr, insn_length(*p->addr >> 8));
-p->opcode = p->ainsn.insn[0];
-if (!probe_is_insn_relative_long(p->ainsn.insn))
-return;
+len = insn_length(*p->addr >> 8);
+memcpy(&insn, p->addr, len);
+p->opcode = insn[0];
+if (probe_is_insn_relative_long(&insn[0])) {
/*
-* For pc-relative instructions in RIL-b or RIL-c format patch the
-* RI2 displacement field. We have already made sure that the insn
-* slot for the patched instruction is within the same 2GB area
-* as the original instruction (either kernel image or module area).
-* Therefore the new displacement will always fit.
+* For pc-relative instructions in RIL-b or RIL-c format patch
+* the RI2 displacement field. We have already made sure that
+* the insn slot for the patched instruction is within the same
+* 2GB area as the original instruction (either kernel image or
+* module area). Therefore the new displacement will always fit.
*/
-disp = *(s32 *)&p->ainsn.insn[1];
+disp = *(s32 *)&insn[1];
addr = (u64)(unsigned long)p->addr;
new_addr = (u64)(unsigned long)p->ainsn.insn;
new_disp = ((addr + (disp * 2)) - new_addr) / 2;
-*(s32 *)&p->ainsn.insn[1] = new_disp;
+*(s32 *)&insn[1] = new_disp;
+}
+s390_kernel_write(p->ainsn.insn, &insn, len);
}
NOKPROBE_SYMBOL(copy_instruction);
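A worked illustration of the displacement fix-up, with invented addresses; both instructions sit within 2GB of the branch target, so the recomputed halfword displacement always fits the 32-bit RI2 field:

/* illustration only, invented addresses */
s64 disp = -0x1000;             /* original RI2 field, in halfwords */
u64 addr = 0x100000;            /* probed instruction */
u64 new_addr = 0x180000;        /* slot inside kprobes_insn_page */
s64 new_disp = ((addr + (disp * 2)) - new_addr) / 2;
/* target 0xfe000 is preserved: new_disp == -0x41000 halfwords */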
......
/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/linkage.h>
/*
* insn_page is a special 4k aligned dummy function for kprobes.
* It will contain all kprobed instructions that are out-of-line executed.
* The page must be within the kernel image to guarantee that the
* out-of-line instructions are within 2GB distance of their original
* location. Using a dummy function ensures that the insn_page is within
* the text section of the kernel and mapped read-only/executable from
* the beginning on, thus avoiding splitting large mappings if the page
* would be in the data section instead.
*/
.section .kprobes.text, "ax"
.align 4096
ENTRY(kprobes_insn_page)
.rept 2048
.word 0x07fe
.endr
ENDPROC(kprobes_insn_page)
.previous
@@ -102,7 +102,6 @@ struct mem_detect_info __bootdata(mem_detect);
struct exception_table_entry *__bootdata_preserved(__start_dma_ex_table);
struct exception_table_entry *__bootdata_preserved(__stop_dma_ex_table);
-unsigned long __bootdata_preserved(__swsusp_reset_dma);
unsigned long __bootdata_preserved(__stext_dma);
unsigned long __bootdata_preserved(__etext_dma);
unsigned long __bootdata_preserved(__sdma);
@@ -119,6 +118,7 @@ EXPORT_SYMBOL(VMALLOC_END);
struct page *vmemmap;
EXPORT_SYMBOL(vmemmap);
unsigned long vmemmap_size;
unsigned long MODULES_VADDR;
unsigned long MODULES_END;
@@ -127,6 +127,12 @@ unsigned long MODULES_END;
struct lowcore *lowcore_ptr[NR_CPUS];
EXPORT_SYMBOL(lowcore_ptr);
/*
* The Write Back bit position in the physaddr is given by the SLPC PCI.
* Leaving the mask zero always uses write through, which is safe.
*/
unsigned long mio_wb_bit_mask __ro_after_init;
/*
* This is set up by the setup-routine at boot-time
* for S390 need to find out, what we have to setup
@@ -245,7 +251,7 @@ static void __init conmode_default(void)
#ifdef CONFIG_CRASH_DUMP
static void __init setup_zfcpdump(void)
{
-if (ipl_info.type != IPL_TYPE_FCP_DUMP)
+if (!is_ipl_type_dump())
return;
if (OLDMEM_BASE)
return;
@@ -300,7 +306,7 @@ void machine_power_off(void)
void (*pm_power_off)(void) = machine_power_off;
EXPORT_SYMBOL_GPL(pm_power_off);
-void *restart_stack __section(.data);
+void *restart_stack;
unsigned long stack_alloc(void)
{
@@ -366,8 +372,12 @@ void __init arch_call_rest_init(void)
static void __init setup_lowcore_dat_off(void)
{
unsigned long int_psw_mask = PSW_KERNEL_BITS;
struct lowcore *lc;
if (IS_ENABLED(CONFIG_KASAN))
int_psw_mask |= PSW_MASK_DAT;
/*
* Setup lowcore for boot cpu
*/
@@ -379,15 +389,15 @@
lc->restart_psw.mask = PSW_KERNEL_BITS;
lc->restart_psw.addr = (unsigned long) restart_int_handler;
-lc->external_new_psw.mask = PSW_KERNEL_BITS | PSW_MASK_MCHECK;
+lc->external_new_psw.mask = int_psw_mask | PSW_MASK_MCHECK;
lc->external_new_psw.addr = (unsigned long) ext_int_handler;
-lc->svc_new_psw.mask = PSW_KERNEL_BITS | PSW_MASK_MCHECK;
+lc->svc_new_psw.mask = int_psw_mask | PSW_MASK_MCHECK;
lc->svc_new_psw.addr = (unsigned long) system_call;
-lc->program_new_psw.mask = PSW_KERNEL_BITS | PSW_MASK_MCHECK;
+lc->program_new_psw.mask = int_psw_mask | PSW_MASK_MCHECK;
lc->program_new_psw.addr = (unsigned long) pgm_check_handler;
lc->mcck_new_psw.mask = PSW_KERNEL_BITS;
lc->mcck_new_psw.addr = (unsigned long) mcck_int_handler;
-lc->io_new_psw.mask = PSW_KERNEL_BITS | PSW_MASK_MCHECK;
+lc->io_new_psw.mask = int_psw_mask | PSW_MASK_MCHECK;
lc->io_new_psw.addr = (unsigned long) io_int_handler;
lc->clock_comparator = clock_comparator_max;
lc->nodat_stack = ((unsigned long) &init_thread_union)
@@ -402,7 +412,6 @@ static void __init setup_lowcore_dat_off(void)
memcpy(lc->alt_stfle_fac_list, S390_lowcore.alt_stfle_fac_list,
sizeof(lc->alt_stfle_fac_list));
nmi_alloc_boot_cpu(lc);
-vdso_alloc_boot_cpu(lc);
lc->sync_enter_timer = S390_lowcore.sync_enter_timer;
lc->async_enter_timer = S390_lowcore.async_enter_timer;
lc->exit_timer = S390_lowcore.exit_timer;
@@ -552,22 +561,17 @@ static void __init setup_memory_end(void)
unsigned long vmax, tmp;
/* Choose kernel address space layout: 3 or 4 levels. */
-if (IS_ENABLED(CONFIG_KASAN)) {
-vmax = IS_ENABLED(CONFIG_KASAN_S390_4_LEVEL_PAGING)
-? _REGION1_SIZE
-: _REGION2_SIZE;
-} else {
-tmp = (memory_end ?: max_physmem_end) / PAGE_SIZE;
-tmp = tmp * (sizeof(struct page) + PAGE_SIZE);
-if (tmp + vmalloc_size + MODULES_LEN <= _REGION2_SIZE)
-vmax = _REGION2_SIZE; /* 3-level kernel page table */
-else
-vmax = _REGION1_SIZE; /* 4-level kernel page table */
-}
+tmp = (memory_end ?: max_physmem_end) / PAGE_SIZE;
+tmp = tmp * (sizeof(struct page) + PAGE_SIZE);
+if (tmp + vmalloc_size + MODULES_LEN <= _REGION2_SIZE)
+vmax = _REGION2_SIZE; /* 3-level kernel page table */
+else
+vmax = _REGION1_SIZE; /* 4-level kernel page table */
if (is_prot_virt_host())
adjust_to_uv_max(&vmax);
+#ifdef CONFIG_KASAN
+vmax = kasan_vmax;
+#endif
/* module area is at the end of the kernel address space. */
MODULES_END = vmax;
MODULES_VADDR = MODULES_END - MODULES_LEN;
@@ -586,9 +590,14 @@
/* Take care that memory_end is set and <= vmemmap */
memory_end = min(memory_end ?: max_physmem_end, (unsigned long)vmemmap);
#ifdef CONFIG_KASAN
+/* fit in kasan shadow memory region between 1:1 and vmemmap */
memory_end = min(memory_end, KASAN_SHADOW_START);
-vmemmap = max(vmemmap, (struct page *)KASAN_SHADOW_END);
+#endif
+vmemmap_size = SECTION_ALIGN_UP(memory_end / PAGE_SIZE) * sizeof(struct page);
+#ifdef CONFIG_KASAN
+/* move vmemmap above kasan shadow only if it stands in the way */
+if (KASAN_SHADOW_END > (unsigned long)vmemmap &&
+(unsigned long)vmemmap + vmemmap_size > KASAN_SHADOW_START)
+vmemmap = max(vmemmap, (struct page *)KASAN_SHADOW_END);
#endif
max_pfn = max_low_pfn = PFN_DOWN(memory_end);
memblock_remove(memory_end, ULONG_MAX);
@@ -1133,8 +1142,7 @@ void __init setup_arch(char **cmdline_p)
free_mem_detect_info();
remove_oldmem();
-if (is_prot_virt_host())
-setup_uv();
+setup_uv();
setup_memory_end();
setup_memory();
dma_contiguous_reserve(memory_end);
@@ -1178,7 +1186,7 @@ void __init setup_arch(char **cmdline_p)
if (IS_ENABLED(CONFIG_EXPOLINE))
nospec_init_branches();
-/* Setup zfcpdump support */
+/* Setup zfcp/nvme dump support */
setup_zfcpdump();
/* Add system specific data to the random pool */
......
@@ -606,14 +606,14 @@ int smp_store_status(int cpu)
/*
* Collect CPU state of the previous, crashed system.
* There are four cases:
-* 1) standard zfcp dump
-* condition: OLDMEM_BASE == NULL && ipl_info.type == IPL_TYPE_FCP_DUMP
+* 1) standard zfcp/nvme dump
+* condition: OLDMEM_BASE == NULL && is_ipl_type_dump() == true
* The state for all CPUs except the boot CPU needs to be collected
* with sigp stop-and-store-status. The boot CPU state is located in
* the absolute lowcore of the memory stored in the HSA. The zcore code
* will copy the boot CPU state from the HSA.
-* 2) stand-alone kdump for SCSI (zfcp dump with swapped memory)
-* condition: OLDMEM_BASE != NULL && ipl_info.type == IPL_TYPE_FCP_DUMP
+* 2) stand-alone kdump for SCSI/NVMe (zfcp/nvme dump with swapped memory)
+* condition: OLDMEM_BASE != NULL && is_ipl_type_dump() == true
* The state for all CPUs except the boot CPU needs to be collected
* with sigp stop-and-store-status. The firmware or the boot-loader
* stored the registers of the boot CPU in the absolute lowcore in the
@@ -660,7 +660,7 @@ void __init smp_save_dump_cpus(void)
unsigned long page;
bool is_boot_cpu;
-if (!(OLDMEM_BASE || ipl_info.type == IPL_TYPE_FCP_DUMP))
+if (!(OLDMEM_BASE || is_ipl_type_dump()))
/* No previous system present, normal boot. */
return;
/* Allocate a page as dumping area for the store status sigps */
@@ -686,7 +686,7 @@ void __init smp_save_dump_cpus(void)
/* Get the vector registers */
smp_save_cpu_vxrs(sa, addr, is_boot_cpu, page);
/*
-* For a zfcp dump OLDMEM_BASE == NULL and the registers
+* For a zfcp/nvme dump OLDMEM_BASE == NULL and the registers
* of the boot CPU are stored in the HSA. To retrieve
* these registers an SCLP request is required which is
* done by drivers/s390/char/zcore.c:init_cpu_info()
......
@@ -26,33 +26,10 @@ int __bootdata_preserved(prot_virt_guest);
struct uv_info __bootdata_preserved(uv_info);
#if IS_ENABLED(CONFIG_KVM)
-int prot_virt_host;
+int __bootdata_preserved(prot_virt_host);
EXPORT_SYMBOL(prot_virt_host);
EXPORT_SYMBOL(uv_info);
-static int __init prot_virt_setup(char *val)
-{
-bool enabled;
-int rc;
-rc = kstrtobool(val, &enabled);
-if (!rc && enabled)
-prot_virt_host = 1;
-if (is_prot_virt_guest() && prot_virt_host) {
-prot_virt_host = 0;
-pr_warn("Protected virtualization not available in protected guests.");
-}
-if (prot_virt_host && !test_facility(158)) {
-prot_virt_host = 0;
-pr_warn("Protected virtualization not supported by the hardware.");
-}
-return rc;
-}
-early_param("prot_virt", prot_virt_setup);
static int __init uv_init(unsigned long stor_base, unsigned long stor_len)
{
struct uv_cb_init uvcb = {
...@@ -74,6 +51,24 @@ void __init setup_uv(void) ...@@ -74,6 +51,24 @@ void __init setup_uv(void)
{ {
unsigned long uv_stor_base; unsigned long uv_stor_base;
/*
* keep these conditions in line with the kasan init code in has_uv_sec_stor_limit()
*/
if (!is_prot_virt_host())
return;
if (is_prot_virt_guest()) {
prot_virt_host = 0;
pr_warn("Protected virtualization not available in protected guests.");
return;
}
if (!test_facility(158)) {
prot_virt_host = 0;
pr_warn("Protected virtualization not supported by the hardware.");
return;
}
uv_stor_base = (unsigned long)memblock_alloc_try_nid( uv_stor_base = (unsigned long)memblock_alloc_try_nid(
uv_info.uv_base_stor_len, SZ_1M, SZ_2G, uv_info.uv_base_stor_len, SZ_1M, SZ_2G,
MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE); MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);
...@@ -98,7 +93,8 @@ void __init setup_uv(void) ...@@ -98,7 +93,8 @@ void __init setup_uv(void)
void adjust_to_uv_max(unsigned long *vmax) void adjust_to_uv_max(unsigned long *vmax)
{ {
*vmax = min_t(unsigned long, *vmax, uv_info.max_sec_stor_addr); if (uv_info.max_sec_stor_addr)
*vmax = min_t(unsigned long, *vmax, uv_info.max_sec_stor_addr);
} }
/* /*
...@@ -118,6 +114,26 @@ static int uv_pin_shared(unsigned long paddr) ...@@ -118,6 +114,26 @@ static int uv_pin_shared(unsigned long paddr)
return 0; return 0;
} }
/*
* Requests the Ultravisor to destroy a guest page and make it
* accessible to the host. The destroy clears the page instead of
* exporting.
*
* @paddr: Absolute host address of page to be destroyed
*/
int uv_destroy_page(unsigned long paddr)
{
struct uv_cb_cfs uvcb = {
.header.cmd = UVC_CMD_DESTR_SEC_STOR,
.header.len = sizeof(uvcb),
.paddr = paddr
};
if (uv_call(0, (u64)&uvcb))
return -EINVAL;
return 0;
}
/* /*
* Requests the Ultravisor to encrypt a guest page and make it * Requests the Ultravisor to encrypt a guest page and make it
* accessible to the host for paging (export). * accessible to the host for paging (export).
......
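On teardown paths the new call replaces uv_convert_from_secure(), since clearing is cheaper than exporting when the guest is going away anyway. A hedged sketch mirroring the gmap change further below (its signature is abbreviated in the hunk; it is completed here under the usual mm_walk conventions):
static int reset_acc_pte(pte_t *ptep, unsigned long addr,
			 unsigned long next, struct mm_walk *walk)
{
	pte_t pte = READ_ONCE(*ptep);
	/* destroy (clear) the secure page rather than exporting it */
	if (pte_present(pte))
		WARN_ON_ONCE(uv_destroy_page(pte_val(pte) & PAGE_MASK));
	return 0;
}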
...@@ -20,6 +20,8 @@ ...@@ -20,6 +20,8 @@
#include <linux/security.h> #include <linux/security.h>
#include <linux/memblock.h> #include <linux/memblock.h>
#include <linux/compat.h> #include <linux/compat.h>
#include <linux/binfmts.h>
#include <vdso/datapage.h>
#include <asm/asm-offsets.h> #include <asm/asm-offsets.h>
#include <asm/processor.h> #include <asm/processor.h>
#include <asm/mmu.h> #include <asm/mmu.h>
...@@ -96,35 +98,12 @@ static union { ...@@ -96,35 +98,12 @@ static union {
struct vdso_data data; struct vdso_data data;
u8 page[PAGE_SIZE]; u8 page[PAGE_SIZE];
} vdso_data_store __page_aligned_data; } vdso_data_store __page_aligned_data;
struct vdso_data *vdso_data = &vdso_data_store.data; struct vdso_data *vdso_data = (struct vdso_data *)&vdso_data_store.data;
/*
* Setup vdso data page.
*/
static void __init vdso_init_data(struct vdso_data *vd)
{
vd->ectg_available = test_facility(31);
}
/* /*
* Allocate/free per cpu vdso data. * Allocate/free per cpu vdso data.
*/ */
#define SEGMENT_ORDER 2 #define SEGMENT_ORDER 2
/*
* The initial vdso_data structure for the boot CPU. Eventually
* it is replaced with a properly allocated structure in vdso_init.
* This is necessary because a valid S390_lowcore.vdso_per_cpu_data
* pointer is required to be able to return from an interrupt or
* program check. See the exit paths in entry.S.
*/
struct vdso_data boot_vdso_data __initdata;
void __init vdso_alloc_boot_cpu(struct lowcore *lowcore)
{
lowcore->vdso_per_cpu_data = (unsigned long) &boot_vdso_data;
}
int vdso_alloc_per_cpu(struct lowcore *lowcore) int vdso_alloc_per_cpu(struct lowcore *lowcore)
{ {
unsigned long segment_table, page_table, page_frame; unsigned long segment_table, page_table, page_frame;
...@@ -246,8 +225,6 @@ static int __init vdso_init(void) ...@@ -246,8 +225,6 @@ static int __init vdso_init(void)
{ {
int i; int i;
vdso_init_data(vdso_data);
/* Calculate the size of the 64 bit vDSO */ /* Calculate the size of the 64 bit vDSO */
vdso64_pages = ((&vdso64_end - &vdso64_start vdso64_pages = ((&vdso64_end - &vdso64_start
+ PAGE_SIZE - 1) >> PAGE_SHIFT) + 1; + PAGE_SIZE - 1) >> PAGE_SHIFT) + 1;
......
# SPDX-License-Identifier: GPL-2.0 # SPDX-License-Identifier: GPL-2.0
# List of files in the vdso, has to be asm only for now # List of files in the vdso
KCOV_INSTRUMENT := n KCOV_INSTRUMENT := n
ARCH_REL_TYPE_ABS := R_390_COPY|R_390_GLOB_DAT|R_390_JMP_SLOT|R_390_RELATIVE
ARCH_REL_TYPE_ABS += R_390_GOT|R_390_PLT
obj-vdso64 = gettimeofday.o clock_getres.o clock_gettime.o note.o getcpu.o include $(srctree)/lib/vdso/Makefile
obj-vdso64 = vdso_user_wrapper.o note.o getcpu.o
obj-cvdso64 = vdso64_generic.o
CFLAGS_REMOVE_vdso64_generic.o = -pg $(CC_FLAGS_FTRACE) $(CC_FLAGS_EXPOLINE)
# Build rules # Build rules
targets := $(obj-vdso64) vdso64.so vdso64.so.dbg targets := $(obj-vdso64) $(obj-cvdso64) vdso64.so vdso64.so.dbg
obj-vdso64 := $(addprefix $(obj)/, $(obj-vdso64)) obj-vdso64 := $(addprefix $(obj)/, $(obj-vdso64))
obj-cvdso64 := $(addprefix $(obj)/, $(obj-cvdso64))
KBUILD_AFLAGS += -DBUILD_VDSO KBUILD_AFLAGS += -DBUILD_VDSO
KBUILD_CFLAGS += -DBUILD_VDSO KBUILD_CFLAGS += -DBUILD_VDSO -DDISABLE_BRANCH_PROFILING
KBUILD_AFLAGS_64 := $(filter-out -m64,$(KBUILD_AFLAGS)) KBUILD_AFLAGS_64 := $(filter-out -m64,$(KBUILD_AFLAGS))
KBUILD_AFLAGS_64 += -m64 -s KBUILD_AFLAGS_64 += -m64 -s
...@@ -37,7 +43,7 @@ KASAN_SANITIZE := n ...@@ -37,7 +43,7 @@ KASAN_SANITIZE := n
$(obj)/vdso64_wrapper.o : $(obj)/vdso64.so $(obj)/vdso64_wrapper.o : $(obj)/vdso64.so
# link rule for the .so file, .lds has to be first # link rule for the .so file, .lds has to be first
$(obj)/vdso64.so.dbg: $(obj)/vdso64.lds $(obj-vdso64) FORCE $(obj)/vdso64.so.dbg: $(src)/vdso64.lds $(obj-vdso64) $(obj-cvdso64) FORCE
$(call if_changed,ld) $(call if_changed,ld)
# strip rule for the .so file # strip rule for the .so file
...@@ -49,9 +55,14 @@ $(obj)/%.so: $(obj)/%.so.dbg FORCE ...@@ -49,9 +55,14 @@ $(obj)/%.so: $(obj)/%.so.dbg FORCE
$(obj-vdso64): %.o: %.S FORCE $(obj-vdso64): %.o: %.S FORCE
$(call if_changed_dep,vdso64as) $(call if_changed_dep,vdso64as)
$(obj-cvdso64): %.o: %.c FORCE
$(call if_changed_dep,vdso64cc)
# actual build commands # actual build commands
quiet_cmd_vdso64as = VDSO64A $@ quiet_cmd_vdso64as = VDSO64A $@
cmd_vdso64as = $(CC) $(a_flags) -c -o $@ $< cmd_vdso64as = $(CC) $(a_flags) -c -o $@ $<
quiet_cmd_vdso64cc = VDSO64C $@
cmd_vdso64cc = $(CC) $(c_flags) -c -o $@ $<
# install commands for the unstripped file # install commands for the unstripped file
quiet_cmd_vdso_install = INSTALL $@ quiet_cmd_vdso_install = INSTALL $@
......
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Userland implementation of clock_getres() for 64-bit processes in an
* s390 kernel for use in the vDSO
*
* Copyright IBM Corp. 2008
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
*/
#include <asm/vdso.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
#include <asm/dwarf.h>
.text
.align 4
.globl __kernel_clock_getres
.type __kernel_clock_getres,@function
__kernel_clock_getres:
CFI_STARTPROC
larl %r1,3f
lg %r0,0(%r1)
cghi %r2,__CLOCK_REALTIME_COARSE
je 0f
cghi %r2,__CLOCK_MONOTONIC_COARSE
je 0f
larl %r1,_vdso_data
llgf %r0,__VDSO_CLOCK_REALTIME_RES(%r1)
cghi %r2,__CLOCK_REALTIME
je 0f
cghi %r2,__CLOCK_MONOTONIC
je 0f
cghi %r2,__CLOCK_THREAD_CPUTIME_ID
je 0f
cghi %r2,-2 /* Per-thread CPUCLOCK with PID=0, VIRT=1 */
jne 2f
larl %r5,_vdso_data
icm %r0,15,__LC_ECTG_OK(%r5)
jz 2f
0: ltgr %r3,%r3
jz 1f /* res == NULL */
xc 0(8,%r3),0(%r3) /* set tp->tv_sec to zero */
stg %r0,8(%r3) /* store tp->tv_nsec */
1: lghi %r2,0
br %r14
2: lghi %r1,__NR_clock_getres /* fallback to svc */
svc 0
br %r14
CFI_ENDPROC
3: .quad __CLOCK_COARSE_RES
.size __kernel_clock_getres,.-__kernel_clock_getres
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Userland implementation of clock_gettime() for 64-bit processes in an
* s390 kernel for use in the vDSO
*
* Copyright IBM Corp. 2008
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
*/
#include <asm/vdso.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
#include <asm/dwarf.h>
#include <asm/ptrace.h>
.text
.align 4
.globl __kernel_clock_gettime
.type __kernel_clock_gettime,@function
__kernel_clock_gettime:
CFI_STARTPROC
aghi %r15,-16
CFI_DEF_CFA_OFFSET STACK_FRAME_OVERHEAD+16
CFI_VAL_OFFSET 15, -STACK_FRAME_OVERHEAD
larl %r5,_vdso_data
cghi %r2,__CLOCK_REALTIME_COARSE
je 4f
cghi %r2,__CLOCK_REALTIME
je 5f
cghi %r2,-3 /* Per-thread CPUCLOCK with PID=0, VIRT=1 */
je 9f
cghi %r2,__CLOCK_MONOTONIC_COARSE
je 3f
cghi %r2,__CLOCK_MONOTONIC
jne 12f
/* CLOCK_MONOTONIC */
0: lg %r4,__VDSO_UPD_COUNT(%r5) /* load update counter */
tmll %r4,0x0001 /* pending update ? loop */
jnz 0b
stcke 0(%r15) /* Store TOD clock */
lgf %r2,__VDSO_TK_SHIFT(%r5) /* Timekeeper shift */
lg %r0,__VDSO_WTOM_SEC(%r5)
lg %r1,1(%r15)
sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */
msgf %r1,__VDSO_TK_MULT(%r5) /* * tk->mult */
alg %r1,__VDSO_WTOM_NSEC(%r5)
srlg %r1,%r1,0(%r2) /* >> tk->shift */
clg %r4,__VDSO_UPD_COUNT(%r5) /* check update counter */
jne 0b
larl %r5,13f
1: clg %r1,0(%r5)
jl 2f
slg %r1,0(%r5)
aghi %r0,1
j 1b
2: stg %r0,0(%r3) /* store tp->tv_sec */
stg %r1,8(%r3) /* store tp->tv_nsec */
lghi %r2,0
aghi %r15,16
CFI_DEF_CFA_OFFSET STACK_FRAME_OVERHEAD
CFI_RESTORE 15
br %r14
/* CLOCK_MONOTONIC_COARSE */
CFI_DEF_CFA_OFFSET STACK_FRAME_OVERHEAD+16
CFI_VAL_OFFSET 15, -STACK_FRAME_OVERHEAD
3: lg %r4,__VDSO_UPD_COUNT(%r5) /* load update counter */
tmll %r4,0x0001 /* pending update ? loop */
jnz 3b
lg %r0,__VDSO_WTOM_CRS_SEC(%r5)
lg %r1,__VDSO_WTOM_CRS_NSEC(%r5)
clg %r4,__VDSO_UPD_COUNT(%r5) /* check update counter */
jne 3b
j 2b
/* CLOCK_REALTIME_COARSE */
4: lg %r4,__VDSO_UPD_COUNT(%r5) /* load update counter */
tmll %r4,0x0001 /* pending update ? loop */
jnz 4b
lg %r0,__VDSO_XTIME_CRS_SEC(%r5)
lg %r1,__VDSO_XTIME_CRS_NSEC(%r5)
clg %r4,__VDSO_UPD_COUNT(%r5) /* check update counter */
jne 4b
j 7f
/* CLOCK_REALTIME */
5: lg %r4,__VDSO_UPD_COUNT(%r5) /* load update counter */
tmll %r4,0x0001 /* pending update ? loop */
jnz 5b
stcke 0(%r15) /* Store TOD clock */
lg %r1,1(%r15)
lg %r0,__VDSO_TS_END(%r5) /* TOD steering end time */
slgr %r0,%r1 /* now - ts_steering_end */
ltgr %r0,%r0 /* past end of steering ? */
jm 17f
srlg %r0,%r0,15 /* 1 per 2^16 */
tm __VDSO_TS_DIR+3(%r5),0x01 /* steering direction? */
jz 18f
lcgr %r0,%r0 /* negative TOD offset */
18: algr %r1,%r0 /* add steering offset */
17: lgf %r2,__VDSO_TK_SHIFT(%r5) /* Timekeeper shift */
sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */
msgf %r1,__VDSO_TK_MULT(%r5) /* * tk->mult */
alg %r1,__VDSO_XTIME_NSEC(%r5) /* + tk->xtime_nsec */
srlg %r1,%r1,0(%r2) /* >> tk->shift */
lg %r0,__VDSO_XTIME_SEC(%r5) /* tk->xtime_sec */
clg %r4,__VDSO_UPD_COUNT(%r5) /* check update counter */
jne 5b
larl %r5,13f
6: clg %r1,0(%r5)
jl 7f
slg %r1,0(%r5)
aghi %r0,1
j 6b
7: stg %r0,0(%r3) /* store tp->tv_sec */
stg %r1,8(%r3) /* store tp->tv_nsec */
lghi %r2,0
aghi %r15,16
CFI_DEF_CFA_OFFSET STACK_FRAME_OVERHEAD
CFI_RESTORE 15
br %r14
/* CPUCLOCK_VIRT for this thread */
CFI_DEF_CFA_OFFSET STACK_FRAME_OVERHEAD+16
CFI_VAL_OFFSET 15, -STACK_FRAME_OVERHEAD
9: lghi %r4,0
icm %r0,15,__VDSO_ECTG_OK(%r5)
jz 12f
sacf 256 /* Magic ectg instruction */
.insn ssf,0xc80100000000,__VDSO_ECTG_BASE(4),__VDSO_ECTG_USER(4),4
sacf 0
algr %r1,%r0 /* r1 = cputime as TOD value */
mghi %r1,1000 /* convert to nanoseconds */
srlg %r1,%r1,12 /* r1 = cputime in nanosec */
lgr %r4,%r1
larl %r5,13f
srlg %r1,%r1,9 /* divide by 1000000000 */
mlg %r0,8(%r5)
srlg %r0,%r0,11 /* r0 = tv_sec */
stg %r0,0(%r3)
msg %r0,0(%r5) /* calculate tv_nsec */
slgr %r4,%r0 /* r4 = tv_nsec */
stg %r4,8(%r3)
lghi %r2,0
aghi %r15,16
CFI_DEF_CFA_OFFSET STACK_FRAME_OVERHEAD
CFI_RESTORE 15
br %r14
/* Fallback to system call */
CFI_DEF_CFA_OFFSET STACK_FRAME_OVERHEAD+16
CFI_VAL_OFFSET 15, -STACK_FRAME_OVERHEAD
12: lghi %r1,__NR_clock_gettime
svc 0
aghi %r15,16
CFI_DEF_CFA_OFFSET STACK_FRAME_OVERHEAD
CFI_RESTORE 15
br %r14
CFI_ENDPROC
13: .quad 1000000000
14: .quad 19342813113834067
.size __kernel_clock_gettime,.-__kernel_clock_gettime
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Userland implementation of gettimeofday() for 64-bit processes in an
* s390 kernel for use in the vDSO
*
* Copyright IBM Corp. 2008
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
*/
#include <asm/vdso.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
#include <asm/dwarf.h>
#include <asm/ptrace.h>
.text
.align 4
.globl __kernel_gettimeofday
.type __kernel_gettimeofday,@function
__kernel_gettimeofday:
CFI_STARTPROC
aghi %r15,-16
CFI_ADJUST_CFA_OFFSET 16
CFI_VAL_OFFSET 15, -STACK_FRAME_OVERHEAD
larl %r5,_vdso_data
0: ltgr %r3,%r3 /* check if tz is NULL */
je 1f
mvc 0(8,%r3),__VDSO_TIMEZONE(%r5)
1: ltgr %r2,%r2 /* check if tv is NULL */
je 4f
lg %r4,__VDSO_UPD_COUNT(%r5) /* load update counter */
tmll %r4,0x0001 /* pending update ? loop */
jnz 0b
stcke 0(%r15) /* Store TOD clock */
lg %r1,1(%r15)
lg %r0,__VDSO_TS_END(%r5) /* TOD steering end time */
slgr %r0,%r1 /* now - ts_steering_end */
ltgr %r0,%r0 /* past end of steering ? */
jm 6f
srlg %r0,%r0,15 /* 1 per 2^16 */
tm __VDSO_TS_DIR+3(%r5),0x01 /* steering direction? */
jz 7f
lcgr %r0,%r0 /* negative TOD offset */
7: algr %r1,%r0 /* add steering offset */
6: sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */
msgf %r1,__VDSO_TK_MULT(%r5) /* * tk->mult */
alg %r1,__VDSO_XTIME_NSEC(%r5) /* + tk->xtime_nsec */
lg %r0,__VDSO_XTIME_SEC(%r5) /* tk->xtime_sec */
clg %r4,__VDSO_UPD_COUNT(%r5) /* check update counter */
jne 0b
lgf %r5,__VDSO_TK_SHIFT(%r5) /* Timekeeper shift */
srlg %r1,%r1,0(%r5) /* >> tk->shift */
larl %r5,5f
2: clg %r1,0(%r5)
jl 3f
slg %r1,0(%r5)
aghi %r0,1
j 2b
3: stg %r0,0(%r2) /* store tv->tv_sec */
slgr %r0,%r0 /* tv_nsec -> tv_usec */
ml %r0,8(%r5)
srlg %r0,%r0,6
stg %r0,8(%r2) /* store tv->tv_usec */
4: lghi %r2,0
aghi %r15,16
CFI_ADJUST_CFA_OFFSET -16
CFI_RESTORE 15
br %r14
CFI_ENDPROC
5: .quad 1000000000
.long 274877907
.size __kernel_gettimeofday,.-__kernel_gettimeofday
// SPDX-License-Identifier: GPL-2.0
#include "../../../../lib/vdso/gettimeofday.c"
int __s390_vdso_gettimeofday(struct __kernel_old_timeval *tv,
struct timezone *tz)
{
return __cvdso_gettimeofday(tv, tz);
}
int __s390_vdso_clock_gettime(clockid_t clock, struct __kernel_timespec *ts)
{
return __cvdso_clock_gettime(clock, ts);
}
int __s390_vdso_clock_getres(clockid_t clock, struct __kernel_timespec *ts)
{
return __cvdso_clock_getres(clock, ts);
}
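From userspace nothing changes at the API level: the C library resolves clock_gettime() and friends to the __kernel_* symbols exported by the vDSO, so the generic implementation above is exercised by an ordinary library call. A small standalone demo (plain userspace C, nothing s390-specific assumed):
#include <stdio.h>
#include <time.h>
int main(void)
{
	struct timespec ts;
	/* resolved to the vDSO's __kernel_clock_gettime when available */
	if (clock_gettime(CLOCK_MONOTONIC, &ts) == 0)
		printf("monotonic: %lld.%09ld\n",
		       (long long)ts.tv_sec, ts.tv_nsec);
	return 0;
}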
/* SPDX-License-Identifier: GPL-2.0 */
#include <asm/vdso.h>
#include <asm/unistd.h>
#include <asm/asm-offsets.h>
#include <asm/dwarf.h>
#include <asm/ptrace.h>
#define WRAPPER_FRAME_SIZE (STACK_FRAME_OVERHEAD+8)
/*
* Older glibc versions called the vdso without allocating a stack frame. This
* wrapper is only used to allocate a stack frame. See
* https://sourceware.org/git/?p=glibc.git;a=commit;h=478593e6374f3818da39332260dc453cb19cfa1e
* for details.
*/
.macro vdso_func func
.globl __kernel_\func
.type __kernel_\func,@function
.align 8
__kernel_\func:
CFI_STARTPROC
aghi %r15,-WRAPPER_FRAME_SIZE
CFI_DEF_CFA_OFFSET (STACK_FRAME_OVERHEAD + WRAPPER_FRAME_SIZE)
CFI_VAL_OFFSET 15, -STACK_FRAME_OVERHEAD
stg %r14,STACK_FRAME_OVERHEAD(%r15)
brasl %r14,__s390_vdso_\func
lg %r14,STACK_FRAME_OVERHEAD(%r15)
aghi %r15,WRAPPER_FRAME_SIZE
CFI_DEF_CFA_OFFSET STACK_FRAME_OVERHEAD
CFI_RESTORE 15
br %r14
CFI_ENDPROC
.size __kernel_\func,.-__kernel_\func
.endm
vdso_func gettimeofday
vdso_func clock_getres
vdso_func clock_gettime
...@@ -333,7 +333,7 @@ EXPORT_SYMBOL(memchr); ...@@ -333,7 +333,7 @@ EXPORT_SYMBOL(memchr);
* memcmp - Compare two areas of memory * memcmp - Compare two areas of memory
* @s1: One area of memory * @s1: One area of memory
* @s2: Another area of memory * @s2: Another area of memory
* @count: The size of the area. * @n: The size of the area.
*/ */
#ifdef __HAVE_ARCH_MEMCMP #ifdef __HAVE_ARCH_MEMCMP
int memcmp(const void *s1, const void *s2, size_t n) int memcmp(const void *s1, const void *s2, size_t n)
......
...@@ -8,7 +8,7 @@ obj-y += page-states.o pageattr.o pgtable.o pgalloc.o ...@@ -8,7 +8,7 @@ obj-y += page-states.o pageattr.o pgtable.o pgalloc.o
obj-$(CONFIG_CMM) += cmm.o obj-$(CONFIG_CMM) += cmm.o
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
obj-$(CONFIG_S390_PTDUMP) += dump_pagetables.o obj-$(CONFIG_PTDUMP_CORE) += dump_pagetables.o
obj-$(CONFIG_PGSTE) += gmap.o obj-$(CONFIG_PGSTE) += gmap.o
KASAN_SANITIZE_kasan_init.o := n KASAN_SANITIZE_kasan_init.o := n
......
...@@ -2679,7 +2679,7 @@ static int __s390_reset_acc(pte_t *ptep, unsigned long addr, ...@@ -2679,7 +2679,7 @@ static int __s390_reset_acc(pte_t *ptep, unsigned long addr,
pte_t pte = READ_ONCE(*ptep); pte_t pte = READ_ONCE(*ptep);
if (pte_present(pte)) if (pte_present(pte))
WARN_ON_ONCE(uv_convert_from_secure(pte_val(pte) & PAGE_MASK)); WARN_ON_ONCE(uv_destroy_page(pte_val(pte) & PAGE_MASK));
return 0; return 0;
} }
......
...@@ -34,6 +34,7 @@ ...@@ -34,6 +34,7 @@
#include <asm/processor.h> #include <asm/processor.h>
#include <linux/uaccess.h> #include <linux/uaccess.h>
#include <asm/pgalloc.h> #include <asm/pgalloc.h>
#include <asm/ptdump.h>
#include <asm/dma.h> #include <asm/dma.h>
#include <asm/lowcore.h> #include <asm/lowcore.h>
#include <asm/tlb.h> #include <asm/tlb.h>
...@@ -129,6 +130,7 @@ void mark_rodata_ro(void) ...@@ -129,6 +130,7 @@ void mark_rodata_ro(void)
set_memory_ro((unsigned long)__start_ro_after_init, size >> PAGE_SHIFT); set_memory_ro((unsigned long)__start_ro_after_init, size >> PAGE_SHIFT);
pr_info("Write protected read-only-after-init data: %luk\n", size >> 10); pr_info("Write protected read-only-after-init data: %luk\n", size >> 10);
debug_checkwx();
} }
int set_memory_encrypted(unsigned long addr, int numpages) int set_memory_encrypted(unsigned long addr, int numpages)
......
...@@ -11,7 +11,9 @@ ...@@ -11,7 +11,9 @@
#include <asm/facility.h> #include <asm/facility.h>
#include <asm/sections.h> #include <asm/sections.h>
#include <asm/setup.h> #include <asm/setup.h>
#include <asm/uv.h>
unsigned long kasan_vmax;
static unsigned long segment_pos __initdata; static unsigned long segment_pos __initdata;
static unsigned long segment_low __initdata; static unsigned long segment_low __initdata;
static unsigned long pgalloc_pos __initdata; static unsigned long pgalloc_pos __initdata;
...@@ -99,8 +101,12 @@ static void __init kasan_early_vmemmap_populate(unsigned long address, ...@@ -99,8 +101,12 @@ static void __init kasan_early_vmemmap_populate(unsigned long address,
pgt_prot_zero = pgprot_val(PAGE_KERNEL_RO); pgt_prot_zero = pgprot_val(PAGE_KERNEL_RO);
if (!has_nx) if (!has_nx)
pgt_prot_zero &= ~_PAGE_NOEXEC; pgt_prot_zero &= ~_PAGE_NOEXEC;
pgt_prot = pgprot_val(PAGE_KERNEL_EXEC); pgt_prot = pgprot_val(PAGE_KERNEL);
sgt_prot = pgprot_val(SEGMENT_KERNEL_EXEC); sgt_prot = pgprot_val(SEGMENT_KERNEL);
if (!has_nx || mode == POPULATE_ONE2ONE) {
pgt_prot &= ~_PAGE_NOEXEC;
sgt_prot &= ~_SEGMENT_ENTRY_NOEXEC;
}
while (address < end) { while (address < end) {
pg_dir = pgd_offset_k(address); pg_dir = pgd_offset_k(address);
...@@ -252,14 +258,31 @@ static void __init kasan_early_detect_facilities(void) ...@@ -252,14 +258,31 @@ static void __init kasan_early_detect_facilities(void)
} }
} }
static bool __init has_uv_sec_stor_limit(void)
{
/*
* keep these conditions in line with setup_uv()
*/
if (!is_prot_virt_host())
return false;
if (is_prot_virt_guest())
return false;
if (!test_facility(158))
return false;
return !!uv_info.max_sec_stor_addr;
}
void __init kasan_early_init(void) void __init kasan_early_init(void)
{ {
unsigned long untracked_mem_end; unsigned long untracked_mem_end;
unsigned long shadow_alloc_size; unsigned long shadow_alloc_size;
unsigned long vmax_unlimited;
unsigned long initrd_end; unsigned long initrd_end;
unsigned long asce_type; unsigned long asce_type;
unsigned long memsize; unsigned long memsize;
unsigned long vmax;
unsigned long pgt_prot = pgprot_val(PAGE_KERNEL_RO); unsigned long pgt_prot = pgprot_val(PAGE_KERNEL_RO);
pte_t pte_z; pte_t pte_z;
pmd_t pmd_z = __pmd(__pa(kasan_early_shadow_pte) | _SEGMENT_ENTRY); pmd_t pmd_z = __pmd(__pa(kasan_early_shadow_pte) | _SEGMENT_ENTRY);
...@@ -287,7 +310,9 @@ void __init kasan_early_init(void) ...@@ -287,7 +310,9 @@ void __init kasan_early_init(void)
BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, P4D_SIZE)); BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, P4D_SIZE));
crst_table_init((unsigned long *)early_pg_dir, crst_table_init((unsigned long *)early_pg_dir,
_REGION2_ENTRY_EMPTY); _REGION2_ENTRY_EMPTY);
untracked_mem_end = vmax = _REGION1_SIZE; untracked_mem_end = kasan_vmax = vmax_unlimited = _REGION1_SIZE;
if (has_uv_sec_stor_limit())
kasan_vmax = min(vmax_unlimited, uv_info.max_sec_stor_addr);
asce_type = _ASCE_TYPE_REGION2; asce_type = _ASCE_TYPE_REGION2;
} else { } else {
/* 3 level paging */ /* 3 level paging */
...@@ -295,7 +320,7 @@ void __init kasan_early_init(void) ...@@ -295,7 +320,7 @@ void __init kasan_early_init(void)
BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, PUD_SIZE)); BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, PUD_SIZE));
crst_table_init((unsigned long *)early_pg_dir, crst_table_init((unsigned long *)early_pg_dir,
_REGION3_ENTRY_EMPTY); _REGION3_ENTRY_EMPTY);
untracked_mem_end = vmax = _REGION2_SIZE; untracked_mem_end = kasan_vmax = vmax_unlimited = _REGION2_SIZE;
asce_type = _ASCE_TYPE_REGION3; asce_type = _ASCE_TYPE_REGION3;
} }
...@@ -365,17 +390,20 @@ void __init kasan_early_init(void) ...@@ -365,17 +390,20 @@ void __init kasan_early_init(void)
/* populate kasan shadow (for identity mapping and zero page mapping) */ /* populate kasan shadow (for identity mapping and zero page mapping) */
kasan_early_vmemmap_populate(__sha(0), __sha(memsize), POPULATE_MAP); kasan_early_vmemmap_populate(__sha(0), __sha(memsize), POPULATE_MAP);
if (IS_ENABLED(CONFIG_MODULES)) if (IS_ENABLED(CONFIG_MODULES))
untracked_mem_end = vmax - MODULES_LEN; untracked_mem_end = kasan_vmax - MODULES_LEN;
if (IS_ENABLED(CONFIG_KASAN_VMALLOC)) { if (IS_ENABLED(CONFIG_KASAN_VMALLOC)) {
untracked_mem_end = vmax - vmalloc_size - MODULES_LEN; untracked_mem_end = kasan_vmax - vmalloc_size - MODULES_LEN;
/* shallowly populate kasan shadow for vmalloc and modules */ /* shallowly populate kasan shadow for vmalloc and modules */
kasan_early_vmemmap_populate(__sha(untracked_mem_end), kasan_early_vmemmap_populate(__sha(untracked_mem_end),
__sha(vmax), POPULATE_SHALLOW); __sha(kasan_vmax), POPULATE_SHALLOW);
} }
/* populate kasan shadow for untracked memory */ /* populate kasan shadow for untracked memory */
kasan_early_vmemmap_populate(__sha(max_physmem_end), kasan_early_vmemmap_populate(__sha(max_physmem_end),
__sha(untracked_mem_end), __sha(untracked_mem_end),
POPULATE_ZERO_SHADOW); POPULATE_ZERO_SHADOW);
kasan_early_vmemmap_populate(__sha(kasan_vmax),
__sha(vmax_unlimited),
POPULATE_ZERO_SHADOW);
/* memory allocated for identity mapping structs will be freed later */ /* memory allocated for identity mapping structs will be freed later */
pgalloc_freeable = pgalloc_pos; pgalloc_freeable = pgalloc_pos;
/* populate identity mapping */ /* populate identity mapping */
......
...@@ -278,7 +278,7 @@ static int walk_p4d_level(pgd_t *pgd, unsigned long addr, unsigned long end, ...@@ -278,7 +278,7 @@ static int walk_p4d_level(pgd_t *pgd, unsigned long addr, unsigned long end,
return rc; return rc;
} }
static DEFINE_MUTEX(cpa_mutex); DEFINE_MUTEX(cpa_mutex);
static int change_page_attr(unsigned long addr, unsigned long end, static int change_page_attr(unsigned long addr, unsigned long end,
unsigned long flags) unsigned long flags)
......
...@@ -24,6 +24,26 @@ ...@@ -24,6 +24,26 @@
#include <asm/mmu_context.h> #include <asm/mmu_context.h>
#include <asm/page-states.h> #include <asm/page-states.h>
pgprot_t pgprot_writecombine(pgprot_t prot)
{
/*
* mio_wb_bit_mask may be set on a different CPU, but it is only set
* once at init and only read afterwards.
*/
return __pgprot(pgprot_val(prot) | mio_wb_bit_mask);
}
EXPORT_SYMBOL_GPL(pgprot_writecombine);
pgprot_t pgprot_writethrough(pgprot_t prot)
{
/*
* mio_wb_bit_mask may be set on a different CPU, but it is only set
* once at init and only read afterwards.
*/
return __pgprot(pgprot_val(prot) & ~mio_wb_bit_mask);
}
EXPORT_SYMBOL_GPL(pgprot_writethrough);
static inline void ptep_ipte_local(struct mm_struct *mm, unsigned long addr, static inline void ptep_ipte_local(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, int nodat) pte_t *ptep, int nodat)
{ {
......
...@@ -6,3 +6,4 @@ ...@@ -6,3 +6,4 @@
obj-$(CONFIG_PCI) += pci.o pci_irq.o pci_dma.o pci_clp.o pci_sysfs.o \ obj-$(CONFIG_PCI) += pci.o pci_irq.o pci_dma.o pci_clp.o pci_sysfs.o \
pci_event.o pci_debug.o pci_insn.o pci_mmio.o \ pci_event.o pci_debug.o pci_insn.o pci_mmio.o \
pci_bus.o pci_bus.o
obj-$(CONFIG_PCI_IOV) += pci_iov.o
...@@ -37,6 +37,7 @@ ...@@ -37,6 +37,7 @@
#include <asm/pci_dma.h> #include <asm/pci_dma.h>
#include "pci_bus.h" #include "pci_bus.h"
#include "pci_iov.h"
/* list of all detected zpci devices */ /* list of all detected zpci devices */
static LIST_HEAD(zpci_list); static LIST_HEAD(zpci_list);
...@@ -226,7 +227,7 @@ void __iowrite64_copy(void __iomem *to, const void *from, size_t count) ...@@ -226,7 +227,7 @@ void __iowrite64_copy(void __iomem *to, const void *from, size_t count)
zpci_memcpy_toio(to, from, count); zpci_memcpy_toio(to, from, count);
} }
void __iomem *ioremap(phys_addr_t addr, size_t size) static void __iomem *__ioremap(phys_addr_t addr, size_t size, pgprot_t prot)
{ {
unsigned long offset, vaddr; unsigned long offset, vaddr;
struct vm_struct *area; struct vm_struct *area;
...@@ -247,14 +248,37 @@ void __iomem *ioremap(phys_addr_t addr, size_t size) ...@@ -247,14 +248,37 @@ void __iomem *ioremap(phys_addr_t addr, size_t size)
return NULL; return NULL;
vaddr = (unsigned long) area->addr; vaddr = (unsigned long) area->addr;
if (ioremap_page_range(vaddr, vaddr + size, addr, PAGE_KERNEL)) { if (ioremap_page_range(vaddr, vaddr + size, addr, prot)) {
free_vm_area(area); free_vm_area(area);
return NULL; return NULL;
} }
return (void __iomem *) ((unsigned long) area->addr + offset); return (void __iomem *) ((unsigned long) area->addr + offset);
} }
void __iomem *ioremap_prot(phys_addr_t addr, size_t size, unsigned long prot)
{
return __ioremap(addr, size, __pgprot(prot));
}
EXPORT_SYMBOL(ioremap_prot);
void __iomem *ioremap(phys_addr_t addr, size_t size)
{
return __ioremap(addr, size, PAGE_KERNEL);
}
EXPORT_SYMBOL(ioremap); EXPORT_SYMBOL(ioremap);
void __iomem *ioremap_wc(phys_addr_t addr, size_t size)
{
return __ioremap(addr, size, pgprot_writecombine(PAGE_KERNEL));
}
EXPORT_SYMBOL(ioremap_wc);
void __iomem *ioremap_wt(phys_addr_t addr, size_t size)
{
return __ioremap(addr, size, pgprot_writethrough(PAGE_KERNEL));
}
EXPORT_SYMBOL(ioremap_wt);
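A hedged driver-side sketch of how the new variant would be used; the function name and BAR choice are illustrative, only ioremap_wc() and the standard PCI resource helpers are assumed:
static void __iomem *map_bar0_wc(struct pci_dev *pdev)
{
	resource_size_t start = pci_resource_start(pdev, 0);
	resource_size_t len = pci_resource_len(pdev, 0);
	if (!start || !len)
		return NULL;
	/* with MIO, picks up mio_wb_bit_mask via pgprot_writecombine() */
	return ioremap_wc(start, len);
}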
void iounmap(volatile void __iomem *addr) void iounmap(volatile void __iomem *addr)
{ {
if (static_branch_likely(&have_mio)) if (static_branch_likely(&have_mio))
...@@ -390,15 +414,6 @@ static struct pci_ops pci_root_ops = { ...@@ -390,15 +414,6 @@ static struct pci_ops pci_root_ops = {
.write = pci_write, .write = pci_write,
}; };
#ifdef CONFIG_PCI_IOV
static struct resource iov_res = {
.name = "PCI IOV res",
.start = 0,
.end = -1,
.flags = IORESOURCE_MEM,
};
#endif
static void zpci_map_resources(struct pci_dev *pdev) static void zpci_map_resources(struct pci_dev *pdev)
{ {
struct zpci_dev *zdev = to_zpci(pdev); struct zpci_dev *zdev = to_zpci(pdev);
...@@ -419,16 +434,7 @@ static void zpci_map_resources(struct pci_dev *pdev) ...@@ -419,16 +434,7 @@ static void zpci_map_resources(struct pci_dev *pdev)
pdev->resource[i].end = pdev->resource[i].start + len - 1; pdev->resource[i].end = pdev->resource[i].start + len - 1;
} }
#ifdef CONFIG_PCI_IOV zpci_iov_map_resources(pdev);
for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
int bar = i + PCI_IOV_RESOURCES;
len = pci_resource_len(pdev, bar);
if (!len)
continue;
pdev->resource[bar].parent = &iov_res;
}
#endif
} }
static void zpci_unmap_resources(struct pci_dev *pdev) static void zpci_unmap_resources(struct pci_dev *pdev)
...@@ -684,7 +690,7 @@ void zpci_remove_device(struct zpci_dev *zdev) ...@@ -684,7 +690,7 @@ void zpci_remove_device(struct zpci_dev *zdev)
pdev = pci_get_slot(zbus->bus, zdev->devfn); pdev = pci_get_slot(zbus->bus, zdev->devfn);
if (pdev) { if (pdev) {
if (pdev->is_virtfn) if (pdev->is_virtfn)
return zpci_remove_virtfn(pdev, zdev->vfn); return zpci_iov_remove_virtfn(pdev, zdev->vfn);
pci_stop_and_remove_bus_device_locked(pdev); pci_stop_and_remove_bus_device_locked(pdev);
} }
} }
...@@ -788,6 +794,9 @@ static int zpci_mem_init(void) ...@@ -788,6 +794,9 @@ static int zpci_mem_init(void)
if (!zpci_iomap_bitmap) if (!zpci_iomap_bitmap)
goto error_iomap_bitmap; goto error_iomap_bitmap;
if (static_branch_likely(&have_mio))
clp_setup_writeback_mio();
return 0; return 0;
error_iomap_bitmap: error_iomap_bitmap:
kfree(zpci_iomap_start); kfree(zpci_iomap_start);
...@@ -885,9 +894,3 @@ static int __init pci_base_init(void) ...@@ -885,9 +894,3 @@ static int __init pci_base_init(void)
return rc; return rc;
} }
subsys_initcall_sync(pci_base_init); subsys_initcall_sync(pci_base_init);
void zpci_rescan(void)
{
if (zpci_is_enabled())
clp_rescan_pci_devices_simple(NULL);
}
...@@ -24,6 +24,7 @@ ...@@ -24,6 +24,7 @@
#include <asm/pci_dma.h> #include <asm/pci_dma.h>
#include "pci_bus.h" #include "pci_bus.h"
#include "pci_iov.h"
static LIST_HEAD(zbus_list); static LIST_HEAD(zbus_list);
static DEFINE_SPINLOCK(zbus_list_lock); static DEFINE_SPINLOCK(zbus_list_lock);
...@@ -126,69 +127,6 @@ static struct zpci_bus *zpci_bus_alloc(int pchid) ...@@ -126,69 +127,6 @@ static struct zpci_bus *zpci_bus_alloc(int pchid)
return zbus; return zbus;
} }
#ifdef CONFIG_PCI_IOV
static int zpci_bus_link_virtfn(struct pci_dev *pdev,
struct pci_dev *virtfn, int vfid)
{
int rc;
rc = pci_iov_sysfs_link(pdev, virtfn, vfid);
if (rc)
return rc;
virtfn->is_virtfn = 1;
virtfn->multifunction = 0;
virtfn->physfn = pci_dev_get(pdev);
return 0;
}
static int zpci_bus_setup_virtfn(struct zpci_bus *zbus,
struct pci_dev *virtfn, int vfn)
{
int i, cand_devfn;
struct zpci_dev *zdev;
struct pci_dev *pdev;
int vfid = vfn - 1; /* Linux' vfid's start at 0 vfn at 1*/
int rc = 0;
if (!zbus->multifunction)
return 0;
/* If the parent PF for the given VF is also configured in the
* instance, it must be on the same zbus.
* We can then identify the parent PF by checking what
* devfn the VF would have if it belonged to that PF using the PF's
* stride and offset. Only if this candidate devfn matches the
* actual devfn will we link both functions.
*/
for (i = 0; i < ZPCI_FUNCTIONS_PER_BUS; i++) {
zdev = zbus->function[i];
if (zdev && zdev->is_physfn) {
pdev = pci_get_slot(zbus->bus, zdev->devfn);
if (!pdev)
continue;
cand_devfn = pci_iov_virtfn_devfn(pdev, vfid);
if (cand_devfn == virtfn->devfn) {
rc = zpci_bus_link_virtfn(pdev, virtfn, vfid);
/* balance pci_get_slot() */
pci_dev_put(pdev);
break;
}
/* balance pci_get_slot() */
pci_dev_put(pdev);
}
}
return rc;
}
#else
static inline int zpci_bus_setup_virtfn(struct zpci_bus *zbus,
struct pci_dev *virtfn, int vfn)
{
return 0;
}
#endif
void pcibios_bus_add_device(struct pci_dev *pdev) void pcibios_bus_add_device(struct pci_dev *pdev)
{ {
struct zpci_dev *zdev = to_zpci(pdev); struct zpci_dev *zdev = to_zpci(pdev);
...@@ -198,7 +136,7 @@ void pcibios_bus_add_device(struct pci_dev *pdev) ...@@ -198,7 +136,7 @@ void pcibios_bus_add_device(struct pci_dev *pdev)
* perform PF/VF linking. * perform PF/VF linking.
*/ */
if (zdev->vfn) if (zdev->vfn)
zpci_bus_setup_virtfn(zdev->zbus, pdev, zdev->vfn); zpci_iov_setup_virtfn(zdev->zbus, pdev, zdev->vfn);
} }
......
...@@ -9,7 +9,6 @@ ...@@ -9,7 +9,6 @@
int zpci_bus_device_register(struct zpci_dev *zdev, struct pci_ops *ops); int zpci_bus_device_register(struct zpci_dev *zdev, struct pci_ops *ops);
void zpci_bus_device_unregister(struct zpci_dev *zdev); void zpci_bus_device_unregister(struct zpci_dev *zdev);
int zpci_bus_init(void);
void zpci_release_device(struct kref *kref); void zpci_release_device(struct kref *kref);
static inline void zpci_zdev_put(struct zpci_dev *zdev) static inline void zpci_zdev_put(struct zpci_dev *zdev)
...@@ -30,15 +29,3 @@ static inline struct zpci_dev *get_zdev_by_bus(struct pci_bus *bus, ...@@ -30,15 +29,3 @@ static inline struct zpci_dev *get_zdev_by_bus(struct pci_bus *bus,
return (devfn >= ZPCI_FUNCTIONS_PER_BUS) ? NULL : zbus->function[devfn]; return (devfn >= ZPCI_FUNCTIONS_PER_BUS) ? NULL : zbus->function[devfn];
} }
#ifdef CONFIG_PCI_IOV
static inline void zpci_remove_virtfn(struct pci_dev *pdev, int vfn)
{
pci_lock_rescan_remove();
/* Linux' vfid's start at 0 vfn at 1 */
pci_iov_remove_virtfn(pdev->physfn, vfn - 1);
pci_unlock_rescan_remove();
}
#else /* CONFIG_PCI_IOV */
static inline void zpci_remove_virtfn(struct pci_dev *pdev, int vfn) {}
#endif /* CONFIG_PCI_IOV */
...@@ -244,6 +244,7 @@ int clp_add_pci_device(u32 fid, u32 fh, int configured) ...@@ -244,6 +244,7 @@ int clp_add_pci_device(u32 fid, u32 fh, int configured)
return rc; return rc;
} }
static int clp_refresh_fh(u32 fid);
/* /*
* Enable/Disable a given PCI function and update its function handle if * Enable/Disable a given PCI function and update its function handle if
* necessary * necessary
...@@ -286,7 +287,41 @@ static int clp_set_pci_fn(struct zpci_dev *zdev, u8 nr_dma_as, u8 command) ...@@ -286,7 +287,41 @@ static int clp_set_pci_fn(struct zpci_dev *zdev, u8 nr_dma_as, u8 command)
} else if (!rc && rrb->response.hdr.rsp == CLP_RC_SETPCIFN_ALRDY && } else if (!rc && rrb->response.hdr.rsp == CLP_RC_SETPCIFN_ALRDY &&
rrb->response.fh == 0) { rrb->response.fh == 0) {
/* Function is already in desired state - update handle */ /* Function is already in desired state - update handle */
rc = clp_rescan_pci_devices_simple(&fid); rc = clp_refresh_fh(fid);
}
clp_free_block(rrb);
return rc;
}
int clp_setup_writeback_mio(void)
{
struct clp_req_rsp_slpc_pci *rrb;
u8 wb_bit_pos;
int rc;
rrb = clp_alloc_block(GFP_KERNEL);
if (!rrb)
return -ENOMEM;
memset(rrb, 0, sizeof(*rrb));
rrb->request.hdr.len = sizeof(rrb->request);
rrb->request.hdr.cmd = CLP_SLPC;
rrb->response.hdr.len = sizeof(rrb->response);
rc = clp_req(rrb, CLP_LPS_PCI);
if (!rc && rrb->response.hdr.rsp == CLP_RC_OK) {
if (rrb->response.vwb) {
wb_bit_pos = rrb->response.mio_wb;
set_bit_inv(wb_bit_pos, &mio_wb_bit_mask);
zpci_dbg(3, "wb bit: %d\n", wb_bit_pos);
} else {
zpci_dbg(3, "wb bit: n.a.\n");
}
} else {
zpci_err("SLPC PCI:\n");
zpci_err_clp(rrb->response.hdr.rsp, rc);
rc = -EIO;
} }
clp_free_block(rrb); clp_free_block(rrb);
return rc; return rc;
...@@ -374,24 +409,6 @@ static void __clp_add(struct clp_fh_list_entry *entry, void *data) ...@@ -374,24 +409,6 @@ static void __clp_add(struct clp_fh_list_entry *entry, void *data)
clp_add_pci_device(entry->fid, entry->fh, entry->config_state); clp_add_pci_device(entry->fid, entry->fh, entry->config_state);
} }
static void __clp_update(struct clp_fh_list_entry *entry, void *data)
{
struct zpci_dev *zdev;
u32 *fid = data;
if (!entry->vendor_id)
return;
if (fid && *fid != entry->fid)
return;
zdev = get_zdev_by_fid(entry->fid);
if (!zdev)
return;
zdev->fh = entry->fh;
}
int clp_scan_pci_devices(void) int clp_scan_pci_devices(void)
{ {
struct clp_req_rsp_list_pci *rrb; struct clp_req_rsp_list_pci *rrb;
...@@ -407,27 +424,25 @@ int clp_scan_pci_devices(void) ...@@ -407,27 +424,25 @@ int clp_scan_pci_devices(void)
return rc; return rc;
} }
int clp_rescan_pci_devices(void) static void __clp_refresh_fh(struct clp_fh_list_entry *entry, void *data)
{ {
struct clp_req_rsp_list_pci *rrb; struct zpci_dev *zdev;
int rc; u32 fid = *((u32 *)data);
zpci_remove_reserved_devices();
rrb = clp_alloc_block(GFP_KERNEL); if (!entry->vendor_id || fid != entry->fid)
if (!rrb) return;
return -ENOMEM;
rc = clp_list_pci(rrb, NULL, __clp_add); zdev = get_zdev_by_fid(fid);
if (!zdev)
return;
clp_free_block(rrb); zdev->fh = entry->fh;
return rc;
} }
/* Rescan PCI functions and refresh function handles. If fid is non-NULL only /*
* refresh the handle of the function matching @fid * Refresh the function handle of the function matching @fid
*/ */
int clp_rescan_pci_devices_simple(u32 *fid) static int clp_refresh_fh(u32 fid)
{ {
struct clp_req_rsp_list_pci *rrb; struct clp_req_rsp_list_pci *rrb;
int rc; int rc;
...@@ -436,7 +451,7 @@ int clp_rescan_pci_devices_simple(u32 *fid) ...@@ -436,7 +451,7 @@ int clp_rescan_pci_devices_simple(u32 *fid)
if (!rrb) if (!rrb)
return -ENOMEM; return -ENOMEM;
rc = clp_list_pci(rrb, fid, __clp_update); rc = clp_list_pci(rrb, &fid, __clp_refresh_fh);
clp_free_block(rrb); clp_free_block(rrb);
return rc; return rc;
...@@ -495,7 +510,7 @@ static int clp_base_command(struct clp_req *req, struct clp_req_hdr *lpcb) ...@@ -495,7 +510,7 @@ static int clp_base_command(struct clp_req *req, struct clp_req_hdr *lpcb)
} }
} }
static int clp_pci_slpc(struct clp_req *req, struct clp_req_rsp_slpc *lpcb) static int clp_pci_slpc(struct clp_req *req, struct clp_req_rsp_slpc_pci *lpcb)
{ {
unsigned long limit = PAGE_SIZE - sizeof(lpcb->request); unsigned long limit = PAGE_SIZE - sizeof(lpcb->request);
......
...@@ -152,7 +152,8 @@ static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf) ...@@ -152,7 +152,8 @@ static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf)
} }
break; break;
case 0x0306: /* 0x308 or 0x302 for multiple devices */ case 0x0306: /* 0x308 or 0x302 for multiple devices */
clp_rescan_pci_devices(); zpci_remove_reserved_devices();
clp_scan_pci_devices();
break; break;
case 0x0308: /* Standby -> Reserved */ case 0x0308: /* Standby -> Reserved */
if (!zdev) if (!zdev)
......
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright IBM Corp. 2020
*
* Author(s):
* Niklas Schnelle <schnelle@linux.ibm.com>
*
*/
#define KMSG_COMPONENT "zpci"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/kernel.h>
#include <linux/pci.h>
#include "pci_iov.h"
static struct resource iov_res = {
.name = "PCI IOV res",
.start = 0,
.end = -1,
.flags = IORESOURCE_MEM,
};
void zpci_iov_map_resources(struct pci_dev *pdev)
{
resource_size_t len;
int i;
for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
int bar = i + PCI_IOV_RESOURCES;
len = pci_resource_len(pdev, bar);
if (!len)
continue;
pdev->resource[bar].parent = &iov_res;
}
}
void zpci_iov_remove_virtfn(struct pci_dev *pdev, int vfn)
{
pci_lock_rescan_remove();
/* Linux vfids start at 0, vfn at 1 */
pci_iov_remove_virtfn(pdev->physfn, vfn - 1);
pci_unlock_rescan_remove();
}
static int zpci_iov_link_virtfn(struct pci_dev *pdev, struct pci_dev *virtfn, int vfid)
{
int rc;
rc = pci_iov_sysfs_link(pdev, virtfn, vfid);
if (rc)
return rc;
virtfn->is_virtfn = 1;
virtfn->multifunction = 0;
virtfn->physfn = pci_dev_get(pdev);
return 0;
}
int zpci_iov_setup_virtfn(struct zpci_bus *zbus, struct pci_dev *virtfn, int vfn)
{
int i, cand_devfn;
struct zpci_dev *zdev;
struct pci_dev *pdev;
int vfid = vfn - 1; /* Linux vfids start at 0, vfn at 1 */
int rc = 0;
if (!zbus->multifunction)
return 0;
/* If the parent PF for the given VF is also configured in the
* instance, it must be on the same zbus.
* We can then identify the parent PF by checking what
* devfn the VF would have if it belonged to that PF using the PF's
* stride and offset. Only if this candidate devfn matches the
* actual devfn will we link both functions.
*/
for (i = 0; i < ZPCI_FUNCTIONS_PER_BUS; i++) {
zdev = zbus->function[i];
if (zdev && zdev->is_physfn) {
pdev = pci_get_slot(zbus->bus, zdev->devfn);
if (!pdev)
continue;
cand_devfn = pci_iov_virtfn_devfn(pdev, vfid);
if (cand_devfn == virtfn->devfn) {
rc = zpci_iov_link_virtfn(pdev, virtfn, vfid);
/* balance pci_get_slot() */
pci_dev_put(pdev);
break;
}
/* balance pci_get_slot() */
pci_dev_put(pdev);
}
}
return rc;
}
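The candidate-devfn check above leans on pci_iov_virtfn_devfn() from the PCI core, which derives a VF's devfn from the PF's SR-IOV offset and stride. Roughly, as an illustration only (with the 256-functions-per-bus wrap made explicit):
static int candidate_vf_devfn(int pf_devfn, int offset, int stride, int vfid)
{
	/* e.g. pf_devfn 0, offset 1, stride 1: vfid 0 -> 1, vfid 2 -> 3 */
	return (pf_devfn + offset + stride * vfid) % 256;
}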
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright IBM Corp. 2020
*
* Author(s):
* Niklas Schnelle <schnelle@linux.ibm.com>
*
*/
#ifndef __S390_PCI_IOV_H
#define __S390_PCI_IOV_H
#ifdef CONFIG_PCI_IOV
void zpci_iov_remove_virtfn(struct pci_dev *pdev, int vfn);
void zpci_iov_map_resources(struct pci_dev *pdev);
int zpci_iov_setup_virtfn(struct zpci_bus *zbus, struct pci_dev *virtfn, int vfn);
#else /* CONFIG_PCI_IOV */
static inline void zpci_iov_remove_virtfn(struct pci_dev *pdev, int vfn) {}
static inline void zpci_iov_map_resources(struct pci_dev *pdev) {}
static inline int zpci_iov_setup_virtfn(struct zpci_bus *zbus, struct pci_dev *virtfn, int vfn)
{
return 0;
}
#endif /* CONFIG_PCI_IOV */
#endif /* __S390_PCI_IOV_H */
# SPDX-License-Identifier: GPL-2.0
chkbss-target ?= built-in.a
$(obj)/$(chkbss-target): chkbss
chkbss-files := $(addsuffix .chkbss, $(chkbss))
clean-files += $(chkbss-files)
PHONY += chkbss
chkbss: $(addprefix $(obj)/, $(chkbss-files))
quiet_cmd_chkbss = CHKBSS $<
cmd_chkbss = \
if ! $(OBJSIZE) --common $< | $(AWK) 'END { if ($$3) exit 1 }'; then \
echo "error: $< .bss section is not empty" >&2; exit 1; \
fi; \
touch $@;
$(obj)/%.o.chkbss: $(obj)/%.o
$(call cmd,chkbss)
...@@ -71,10 +71,26 @@ config ZCRYPT ...@@ -71,10 +71,26 @@ config ZCRYPT
help help
Select this option if you want to enable support for Select this option if you want to enable support for
s390 cryptographic adapters like: s390 cryptographic adapters like:
+ PCI-X Cryptographic Coprocessor (PCIXCC) + Crypto Express 2 up to 7 Coprocessor (CEXxC)
+ Crypto Express 2,3,4 or 5 Coprocessor (CEXxC) + Crypto Express 2 up to 7 Accelerator (CEXxA)
+ Crypto Express 2,3,4 or 5 Accelerator (CEXxA) + Crypto Express 4 up to 7 EP11 Coprocessor (CEXxP)
+ Crypto Express 4 or 5 EP11 Coprocessor (CEXxP)
config ZCRYPT_DEBUG
bool "Enable debug features for s390 cryptographic adapters"
default n
depends on DEBUG_KERNEL
depends on ZCRYPT
help
Say 'Y' here to enable additional debug features in the
s390 cryptographic adapters driver.
Additional sysfs attributes are displayed for AP cards and queues,
and some flags on crypto requests are interpreted as debugging
messages to force error injection.
Do not enable this on a production-level kernel build.
If unsure, say N.
config ZCRYPT_MULTIDEVNODES config ZCRYPT_MULTIDEVNODES
bool "Support for multiple zcrypt device nodes" bool "Support for multiple zcrypt device nodes"
......
...@@ -34,6 +34,8 @@ obj-$(CONFIG_SCLP_VT220_TTY) += sclp_vt220.o ...@@ -34,6 +34,8 @@ obj-$(CONFIG_SCLP_VT220_TTY) += sclp_vt220.o
obj-$(CONFIG_PCI) += sclp_pci.o obj-$(CONFIG_PCI) += sclp_pci.o
obj-$(subst m,y,$(CONFIG_ZCRYPT)) += sclp_ap.o
obj-$(CONFIG_VMLOGRDR) += vmlogrdr.o obj-$(CONFIG_VMLOGRDR) += vmlogrdr.o
obj-$(CONFIG_VMCP) += vmcp.o obj-$(CONFIG_VMCP) += vmcp.o
......
...@@ -978,7 +978,6 @@ static int tty3215_install(struct tty_driver *driver, struct tty_struct *tty) ...@@ -978,7 +978,6 @@ static int tty3215_install(struct tty_driver *driver, struct tty_struct *tty)
static int tty3215_open(struct tty_struct *tty, struct file * filp) static int tty3215_open(struct tty_struct *tty, struct file * filp)
{ {
struct raw3215_info *raw = tty->driver_data; struct raw3215_info *raw = tty->driver_data;
int retval;
tty_port_tty_set(&raw->port, tty); tty_port_tty_set(&raw->port, tty);
...@@ -986,11 +985,7 @@ static int tty3215_open(struct tty_struct *tty, struct file * filp) ...@@ -986,11 +985,7 @@ static int tty3215_open(struct tty_struct *tty, struct file * filp)
/* /*
* Start up 3215 device * Start up 3215 device
*/ */
retval = raw3215_startup(raw); return raw3215_startup(raw);
if (retval)
return retval;
return 0;
} }
/* /*
......
...@@ -110,7 +110,6 @@ struct raw3270_request { ...@@ -110,7 +110,6 @@ struct raw3270_request {
}; };
struct raw3270_request *raw3270_request_alloc(size_t size); struct raw3270_request *raw3270_request_alloc(size_t size);
struct raw3270_request *raw3270_request_alloc_bootmem(size_t size);
void raw3270_request_free(struct raw3270_request *); void raw3270_request_free(struct raw3270_request *);
void raw3270_request_reset(struct raw3270_request *); void raw3270_request_reset(struct raw3270_request *);
void raw3270_request_set_cmd(struct raw3270_request *, u8 cmd); void raw3270_request_set_cmd(struct raw3270_request *, u8 cmd);
......
...@@ -229,7 +229,7 @@ static inline void sclp_fill_core_info(struct sclp_core_info *info, ...@@ -229,7 +229,7 @@ static inline void sclp_fill_core_info(struct sclp_core_info *info,
#define SCLP_HAS_CPU_INFO (sclp.facilities & 0x0800000000000000ULL) #define SCLP_HAS_CPU_INFO (sclp.facilities & 0x0800000000000000ULL)
#define SCLP_HAS_CPU_RECONFIG (sclp.facilities & 0x0400000000000000ULL) #define SCLP_HAS_CPU_RECONFIG (sclp.facilities & 0x0400000000000000ULL)
#define SCLP_HAS_PCI_RECONFIG (sclp.facilities & 0x0000000040000000ULL) #define SCLP_HAS_PCI_RECONFIG (sclp.facilities & 0x0000000040000000ULL)
#define SCLP_HAS_AP_RECONFIG (sclp.facilities & 0x0000000100000000ULL)
struct gds_subvector { struct gds_subvector {
u8 length; u8 length;
...@@ -305,9 +305,7 @@ int sclp_deactivate(void); ...@@ -305,9 +305,7 @@ int sclp_deactivate(void);
int sclp_reactivate(void); int sclp_reactivate(void);
int sclp_sync_request(sclp_cmdw_t command, void *sccb); int sclp_sync_request(sclp_cmdw_t command, void *sccb);
int sclp_sync_request_timeout(sclp_cmdw_t command, void *sccb, int timeout); int sclp_sync_request_timeout(sclp_cmdw_t command, void *sccb, int timeout);
int sclp_sdias_init(void); int sclp_sdias_init(void);
void sclp_sdias_exit(void);
enum { enum {
sclp_init_state_uninitialized, sclp_init_state_uninitialized,
......
// SPDX-License-Identifier: GPL-2.0
/*
* s390 crypto adapter related sclp functions.
*
* Copyright IBM Corp. 2020
*/
#define KMSG_COMPONENT "sclp_cmd"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/export.h>
#include <linux/slab.h>
#include <asm/sclp.h>
#include "sclp.h"
#define SCLP_CMDW_CONFIGURE_AP 0x001f0001
#define SCLP_CMDW_DECONFIGURE_AP 0x001e0001
struct ap_cfg_sccb {
struct sccb_header header;
} __packed;
static int do_ap_configure(sclp_cmdw_t cmd, u32 apid)
{
struct ap_cfg_sccb *sccb;
int rc;
if (!SCLP_HAS_AP_RECONFIG)
return -EOPNOTSUPP;
sccb = (struct ap_cfg_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (!sccb)
return -ENOMEM;
sccb->header.length = PAGE_SIZE;
cmd |= (apid & 0xFF) << 8;
rc = sclp_sync_request(cmd, sccb);
if (rc)
goto out;
switch (sccb->header.response_code) {
case 0x0020: case 0x0120: case 0x0440: case 0x0450:
break;
default:
pr_warn("configure AP adapter %u failed: cmd=0x%08x response=0x%04x\n",
apid, cmd, sccb->header.response_code);
rc = -EIO;
break;
}
out:
free_page((unsigned long) sccb);
return rc;
}
int sclp_ap_configure(u32 apid)
{
return do_ap_configure(SCLP_CMDW_CONFIGURE_AP, apid);
}
EXPORT_SYMBOL(sclp_ap_configure);
int sclp_ap_deconfigure(u32 apid)
{
return do_ap_configure(SCLP_CMDW_DECONFIGURE_AP, apid);
}
EXPORT_SYMBOL(sclp_ap_deconfigure);
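A hedged sketch of a call site; the real callers are the AP bus config/deconfig paths added elsewhere in this series, and only the two exported functions above are assumed:
static int set_ap_config_state(u32 apid, bool configure)
{
	/* flip the adapter's SCLP configuration state by AP id */
	return configure ? sclp_ap_configure(apid)
			 : sclp_ap_deconfigure(apid);
}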
...@@ -17,12 +17,12 @@ ...@@ -17,12 +17,12 @@
static struct read_info_sccb __bootdata(sclp_info_sccb); static struct read_info_sccb __bootdata(sclp_info_sccb);
static int __bootdata(sclp_info_sccb_valid); static int __bootdata(sclp_info_sccb_valid);
char *sclp_early_sccb = (char *) EARLY_SCCB_OFFSET; char *sclp_early_sccb = (char *) EARLY_SCCB_OFFSET;
int sclp_init_state __section(.data) = sclp_init_state_uninitialized; int sclp_init_state = sclp_init_state_uninitialized;
/* /*
* Used to keep track of the size of the event masks. Qemu until version 2.11 * Used to keep track of the size of the event masks. Qemu until version 2.11
* only supports 4 and needs a workaround. * only supports 4 and needs a workaround.
*/ */
bool sclp_mask_compat_mode __section(.data); bool sclp_mask_compat_mode;
void sclp_early_wait_irq(void) void sclp_early_wait_irq(void)
{ {
...@@ -214,11 +214,11 @@ static int sclp_early_setup(int disable, int *have_linemode, int *have_vt220) ...@@ -214,11 +214,11 @@ static int sclp_early_setup(int disable, int *have_linemode, int *have_vt220)
* Output one or more lines of text on the SCLP console (VT220 and / * Output one or more lines of text on the SCLP console (VT220 and /
* or line-mode). * or line-mode).
*/ */
void __sclp_early_printk(const char *str, unsigned int len, unsigned int force) void __sclp_early_printk(const char *str, unsigned int len)
{ {
int have_linemode, have_vt220; int have_linemode, have_vt220;
if (!force && sclp_init_state != sclp_init_state_uninitialized) if (sclp_init_state != sclp_init_state_uninitialized)
return; return;
if (sclp_early_setup(0, &have_linemode, &have_vt220) != 0) if (sclp_early_setup(0, &have_linemode, &have_vt220) != 0)
return; return;
...@@ -231,12 +231,7 @@ void __sclp_early_printk(const char *str, unsigned int len, unsigned int force) ...@@ -231,12 +231,7 @@ void __sclp_early_printk(const char *str, unsigned int len, unsigned int force)
void sclp_early_printk(const char *str) void sclp_early_printk(const char *str)
{ {
__sclp_early_printk(str, strlen(str), 0); __sclp_early_printk(str, strlen(str));
}
void sclp_early_printk_force(const char *str)
{
__sclp_early_printk(str, strlen(str), 1);
} }
int __init sclp_early_read_info(void) int __init sclp_early_read_info(void)
......
...@@ -336,24 +336,6 @@ sclp_chars_in_buffer(struct sclp_buffer *buffer) ...@@ -336,24 +336,6 @@ sclp_chars_in_buffer(struct sclp_buffer *buffer)
return count; return count;
} }
/*
* sets or provides some values that influence the drivers behaviour
*/
void
sclp_set_columns(struct sclp_buffer *buffer, unsigned short columns)
{
buffer->columns = columns;
if (buffer->current_line != NULL &&
buffer->current_length > buffer->columns)
sclp_finalize_mto(buffer);
}
void
sclp_set_htab(struct sclp_buffer *buffer, unsigned short htab)
{
buffer->htab = htab;
}
/* /*
* called by sclp_console_init and/or sclp_tty_init * called by sclp_console_init and/or sclp_tty_init
*/ */
......
...@@ -86,8 +86,6 @@ void *sclp_unmake_buffer(struct sclp_buffer *); ...@@ -86,8 +86,6 @@ void *sclp_unmake_buffer(struct sclp_buffer *);
int sclp_buffer_space(struct sclp_buffer *); int sclp_buffer_space(struct sclp_buffer *);
int sclp_write(struct sclp_buffer *buffer, const unsigned char *, int); int sclp_write(struct sclp_buffer *buffer, const unsigned char *, int);
int sclp_emit_buffer(struct sclp_buffer *,void (*)(struct sclp_buffer *,int)); int sclp_emit_buffer(struct sclp_buffer *,void (*)(struct sclp_buffer *,int));
void sclp_set_columns(struct sclp_buffer *, unsigned short);
void sclp_set_htab(struct sclp_buffer *, unsigned short);
int sclp_chars_in_buffer(struct sclp_buffer *); int sclp_chars_in_buffer(struct sclp_buffer *);
#ifdef CONFIG_SCLP_CONSOLE #ifdef CONFIG_SCLP_CONSOLE
......
...@@ -257,7 +257,7 @@ static int __init sclp_sdias_init_async(void) ...@@ -257,7 +257,7 @@ static int __init sclp_sdias_init_async(void)
int __init sclp_sdias_init(void) int __init sclp_sdias_init(void)
{ {
if (ipl_info.type != IPL_TYPE_FCP_DUMP) if (!is_ipl_type_dump())
return 0; return 0;
sclp_sdias_sccb = (void *) __get_free_page(GFP_KERNEL | GFP_DMA); sclp_sdias_sccb = (void *) __get_free_page(GFP_KERNEL | GFP_DMA);
BUG_ON(!sclp_sdias_sccb); BUG_ON(!sclp_sdias_sccb);
...@@ -275,9 +275,3 @@ int __init sclp_sdias_init(void) ...@@ -275,9 +275,3 @@ int __init sclp_sdias_init(void)
TRACE("init done\n"); TRACE("init done\n");
return 0; return 0;
} }
void __exit sclp_sdias_exit(void)
{
debug_unregister(sdias_dbf);
sclp_unregister(&sclp_sdias_register);
}
...@@ -238,7 +238,6 @@ extern int tape_do_io(struct tape_device *, struct tape_request *); ...@@ -238,7 +238,6 @@ extern int tape_do_io(struct tape_device *, struct tape_request *);
extern int tape_do_io_async(struct tape_device *, struct tape_request *); extern int tape_do_io_async(struct tape_device *, struct tape_request *);
extern int tape_do_io_interruptible(struct tape_device *, struct tape_request *); extern int tape_do_io_interruptible(struct tape_device *, struct tape_request *);
extern int tape_cancel_io(struct tape_device *, struct tape_request *); extern int tape_cancel_io(struct tape_device *, struct tape_request *);
void tape_hotplug_event(struct tape_device *, int major, int action);
static inline int static inline int
tape_do_io_free(struct tape_device *device, struct tape_request *request) tape_do_io_free(struct tape_device *device, struct tape_request *request)
...@@ -258,8 +257,6 @@ tape_do_io_async_free(struct tape_device *device, struct tape_request *request) ...@@ -258,8 +257,6 @@ tape_do_io_async_free(struct tape_device *device, struct tape_request *request)
tape_do_io_async(device, request); tape_do_io_async(device, request);
} }
extern int tape_oper_handler(int irq, int status);
extern void tape_noper_handler(int irq, int status);
extern int tape_open(struct tape_device *); extern int tape_open(struct tape_device *);
extern int tape_release(struct tape_device *); extern int tape_release(struct tape_device *);
extern int tape_mtop(struct tape_device *, int, int); extern int tape_mtop(struct tape_device *, int, int);
......
...@@ -101,7 +101,6 @@ struct tape_request *tape_std_read_block(struct tape_device *, size_t); ...@@ -101,7 +101,6 @@ struct tape_request *tape_std_read_block(struct tape_device *, size_t);
void tape_std_read_backward(struct tape_device *device, void tape_std_read_backward(struct tape_device *device,
struct tape_request *request); struct tape_request *request);
struct tape_request *tape_std_write_block(struct tape_device *, size_t); struct tape_request *tape_std_write_block(struct tape_device *, size_t);
void tape_std_check_locate(struct tape_device *, struct tape_request *);
/* Some non-mtop commands. */ /* Some non-mtop commands. */
int tape_std_assign(struct tape_device *); int tape_std_assign(struct tape_device *);
...@@ -131,19 +130,8 @@ int tape_std_mtunload(struct tape_device *, int); ...@@ -131,19 +130,8 @@ int tape_std_mtunload(struct tape_device *, int);
int tape_std_mtweof(struct tape_device *, int); int tape_std_mtweof(struct tape_device *, int);
/* Event handlers */ /* Event handlers */
void tape_std_default_handler(struct tape_device *);
void tape_std_unexpect_uchk_handler(struct tape_device *);
void tape_std_irq(struct tape_device *);
void tape_std_process_eov(struct tape_device *); void tape_std_process_eov(struct tape_device *);
// the error recovery stuff:
void tape_std_error_recovery(struct tape_device *);
void tape_std_error_recovery_has_failed(struct tape_device *,int error_id);
void tape_std_error_recovery_succeded(struct tape_device *);
void tape_std_error_recovery_do_retry(struct tape_device *);
void tape_std_error_recovery_read_opposite(struct tape_device *);
void tape_std_error_recovery_HWBUG(struct tape_device *, int condno);
/* S390 tape types */ /* S390 tape types */
enum s390_tape_type { enum s390_tape_type {
tape_3480, tape_3480,
......