Commit 10de638d authored by Linus Torvalds

Merge tag 's390-6.4-1' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux

Pull s390 updates from Vasily Gorbik:

 - Add support for the stackleak feature. Also allow specifying an
   architecture-specific stackleak poison function to enable a faster
   implementation. On s390, the mvc-based implementation cuts the typical
   overhead from a factor of 3 down to just 25% (a hedged sketch of the
   idea follows this list)

 - Convert all assembler files to use SYM* style macros, deprecating the
   ENTRY() macro and other annotations. Select ARCH_USE_SYM_ANNOTATIONS

 - Improve KASLR to also randomize module and special amode31 code base
   load addresses

 - Rework decompressor memory tracking to support memory holes and
   improve error handling

 - Add support for protected virtualization AP binding

 - Add support for set_direct_map() calls

 - Implement set_memory_rox() and noexec module_alloc()

 - Remove obsolete overriding of mem*() functions for KASAN

 - Rework kexec/kdump to avoid using nodat_stack to call purgatory

 - Convert the rest of the s390 code to use flexible-array member
   instead of a zero-length array

 - Clean up uaccess inline asm

 - Enable ARCH_HAS_MEMBARRIER_SYNC_CORE

 - Convert to using CONFIG_FUNCTION_ALIGNMENT and enable
   DEBUG_FORCE_FUNCTION_ALIGN_64B

 - Resolve last_break in userspace fault reports

 - Simplify one-level sysctl registration

 - Clean up branch prediction handling

 - Rework CPU counter facility to retrieve available counter sets just
   once

 - Other various small fixes and improvements all over the code
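
As a rough illustration of the stackleak item above: the generic poison loop
stores one word at a time, and an architecture may override
__stackleak_poison() with something faster. The sketch below is hypothetical
standalone C, not the kernel code; poison_doubling() only mimics the
copy-doubling effect that the s390 mvc-based variant achieves in inline
assembly, and only STACKLEAK_POISON and __stackleak_poison are real kernel
names (see include/linux/stackleak.h):

#include <string.h>

#define STACKLEAK_POISON -0xBEEF        /* as in include/linux/stackleak.h */

/* Generic fallback: one word store per iteration. */
static void poison_generic(unsigned long *lo, unsigned long *hi)
{
        while (lo < hi)
                *lo++ = STACKLEAK_POISON;
}

/*
 * Copy-doubling sketch (hypothetical name): seed one poison word, then
 * repeatedly copy the already-poisoned prefix over the bytes following it,
 * doubling the poisoned area each step. On s390 each copy maps to an mvc
 * instruction, which is what makes this cheap. Assumes len is normally a
 * multiple of the word size; a trailing partial copy still writes poison
 * pattern bytes.
 */
static void poison_doubling(unsigned char *p, size_t len)
{
        const unsigned long poison = STACKLEAK_POISON;
        size_t done;

        if (len < sizeof(poison))
                return;
        memcpy(p, &poison, sizeof(poison));             /* seed 8 bytes */
        for (done = sizeof(poison); done < len; done *= 2)
                memcpy(p + done, p, done < len - done ? done : len - done);
}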

* tag 's390-6.4-1' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux: (118 commits)
  s390/stackleak: provide fast __stackleak_poison() implementation
  stackleak: allow to specify arch specific stackleak poison function
  s390: select ARCH_USE_SYM_ANNOTATIONS
  s390/mm: use VM_FLUSH_RESET_PERMS in module_alloc()
  s390: wire up memfd_secret system call
  s390/mm: enable ARCH_HAS_SET_DIRECT_MAP
  s390/mm: use BIT macro to generate SET_MEMORY bit masks
  s390/relocate_kernel: adjust indentation
  s390/relocate_kernel: use SYM* macros instead of ENTRY(), etc.
  s390/entry: use SYM* macros instead of ENTRY(), etc.
  s390/purgatory: use SYM* macros instead of ENTRY(), etc.
  s390/kprobes: use SYM* macros instead of ENTRY(), etc.
  s390/reipl: use SYM* macros instead of ENTRY(), etc.
  s390/head64: use SYM* macros instead of ENTRY(), etc.
  s390/earlypgm: use SYM* macros instead of ENTRY(), etc.
  s390/mcount: use SYM* macros instead of ENTRY(), etc.
  s390/crc32le: use SYM* macros instead of ENTRY(), etc.
  s390/crc32be: use SYM* macros instead of ENTRY(), etc.
  s390/crypto,chacha: use SYM* macros instead of ENTRY(), etc.
  s390/amode31: use SYM* macros instead of ENTRY(), etc.
  ...
parents d55571c0 2a405f6b
@@ -5,7 +5,7 @@
 #
 # Architecture requirements
 #
-# * arm/arm64/powerpc
+# * arm/arm64/powerpc/s390
 #
 # Rely on implicit context synchronization as a result of exception return
 # when returning from IPI handler, and when returning to user-space.
@@ -45,7 +45,7 @@
     |     parisc: | TODO |
     |    powerpc: |  ok  |
     |      riscv: | TODO |
-    |       s390: | TODO |
+    |       s390: |  ok  |
     |         sh: | TODO |
     |      sparc: | TODO |
     |         um: | TODO |
...
@@ -26,10 +26,6 @@ config GENERIC_BUG
 config GENERIC_BUG_RELATIVE_POINTERS
        def_bool y

-config GENERIC_CSUM
-       bool
-       default y if KASAN
-
 config GENERIC_LOCKBREAK
        def_bool y if PREEMPTION

@@ -76,10 +72,12 @@ config S390
        select ARCH_HAS_GCOV_PROFILE_ALL
        select ARCH_HAS_GIGANTIC_PAGE
        select ARCH_HAS_KCOV
+       select ARCH_HAS_MEMBARRIER_SYNC_CORE
        select ARCH_HAS_MEM_ENCRYPT
        select ARCH_HAS_NMI_SAFE_THIS_CPU_OPS
        select ARCH_HAS_PTE_SPECIAL
        select ARCH_HAS_SCALED_CPUTIME
+       select ARCH_HAS_SET_DIRECT_MAP
        select ARCH_HAS_SET_MEMORY
        select ARCH_HAS_STRICT_KERNEL_RWX
        select ARCH_HAS_STRICT_MODULE_RWX
@@ -123,6 +121,7 @@ config S390
        select ARCH_SUPPORTS_PER_VMA_LOCK
        select ARCH_USE_BUILTIN_BSWAP
        select ARCH_USE_CMPXCHG_LOCKREF
+       select ARCH_USE_SYM_ANNOTATIONS
        select ARCH_WANTS_DYNAMIC_TASK_STRUCT
        select ARCH_WANTS_NO_INSTR
        select ARCH_WANT_DEFAULT_BPF_JIT
@@ -132,6 +131,8 @@ config S390
        select CLONE_BACKWARDS2
        select DMA_OPS if PCI
        select DYNAMIC_FTRACE if FUNCTION_TRACER
+       select FUNCTION_ALIGNMENT_8B if CC_IS_GCC
+       select FUNCTION_ALIGNMENT_16B if !CC_IS_GCC
        select GCC12_NO_ARRAY_BOUNDS
        select GENERIC_ALLOCATOR
        select GENERIC_CPU_AUTOPROBE
@@ -153,6 +154,7 @@ config S390
        select HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET
        select HAVE_ARCH_SECCOMP_FILTER
        select HAVE_ARCH_SOFT_DIRTY
+       select HAVE_ARCH_STACKLEAK
        select HAVE_ARCH_TRACEHOOK
        select HAVE_ARCH_TRANSPARENT_HUGEPAGE
        select HAVE_ARCH_VMAP_STACK
...
@@ -66,16 +66,6 @@ static struct ctl_table appldata_table[] = {
        { },
 };

-static struct ctl_table appldata_dir_table[] = {
-       {
-               .procname       = appldata_proc_name,
-               .maxlen         = 0,
-               .mode           = S_IRUGO | S_IXUGO,
-               .child          = appldata_table,
-       },
-       { },
-};
-
 /*
  * Timer
  */
@@ -291,7 +281,7 @@ appldata_generic_handler(struct ctl_table *ctl, int write,
        mutex_lock(&appldata_ops_mutex);
        list_for_each(lh, &appldata_ops_list) {
                tmp_ops = list_entry(lh, struct appldata_ops, list);
-               if (&tmp_ops->ctl_table[2] == ctl) {
+               if (&tmp_ops->ctl_table[0] == ctl) {
                        found = 1;
                }
        }
@@ -361,7 +351,8 @@ int appldata_register_ops(struct appldata_ops *ops)
        if (ops->size > APPLDATA_MAX_REC_SIZE)
                return -EINVAL;

-       ops->ctl_table = kcalloc(4, sizeof(struct ctl_table), GFP_KERNEL);
+       /* The last entry must be an empty one */
+       ops->ctl_table = kcalloc(2, sizeof(struct ctl_table), GFP_KERNEL);
        if (!ops->ctl_table)
                return -ENOMEM;

@@ -369,17 +360,12 @@ int appldata_register_ops(struct appldata_ops *ops)
        list_add(&ops->list, &appldata_ops_list);
        mutex_unlock(&appldata_ops_mutex);

-       ops->ctl_table[0].procname = appldata_proc_name;
-       ops->ctl_table[0].maxlen   = 0;
-       ops->ctl_table[0].mode     = S_IRUGO | S_IXUGO;
-       ops->ctl_table[0].child    = &ops->ctl_table[2];
-
-       ops->ctl_table[2].procname = ops->name;
-       ops->ctl_table[2].mode     = S_IRUGO | S_IWUSR;
-       ops->ctl_table[2].proc_handler = appldata_generic_handler;
-       ops->ctl_table[2].data = ops;
+       ops->ctl_table[0].procname = ops->name;
+       ops->ctl_table[0].mode = S_IRUGO | S_IWUSR;
+       ops->ctl_table[0].proc_handler = appldata_generic_handler;
+       ops->ctl_table[0].data = ops;

-       ops->sysctl_header = register_sysctl_table(ops->ctl_table);
+       ops->sysctl_header = register_sysctl(appldata_proc_name, ops->ctl_table);
        if (!ops->sysctl_header)
                goto out;

        return 0;
@@ -422,7 +408,7 @@ static int __init appldata_init(void)
        appldata_wq = alloc_ordered_workqueue("appldata", 0);
        if (!appldata_wq)
                return -ENOMEM;

-       appldata_sysctl_header = register_sysctl_table(appldata_dir_table);
+       appldata_sysctl_header = register_sysctl(appldata_proc_name, appldata_table);

        return 0;
 }
...
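The appldata hunks above are an instance of the "simplify one-level sysctl
registration" item from the summary: the deprecated .child-chained directory
table goes away and the leaf table is registered directly under a path
string, letting the sysctl core create the directory. A minimal sketch of the
two styles, with made-up names (my_dir, my_table, my_interval):

static int my_interval;

static struct ctl_table my_table[] = {
        {
                .procname       = "interval",
                .data           = &my_interval,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec,
        },
        { }     /* the last entry must be an empty one */
};

/* Old, deprecated two-level style: a directory table chained via .child. */
static struct ctl_table my_dir[] = {
        {
                .procname       = "appldata",
                .maxlen         = 0,
                .mode           = 0555,
                .child          = my_table,
        },
        { }
};
/* register_sysctl_table(my_dir); */

/* New one-level style: the core creates the "appldata" directory itself. */
/* register_sysctl("appldata", my_table); */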
@@ -35,7 +35,7 @@ endif
 CFLAGS_sclp_early_core.o += -I$(srctree)/drivers/s390/char

-obj-y  := head.o als.o startup.o mem_detect.o ipl_parm.o ipl_report.o vmem.o
+obj-y  := head.o als.o startup.o physmem_info.o ipl_parm.o ipl_report.o vmem.o
 obj-y  += string.o ebcdic.o sclp_early_core.o mem.o ipl_vmparm.o cmdline.o
 obj-y  += version.o pgm_check_info.o ctype.o ipl_data.o machine_kexec_reloc.o
 obj-$(findstring y, $(CONFIG_PROTECTED_VIRTUALIZATION_GUEST) $(CONFIG_PGSTE)) += uv.o
...
@@ -8,6 +8,8 @@

 #ifndef __ASSEMBLY__

+#include <asm/physmem_info.h>
+
 struct machine_info {
        unsigned char has_edat1 : 1;
        unsigned char has_edat2 : 1;
@@ -30,24 +32,46 @@ struct vmlinux_info {
        unsigned long init_mm_off;
        unsigned long swapper_pg_dir_off;
        unsigned long invalid_pg_dir_off;
+#ifdef CONFIG_KASAN
+       unsigned long kasan_early_shadow_page_off;
+       unsigned long kasan_early_shadow_pte_off;
+       unsigned long kasan_early_shadow_pmd_off;
+       unsigned long kasan_early_shadow_pud_off;
+       unsigned long kasan_early_shadow_p4d_off;
+#endif
 };

 void startup_kernel(void);
-unsigned long detect_memory(unsigned long *safe_addr);
-void mem_detect_set_usable_limit(unsigned long limit);
+unsigned long detect_max_physmem_end(void);
+void detect_physmem_online_ranges(unsigned long max_physmem_end);
+void physmem_set_usable_limit(unsigned long limit);
+void physmem_reserve(enum reserved_range_type type, unsigned long addr, unsigned long size);
+void physmem_free(enum reserved_range_type type);
+/* for continuous/multiple allocations per type */
+unsigned long physmem_alloc_top_down(enum reserved_range_type type, unsigned long size,
+                                    unsigned long align);
+/* for single allocations, 1 per type */
+unsigned long physmem_alloc_range(enum reserved_range_type type, unsigned long size,
+                                 unsigned long align, unsigned long min, unsigned long max,
+                                 bool die_on_oom);
+unsigned long get_physmem_alloc_pos(void);
+bool ipl_report_certs_intersects(unsigned long addr, unsigned long size,
+                                unsigned long *intersection_start);
 bool is_ipl_block_dump(void);
 void store_ipl_parmblock(void);
-unsigned long read_ipl_report(unsigned long safe_addr);
+int read_ipl_report(void);
+void save_ipl_cert_comp_list(void);
 void setup_boot_command_line(void);
 void parse_boot_command_line(void);
 void verify_facilities(void);
 void print_missing_facilities(void);
 void sclp_early_setup_buffer(void);
 void print_pgm_check_info(void);
-unsigned long get_random_base(unsigned long safe_addr);
+unsigned long randomize_within_range(unsigned long size, unsigned long align,
+                                    unsigned long min, unsigned long max);
 void setup_vmem(unsigned long asce_limit);
-unsigned long vmem_estimate_memory_needs(unsigned long online_mem_total);
 void __printf(1, 2) decompressor_printk(const char *fmt, ...);
+void print_stacktrace(unsigned long sp);
 void error(char *m);

 extern struct machine_info machine;
@@ -57,12 +81,11 @@ extern const char kernel_version[];
 extern unsigned long memory_limit;
 extern unsigned long vmalloc_size;
 extern int vmalloc_size_set;
-extern int kaslr_enabled;
 extern char __boot_data_start[], __boot_data_end[];
 extern char __boot_data_preserved_start[], __boot_data_preserved_end[];
 extern char _decompressor_syms_start[], _decompressor_syms_end[];
 extern char _stack_start[], _stack_end[];
-extern char _end[];
+extern char _end[], _decompressor_end[];
 extern unsigned char _compressed_start[];
 extern unsigned char _compressed_end[];
 extern struct vmlinux_info _vmlinux_info;
@@ -70,5 +93,10 @@ extern struct vmlinux_info _vmlinux_info;
 #define __abs_lowcore_pa(x)    (((unsigned long)(x) - __abs_lowcore) % sizeof(struct lowcore))

+static inline bool intersects(unsigned long addr0, unsigned long size0,
+                             unsigned long addr1, unsigned long size1)
+{
+       return addr0 + size0 > addr1 && addr1 + size1 > addr0;
+}
 #endif /* __ASSEMBLY__ */
 #endif /* BOOT_BOOT_H */
...
@@ -17,8 +17,8 @@
 echo "Warning: '${INSTALLKERNEL}' command not available - additional " \
      "bootloader config required" >&2
-if [ -f $4/vmlinuz-$1 ]; then mv $4/vmlinuz-$1 $4/vmlinuz-$1.old; fi
-if [ -f $4/System.map-$1 ]; then mv $4/System.map-$1 $4/System.map-$1.old; fi
+if [ -f "$4/vmlinuz-$1" ]; then mv -- "$4/vmlinuz-$1" "$4/vmlinuz-$1.old"; fi
+if [ -f "$4/System.map-$1" ]; then mv -- "$4/System.map-$1" "$4/System.map-$1.old"; fi

-cat $2 > $4/vmlinuz-$1
-cp $3 $4/System.map-$1
+cat -- "$2" > "$4/vmlinuz-$1"
+cp -- "$3" "$4/System.map-$1"
@@ -24,11 +24,11 @@ int __bootdata(noexec_disabled);
 unsigned int __bootdata_preserved(zlib_dfltcc_support) = ZLIB_DFLTCC_FULL;
 struct ipl_parameter_block __bootdata_preserved(ipl_block);
 int __bootdata_preserved(ipl_block_valid);
+int __bootdata_preserved(__kaslr_enabled);

 unsigned long vmalloc_size = VMALLOC_DEFAULT_SIZE;
 unsigned long memory_limit;
 int vmalloc_size_set;
-int kaslr_enabled;

 static inline int __diag308(unsigned long subcode, void *addr)
 {
@@ -264,7 +264,7 @@ void parse_boot_command_line(void)
        char *args;
        int rc;

-       kaslr_enabled = IS_ENABLED(CONFIG_RANDOMIZE_BASE);
+       __kaslr_enabled = IS_ENABLED(CONFIG_RANDOMIZE_BASE);
        args = strcpy(command_line_buf, early_command_line);
        while (*args) {
                args = next_arg(args, &param, &val);
@@ -300,7 +300,7 @@ void parse_boot_command_line(void)
                        modify_fac_list(val);

                if (!strcmp(param, "nokaslr"))
-                       kaslr_enabled = 0;
+                       __kaslr_enabled = 0;

 #if IS_ENABLED(CONFIG_KVM)
                if (!strcmp(param, "prot_virt")) {
...
@@ -5,6 +5,7 @@

 #include <asm/sclp.h>
 #include <asm/sections.h>
 #include <asm/boot_data.h>
+#include <asm/physmem_info.h>
 #include <uapi/asm/ipl.h>
 #include "boot.h"

@@ -16,20 +17,16 @@ unsigned long __bootdata_preserved(ipl_cert_list_size);
 unsigned long __bootdata(early_ipl_comp_list_addr);
 unsigned long __bootdata(early_ipl_comp_list_size);

+static struct ipl_rb_certificates *certs;
+static struct ipl_rb_components *comps;
+static bool ipl_report_needs_saving;
+
 #define for_each_rb_entry(entry, rb) \
        for (entry = rb->entries; \
             (void *) entry + sizeof(*entry) <= (void *) rb + rb->len; \
             entry++)

-static inline bool intersects(unsigned long addr0, unsigned long size0,
-                             unsigned long addr1, unsigned long size1)
-{
-       return addr0 + size0 > addr1 && addr1 + size1 > addr0;
-}
-
-static unsigned long find_bootdata_space(struct ipl_rb_components *comps,
-                                        struct ipl_rb_certificates *certs,
-                                        unsigned long safe_addr)
+static unsigned long get_cert_comp_list_size(void)
 {
        struct ipl_rb_certificate_entry *cert;
        struct ipl_rb_component_entry *comp;
@@ -44,44 +41,27 @@ static unsigned long find_bootdata_space(struct ipl_rb_components *comps,
        ipl_cert_list_size = 0;
        for_each_rb_entry(cert, certs)
                ipl_cert_list_size += sizeof(unsigned int) + cert->len;
-       size = ipl_cert_list_size + early_ipl_comp_list_size;
+       return ipl_cert_list_size + early_ipl_comp_list_size;
+}

-       /*
-        * Start from safe_addr to find a free memory area large
-        * enough for the IPL report boot data. This area is used
-        * for ipl_cert_list_addr/ipl_cert_list_size and
-        * early_ipl_comp_list_addr/early_ipl_comp_list_size. It must
-        * not overlap with any component or any certificate.
-        */
-repeat:
-       if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && initrd_data.start && initrd_data.size &&
-           intersects(initrd_data.start, initrd_data.size, safe_addr, size))
-               safe_addr = initrd_data.start + initrd_data.size;
-       if (intersects(safe_addr, size, (unsigned long)comps, comps->len)) {
-               safe_addr = (unsigned long)comps + comps->len;
-               goto repeat;
-       }
-       for_each_rb_entry(comp, comps)
-               if (intersects(safe_addr, size, comp->addr, comp->len)) {
-                       safe_addr = comp->addr + comp->len;
-                       goto repeat;
-               }
-       if (intersects(safe_addr, size, (unsigned long)certs, certs->len)) {
-               safe_addr = (unsigned long)certs + certs->len;
-               goto repeat;
-       }
-       for_each_rb_entry(cert, certs)
-               if (intersects(safe_addr, size, cert->addr, cert->len)) {
-                       safe_addr = cert->addr + cert->len;
-                       goto repeat;
-               }
-       early_ipl_comp_list_addr = safe_addr;
-       ipl_cert_list_addr = safe_addr + early_ipl_comp_list_size;
+bool ipl_report_certs_intersects(unsigned long addr, unsigned long size,
+                                unsigned long *intersection_start)
+{
+       struct ipl_rb_certificate_entry *cert;

-       return safe_addr + size;
+       if (!ipl_report_needs_saving)
+               return false;
+
+       for_each_rb_entry(cert, certs) {
+               if (intersects(addr, size, cert->addr, cert->len)) {
+                       *intersection_start = cert->addr;
+                       return true;
+               }
+       }
+       return false;
 }

-static void copy_components_bootdata(struct ipl_rb_components *comps)
+static void copy_components_bootdata(void)
 {
        struct ipl_rb_component_entry *comp, *ptr;

@@ -90,7 +70,7 @@ static void copy_components_bootdata(struct ipl_rb_components *comps)
                memcpy(ptr++, comp, sizeof(*ptr));
 }

-static void copy_certificates_bootdata(struct ipl_rb_certificates *certs)
+static void copy_certificates_bootdata(void)
 {
        struct ipl_rb_certificate_entry *cert;
        void *ptr;
@@ -104,10 +84,8 @@ static void copy_certificates_bootdata(struct ipl_rb_certificates *certs)
        }
 }

-unsigned long read_ipl_report(unsigned long safe_addr)
+int read_ipl_report(void)
 {
-       struct ipl_rb_certificates *certs;
-       struct ipl_rb_components *comps;
        struct ipl_pl_hdr *pl_hdr;
        struct ipl_rl_hdr *rl_hdr;
        struct ipl_rb_hdr *rb_hdr;
@@ -120,7 +98,7 @@ unsigned long read_ipl_report(unsigned long safe_addr)
         */
        if (!ipl_block_valid ||
            !(ipl_block.hdr.flags & IPL_PL_FLAG_IPLSR))
-               return safe_addr;
+               return -1;
        ipl_secure_flag = !!(ipl_block.hdr.flags & IPL_PL_FLAG_SIPL);
        /*
         * There is an IPL report, to find it load the pointer to the
@@ -158,16 +136,30 @@ unsigned long read_ipl_report(unsigned long safe_addr)
         * With either the component list or the certificate list
         * missing the kernel will stay ignorant of secure IPL.
         */
-       if (!comps || !certs)
-               return safe_addr;
+       if (!comps || !certs) {
+               certs = NULL;
+               return -1;
+       }

-       /*
-        * Copy component and certificate list to a safe area
-        * where the decompressed kernel can find them.
-        */
-       safe_addr = find_bootdata_space(comps, certs, safe_addr);
-       copy_components_bootdata(comps);
-       copy_certificates_bootdata(certs);
+       ipl_report_needs_saving = true;
+       physmem_reserve(RR_IPLREPORT, (unsigned long)pl_hdr,
+                       (unsigned long)rl_end - (unsigned long)pl_hdr);
+       return 0;
+}

-       return safe_addr;
+void save_ipl_cert_comp_list(void)
+{
+       unsigned long size;
+
+       if (!ipl_report_needs_saving)
+               return;
+
+       size = get_cert_comp_list_size();
+       early_ipl_comp_list_addr = physmem_alloc_top_down(RR_CERT_COMP_LIST, size, sizeof(int));
+       ipl_cert_list_addr = early_ipl_comp_list_addr + early_ipl_comp_list_size;
+
+       copy_components_bootdata();
+       copy_certificates_bootdata();
+       physmem_free(RR_IPLREPORT);
+       ipl_report_needs_saving = false;
 }
@@ -3,7 +3,7 @@
  * Copyright IBM Corp. 2019
  */
 #include <linux/pgtable.h>
-#include <asm/mem_detect.h>
+#include <asm/physmem_info.h>
 #include <asm/cpacf.h>
 #include <asm/timex.h>
 #include <asm/sclp.h>
@@ -91,113 +91,108 @@ static int get_random(unsigned long limit, unsigned long *value)
        return 0;
 }

-/*
- * To randomize kernel base address we have to consider several facts:
- * 1. physical online memory might not be continuous and have holes. mem_detect
- *    info contains list of online memory ranges we should consider.
- * 2. we have several memory regions which are occupied and we should not
- *    overlap and destroy them. Currently safe_addr tells us the border below
- *    which all those occupied regions are. We are safe to use anything above
- *    safe_addr.
- * 3. the upper limit might apply as well, even if memory above that limit is
- *    online. Currently those limitations are:
- *    3.1. Limit set by "mem=" kernel command line option
- *    3.2. memory reserved at the end for kasan initialization.
- * 4. kernel base address must be aligned to THREAD_SIZE (kernel stack size).
- *    Which is required for CONFIG_CHECK_STACK. Currently THREAD_SIZE is 4 pages
- *    (16 pages when the kernel is built with kasan enabled)
- * Assumptions:
- * 1. kernel size (including .bss size) and upper memory limit are page aligned.
- * 2. mem_detect memory region start is THREAD_SIZE aligned / end is PAGE_SIZE
- *    aligned (in practice memory configurations granularity on z/VM and LPAR
- *    is 1mb).
- *
- * To guarantee uniform distribution of kernel base address among all suitable
- * addresses we generate random value just once. For that we need to build a
- * continuous range in which every value would be suitable. We can build this
- * range by simply counting all suitable addresses (let's call them positions)
- * which would be valid as kernel base address. To count positions we iterate
- * over online memory ranges. For each range which is big enough for the
- * kernel image we count all suitable addresses we can put the kernel image at
- * that is
- * (end - start - kernel_size) / THREAD_SIZE + 1
- * Two functions count_valid_kernel_positions and position_to_address help
- * to count positions in memory range given and then convert position back
- * to address.
- */
-static unsigned long count_valid_kernel_positions(unsigned long kernel_size,
-                                                 unsigned long _min,
-                                                 unsigned long _max)
+static void sort_reserved_ranges(struct reserved_range *res, unsigned long size)
 {
-       unsigned long start, end, pos = 0;
-       int i;
+       struct reserved_range tmp;
+       int i, j;

-       for_each_mem_detect_usable_block(i, &start, &end) {
-               if (_min >= end)
-                       continue;
-               if (start >= _max)
-                       break;
-               start = max(_min, start);
-               end = min(_max, end);
-               if (end - start < kernel_size)
-                       continue;
-               pos += (end - start - kernel_size) / THREAD_SIZE + 1;
+       for (i = 1; i < size; i++) {
+               tmp = res[i];
+               for (j = i - 1; j >= 0 && res[j].start > tmp.start; j--)
+                       res[j + 1] = res[j];
+               res[j + 1] = tmp;
        }
-
-       return pos;
 }

-static unsigned long position_to_address(unsigned long pos, unsigned long kernel_size,
-                                        unsigned long _min, unsigned long _max)
+static unsigned long iterate_valid_positions(unsigned long size, unsigned long align,
+                                            unsigned long _min, unsigned long _max,
+                                            struct reserved_range *res, size_t res_count,
+                                            bool pos_count, unsigned long find_pos)
 {
-       unsigned long start, end;
+       unsigned long start, end, tmp_end, range_pos, pos = 0;
+       struct reserved_range *res_end = res + res_count;
+       struct reserved_range *skip_res;
        int i;

-       for_each_mem_detect_usable_block(i, &start, &end) {
+       align = max(align, 8UL);
+       _min = round_up(_min, align);
+       for_each_physmem_usable_range(i, &start, &end) {
                if (_min >= end)
                        continue;
+               start = round_up(start, align);
                if (start >= _max)
                        break;
                start = max(_min, start);
                end = min(_max, end);
-               if (end - start < kernel_size)
-                       continue;
-               if ((end - start - kernel_size) / THREAD_SIZE + 1 >= pos)
-                       return start + (pos - 1) * THREAD_SIZE;
-               pos -= (end - start - kernel_size) / THREAD_SIZE + 1;
+
+               while (start + size <= end) {
+                       /* skip reserved ranges below the start */
+                       while (res && res->end <= start) {
+                               res++;
+                               if (res >= res_end)
+                                       res = NULL;
+                       }
+                       skip_res = NULL;
+                       tmp_end = end;
+                       /* has intersecting reserved range */
+                       if (res && res->start < end) {
+                               skip_res = res;
+                               tmp_end = res->start;
+                       }
+                       if (start + size <= tmp_end) {
+                               range_pos = (tmp_end - start - size) / align + 1;
+                               if (pos_count) {
+                                       pos += range_pos;
+                               } else {
+                                       if (range_pos >= find_pos)
+                                               return start + (find_pos - 1) * align;
+                                       find_pos -= range_pos;
+                               }
+                       }
+                       if (!skip_res)
+                               break;
+                       start = round_up(skip_res->end, align);
+               }
        }

-       return 0;
+       return pos_count ? pos : 0;
 }

-unsigned long get_random_base(unsigned long safe_addr)
-{
-       unsigned long usable_total = get_mem_detect_usable_total();
-       unsigned long memory_limit = get_mem_detect_end();
-       unsigned long base_pos, max_pos, kernel_size;
-       int i;
-
-       /*
-        * Avoid putting kernel in the end of physical memory
-        * which vmem and kasan code will use for shadow memory and
-        * pgtable mapping allocations.
-        */
-       memory_limit -= kasan_estimate_memory_needs(usable_total);
-       memory_limit -= vmem_estimate_memory_needs(usable_total);
-
-       safe_addr = ALIGN(safe_addr, THREAD_SIZE);
-       kernel_size = vmlinux.image_size + vmlinux.bss_size;
-       if (safe_addr + kernel_size > memory_limit)
-               return 0;
-
-       max_pos = count_valid_kernel_positions(kernel_size, safe_addr, memory_limit);
-       if (!max_pos) {
-               sclp_early_printk("KASLR disabled: not enough memory\n");
-               return 0;
-       }
-
-       /* we need a value in the range [1, base_pos] inclusive */
-       if (get_random(max_pos, &base_pos))
-               return 0;
-
-       return position_to_address(base_pos + 1, kernel_size, safe_addr, memory_limit);
+/*
+ * Two types of decompressor memory allocations/reserves are considered
+ * differently.
+ *
+ * "Static" or "single" allocations are done via physmem_alloc_range() and
+ * physmem_reserve(), and they are listed in physmem_info.reserved[]. Each
+ * type of "static" allocation can only have one allocation per type and
+ * cannot have chains.
+ *
+ * On the other hand, "dynamic" or "repetitive" allocations are done via
+ * physmem_alloc_top_down(). These allocations are tightly packed together
+ * top down from the end of online memory. physmem_alloc_pos represents
+ * current position where those allocations start.
+ *
+ * Functions randomize_within_range() and iterate_valid_positions()
+ * only consider "dynamic" allocations by never looking above
+ * physmem_alloc_pos. "Static" allocations, however, are explicitly
+ * considered by checking the "res" (reserves) array. The first
+ * reserved_range of a "dynamic" allocation may also be checked along the
+ * way, but it will always be above the maximum value anyway.
+ */
+unsigned long randomize_within_range(unsigned long size, unsigned long align,
+                                    unsigned long min, unsigned long max)
+{
+       struct reserved_range res[RR_MAX];
+       unsigned long max_pos, pos;
+
+       memcpy(res, physmem_info.reserved, sizeof(res));
+       sort_reserved_ranges(res, ARRAY_SIZE(res));
+       max = min(max, get_physmem_alloc_pos());
+
+       max_pos = iterate_valid_positions(size, align, min, max, res, ARRAY_SIZE(res), true, 0);
+       if (!max_pos)
+               return 0;
+       if (get_random(max_pos, &pos))
+               return 0;
+       return iterate_valid_positions(size, align, min, max, res, ARRAY_SIZE(res), false, pos + 1);
 }
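The two iterate_valid_positions() calls above implement a count-then-index
scheme: the first pass counts every size/alignment-compatible position, a
random index in [1, count] is drawn exactly once, and the second pass replays
the identical iteration to turn that index back into an address, which gives
a uniform distribution over all candidates. A simplified standalone sketch
(reserved-range skipping omitted, all names hypothetical):

#include <stddef.h>

struct range { unsigned long start, end; };     /* half-open [start, end) */

/*
 * pos_count != 0: return how many align-aligned positions fit a size-byte
 * block. pos_count == 0: return the address of the find_pos-th position
 * (1-based). Both modes must walk the candidates in the same order.
 */
static unsigned long iterate(const struct range *r, size_t n,
                             unsigned long size, unsigned long align,
                             int pos_count, unsigned long find_pos)
{
        unsigned long pos = 0;
        size_t i;

        for (i = 0; i < n; i++) {
                /* round start up to the alignment (align is a power of two) */
                unsigned long start = (r[i].start + align - 1) & ~(align - 1);
                unsigned long cnt;

                if (start + size > r[i].end)
                        continue;
                cnt = (r[i].end - start - size) / align + 1;
                if (pos_count)
                        pos += cnt;
                else if (cnt >= find_pos)
                        return start + (find_pos - 1) * align;
                else
                        find_pos -= cnt;
        }
        return pos_count ? pos : 0;
}

/* usage: count = iterate(r, n, size, align, 1, 0);
 *        draw rnd uniformly in [1, count];
 *        addr  = iterate(r, n, size, align, 0, rnd);
 */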
@@ -123,11 +123,10 @@ void decompressor_printk(const char *fmt, ...)
        sclp_early_printk(buf);
 }

-static noinline void print_stacktrace(void)
+void print_stacktrace(unsigned long sp)
 {
        struct stack_info boot_stack = { STACK_TYPE_TASK, (unsigned long)_stack_start,
                                         (unsigned long)_stack_end };
-       unsigned long sp = S390_lowcore.gpregs_save_area[15];
        bool first = true;

        decompressor_printk("Call Trace:\n");
@@ -154,7 +153,7 @@ void print_pgm_check_info(void)
        decompressor_printk("Kernel command line: %s\n", early_command_line);
        decompressor_printk("Kernel fault: interruption code %04x ilc:%x\n",
                            S390_lowcore.pgm_code, S390_lowcore.pgm_ilc >> 1);
-       if (kaslr_enabled)
+       if (kaslr_enabled())
                decompressor_printk("Kernel random base: %lx\n", __kaslr_offset);
        decompressor_printk("PSW : %016lx %016lx (%pS)\n",
                            S390_lowcore.psw_save_area.mask,
@@ -173,7 +172,7 @@ void print_pgm_check_info(void)
                            gpregs[8], gpregs[9], gpregs[10], gpregs[11]);
        decompressor_printk("     %016lx %016lx %016lx %016lx\n",
                            gpregs[12], gpregs[13], gpregs[14], gpregs[15]);
-       print_stacktrace();
+       print_stacktrace(S390_lowcore.gpregs_save_area[15]);
        decompressor_printk("Last Breaking-Event-Address:\n");
        decompressor_printk(" [<%016lx>] %pS\n", (unsigned long)S390_lowcore.pgm_last_break,
                            (void *)S390_lowcore.pgm_last_break);
...
 // SPDX-License-Identifier: GPL-2.0
+#include <linux/processor.h>
 #include <linux/errno.h>
 #include <linux/init.h>
+#include <asm/physmem_info.h>
+#include <asm/stacktrace.h>
+#include <asm/boot_data.h>
+#include <asm/sparsemem.h>
+#include <asm/sections.h>
 #include <asm/setup.h>
-#include <asm/processor.h>
 #include <asm/sclp.h>
-#include <asm/sections.h>
-#include <asm/mem_detect.h>
-#include <asm/sparsemem.h>
+#include <asm/uv.h>
 #include "decompressor.h"
 #include "boot.h"

-struct mem_detect_info __bootdata(mem_detect);
+struct physmem_info __bootdata(physmem_info);
+static unsigned int physmem_alloc_ranges;
+static unsigned long physmem_alloc_pos;

 /* up to 256 storage elements, 1020 subincrements each */
 #define ENTRIES_EXTENDED_MAX \
-       (256 * (1020 / 2) * sizeof(struct mem_detect_block))
+       (256 * (1020 / 2) * sizeof(struct physmem_range))

-static struct mem_detect_block *__get_mem_detect_block_ptr(u32 n)
+static struct physmem_range *__get_physmem_range_ptr(u32 n)
 {
        if (n < MEM_INLINED_ENTRIES)
-               return &mem_detect.entries[n];
-       return &mem_detect.entries_extended[n - MEM_INLINED_ENTRIES];
+               return &physmem_info.online[n];
+       if (unlikely(!physmem_info.online_extended)) {
+               physmem_info.online_extended = (struct physmem_range *)physmem_alloc_range(
+                       RR_MEM_DETECT_EXTENDED, ENTRIES_EXTENDED_MAX, sizeof(long), 0,
+                       physmem_alloc_pos, true);
+       }
+       return &physmem_info.online_extended[n - MEM_INLINED_ENTRIES];
 }

 /*
- * sequential calls to add_mem_detect_block with adjacent memory areas
- * are merged together into single memory block.
+ * sequential calls to add_physmem_online_range with adjacent memory ranges
+ * are merged together into single memory range.
  */
-void add_mem_detect_block(u64 start, u64 end)
+void add_physmem_online_range(u64 start, u64 end)
 {
-       struct mem_detect_block *block;
+       struct physmem_range *range;

-       if (mem_detect.count) {
-               block = __get_mem_detect_block_ptr(mem_detect.count - 1);
-               if (block->end == start) {
-                       block->end = end;
+       if (physmem_info.range_count) {
+               range = __get_physmem_range_ptr(physmem_info.range_count - 1);
+               if (range->end == start) {
+                       range->end = end;
                        return;
                }
        }

-       block = __get_mem_detect_block_ptr(mem_detect.count);
-       block->start = start;
-       block->end = end;
-       mem_detect.count++;
+       range = __get_physmem_range_ptr(physmem_info.range_count);
+       range->start = start;
+       range->end = end;
+       physmem_info.range_count++;
 }

 static int __diag260(unsigned long rx1, unsigned long rx2)
@@ -95,7 +105,7 @@ static int diag260(void)
                return -1;

        for (i = 0; i < min_t(int, rc, ARRAY_SIZE(storage_extents)); i++)
-               add_mem_detect_block(storage_extents[i].start, storage_extents[i].end + 1);
+               add_physmem_online_range(storage_extents[i].start, storage_extents[i].end + 1);
        return 0;
 }

@@ -143,49 +153,176 @@ static unsigned long search_mem_end(void)
        return (offset + 1) << 20;
 }

-unsigned long detect_memory(unsigned long *safe_addr)
+unsigned long detect_max_physmem_end(void)
 {
        unsigned long max_physmem_end = 0;

-       sclp_early_get_memsize(&max_physmem_end);
-       mem_detect.entries_extended = (struct mem_detect_block *)ALIGN(*safe_addr, sizeof(u64));
+       if (!sclp_early_get_memsize(&max_physmem_end)) {
+               physmem_info.info_source = MEM_DETECT_SCLP_READ_INFO;
+       } else {
+               max_physmem_end = search_mem_end();
+               physmem_info.info_source = MEM_DETECT_BIN_SEARCH;
+       }
+       return max_physmem_end;
+}

+void detect_physmem_online_ranges(unsigned long max_physmem_end)
+{
        if (!sclp_early_read_storage_info()) {
-               mem_detect.info_source = MEM_DETECT_SCLP_STOR_INFO;
+               physmem_info.info_source = MEM_DETECT_SCLP_STOR_INFO;
        } else if (!diag260()) {
-               mem_detect.info_source = MEM_DETECT_DIAG260;
-               max_physmem_end = max_physmem_end ?: get_mem_detect_end();
+               physmem_info.info_source = MEM_DETECT_DIAG260;
        } else if (max_physmem_end) {
-               add_mem_detect_block(0, max_physmem_end);
-               mem_detect.info_source = MEM_DETECT_SCLP_READ_INFO;
-       } else {
-               max_physmem_end = search_mem_end();
-               add_mem_detect_block(0, max_physmem_end);
-               mem_detect.info_source = MEM_DETECT_BIN_SEARCH;
+               add_physmem_online_range(0, max_physmem_end);
        }
+}

-       if (mem_detect.count > MEM_INLINED_ENTRIES) {
-               *safe_addr += (mem_detect.count - MEM_INLINED_ENTRIES) *
-                            sizeof(struct mem_detect_block);
-       }
+void physmem_set_usable_limit(unsigned long limit)
+{
+       physmem_info.usable = limit;
+       physmem_alloc_pos = limit;
+}

-       return max_physmem_end;
+static void die_oom(unsigned long size, unsigned long align, unsigned long min, unsigned long max)
+{
+       unsigned long start, end, total_mem = 0, total_reserved_mem = 0;
+       struct reserved_range *range;
+       enum reserved_range_type t;
+       int i;
+
+       decompressor_printk("Linux version %s\n", kernel_version);
+       if (!is_prot_virt_guest() && early_command_line[0])
+               decompressor_printk("Kernel command line: %s\n", early_command_line);
+       decompressor_printk("Out of memory allocating %lx bytes %lx aligned in range %lx:%lx\n",
+                           size, align, min, max);
+       decompressor_printk("Reserved memory ranges:\n");
+       for_each_physmem_reserved_range(t, range, &start, &end) {
+               decompressor_printk("%016lx %016lx %s\n", start, end, get_rr_type_name(t));
+               total_reserved_mem += end - start;
+       }
+       decompressor_printk("Usable online memory ranges (info source: %s [%x]):\n",
+                           get_physmem_info_source(), physmem_info.info_source);
+       for_each_physmem_usable_range(i, &start, &end) {
+               decompressor_printk("%016lx %016lx\n", start, end);
+               total_mem += end - start;
+       }
+       decompressor_printk("Usable online memory total: %lx Reserved: %lx Free: %lx\n",
+                           total_mem, total_reserved_mem,
+                           total_mem > total_reserved_mem ? total_mem - total_reserved_mem : 0);
+       print_stacktrace(current_frame_address());
+       sclp_early_printk("\n\n -- System halted\n");
+       disabled_wait();
 }

-void mem_detect_set_usable_limit(unsigned long limit)
+void physmem_reserve(enum reserved_range_type type, unsigned long addr, unsigned long size)
 {
-       struct mem_detect_block *block;
-       int i;
+       physmem_info.reserved[type].start = addr;
+       physmem_info.reserved[type].end = addr + size;
+}

-       /* make sure mem_detect.usable ends up within online memory block */
-       for (i = 0; i < mem_detect.count; i++) {
-               block = __get_mem_detect_block_ptr(i);
-               if (block->start >= limit)
-                       break;
-               if (block->end >= limit) {
-                       mem_detect.usable = limit;
-                       break;
-               }
-               mem_detect.usable = block->end;
-       }
+void physmem_free(enum reserved_range_type type)
+{
+       physmem_info.reserved[type].start = 0;
+       physmem_info.reserved[type].end = 0;
+}
+
+static bool __physmem_alloc_intersects(unsigned long addr, unsigned long size,
+                                      unsigned long *intersection_start)
+{
+       unsigned long res_addr, res_size;
+       int t;
+
+       for (t = 0; t < RR_MAX; t++) {
+               if (!get_physmem_reserved(t, &res_addr, &res_size))
+                       continue;
+               if (intersects(addr, size, res_addr, res_size)) {
+                       *intersection_start = res_addr;
+                       return true;
+               }
+       }
+       return ipl_report_certs_intersects(addr, size, intersection_start);
+}
+
+static unsigned long __physmem_alloc_range(unsigned long size, unsigned long align,
+                                          unsigned long min, unsigned long max,
+                                          unsigned int from_ranges, unsigned int *ranges_left,
+                                          bool die_on_oom)
+{
+       unsigned int nranges = from_ranges ?: physmem_info.range_count;
+       unsigned long range_start, range_end;
+       unsigned long intersection_start;
+       unsigned long addr, pos = max;
+
+       align = max(align, 8UL);
+       while (nranges) {
+               __get_physmem_range(nranges - 1, &range_start, &range_end, false);
+               pos = min(range_end, pos);
+
+               if (round_up(min, align) + size > pos)
+                       break;
+               addr = round_down(pos - size, align);
+               if (range_start > addr) {
+                       nranges--;
+                       continue;
+               }
+               if (__physmem_alloc_intersects(addr, size, &intersection_start)) {
+                       pos = intersection_start;
+                       continue;
+               }
+
+               if (ranges_left)
+                       *ranges_left = nranges;
+               return addr;
+       }
+       if (die_on_oom)
+               die_oom(size, align, min, max);
+       return 0;
+}
+
+unsigned long physmem_alloc_range(enum reserved_range_type type, unsigned long size,
+                                 unsigned long align, unsigned long min, unsigned long max,
+                                 bool die_on_oom)
+{
+       unsigned long addr;
+
+       max = min(max, physmem_alloc_pos);
+       addr = __physmem_alloc_range(size, align, min, max, 0, NULL, die_on_oom);
+       if (addr)
+               physmem_reserve(type, addr, size);
+       return addr;
+}
+
+unsigned long physmem_alloc_top_down(enum reserved_range_type type, unsigned long size,
+                                    unsigned long align)
+{
+       struct reserved_range *range = &physmem_info.reserved[type];
+       struct reserved_range *new_range;
+       unsigned int ranges_left;
+       unsigned long addr;
+
+       addr = __physmem_alloc_range(size, align, 0, physmem_alloc_pos, physmem_alloc_ranges,
+                                    &ranges_left, true);
+       /* if not a consecutive allocation of the same type or first allocation */
+       if (range->start != addr + size) {
+               if (range->end) {
+                       physmem_alloc_pos = __physmem_alloc_range(
+                               sizeof(struct reserved_range), 0, 0, physmem_alloc_pos,
+                               physmem_alloc_ranges, &ranges_left, true);
+                       new_range = (struct reserved_range *)physmem_alloc_pos;
+                       *new_range = *range;
+                       range->chain = new_range;
+                       addr = __physmem_alloc_range(size, align, 0, physmem_alloc_pos,
+                                                    ranges_left, &ranges_left, true);
+               }
+               range->end = addr + size;
+       }
+       range->start = addr;
+       physmem_alloc_pos = addr;
+       physmem_alloc_ranges = ranges_left;
+       return addr;
+}
+
+unsigned long get_physmem_alloc_pos(void)
+{
+       return physmem_alloc_pos;
 }
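The core of __physmem_alloc_range() above is a top-down first-fit scan: start
at the top of the highest online range, align the candidate address downward,
and whenever the candidate overlaps a reservation, retry directly below that
reservation. A simplified standalone sketch (the per-type chaining and
ranges_left bookkeeping are omitted; names are hypothetical):

#include <stdbool.h>
#include <stddef.h>

struct range { unsigned long start, end; };     /* half-open [start, end) */

static unsigned long alloc_top_down(const struct range *online, size_t n,
                                    const struct range *resv, size_t nres,
                                    unsigned long size, unsigned long align)
{
        /* walk online ranges highest-first; assumes ascending order and
         * power-of-two align */
        while (n--) {
                unsigned long pos = online[n].end;

                while (pos >= online[n].start + size) {
                        unsigned long addr = (pos - size) & ~(align - 1);
                        unsigned long below = 0;
                        bool hit = false;
                        size_t i;

                        if (addr < online[n].start)
                                break;
                        for (i = 0; i < nres; i++) {
                                if (addr < resv[i].end &&
                                    resv[i].start < addr + size) {
                                        below = resv[i].start;
                                        hit = true;
                                        break;
                                }
                        }
                        if (!hit)
                                return addr;
                        pos = below;    /* retry just below the reservation */
                }
        }
        return 0;       /* out of memory; the real code calls die_oom() */
}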
@@ -12,7 +12,7 @@
 #include <asm/diag.h>
 #include <asm/uv.h>
 #include <asm/abs_lowcore.h>
-#include <asm/mem_detect.h>
+#include <asm/physmem_info.h>
 #include "decompressor.h"
 #include "boot.h"
 #include "uv.h"
@@ -21,7 +21,6 @@ unsigned long __bootdata_preserved(__kaslr_offset);
 unsigned long __bootdata_preserved(__abs_lowcore);
 unsigned long __bootdata_preserved(__memcpy_real_area);
 pte_t *__bootdata_preserved(memcpy_real_ptep);
-unsigned long __bootdata(__amode31_base);
 unsigned long __bootdata_preserved(VMALLOC_START);
 unsigned long __bootdata_preserved(VMALLOC_END);
 struct page *__bootdata_preserved(vmemmap);
@@ -29,8 +28,6 @@ unsigned long __bootdata_preserved(vmemmap_size);
 unsigned long __bootdata_preserved(MODULES_VADDR);
 unsigned long __bootdata_preserved(MODULES_END);
 unsigned long __bootdata(ident_map_size);
-int __bootdata(is_full_image) = 1;
-struct initrd_data __bootdata(initrd_data);

 u64 __bootdata_preserved(stfle_fac_list[16]);
 u64 __bootdata_preserved(alt_stfle_fac_list[16]);
@@ -76,17 +73,20 @@ unsigned long mem_safe_offset(void)
 }
 #endif

-static unsigned long rescue_initrd(unsigned long safe_addr)
+static void rescue_initrd(unsigned long min, unsigned long max)
 {
+       unsigned long old_addr, addr, size;
+
        if (!IS_ENABLED(CONFIG_BLK_DEV_INITRD))
-               return safe_addr;
-       if (!initrd_data.start || !initrd_data.size)
-               return safe_addr;
-       if (initrd_data.start < safe_addr) {
-               memmove((void *)safe_addr, (void *)initrd_data.start, initrd_data.size);
-               initrd_data.start = safe_addr;
-       }
-       return initrd_data.start + initrd_data.size;
+               return;
+       if (!get_physmem_reserved(RR_INITRD, &addr, &size))
+               return;
+       if (addr >= min && addr + size <= max)
+               return;
+       old_addr = addr;
+       physmem_free(RR_INITRD);
+       addr = physmem_alloc_top_down(RR_INITRD, size, 0);
+       memmove((void *)addr, (void *)old_addr, size);
 }

 static void copy_bootdata(void)
@@ -140,7 +140,7 @@ static void handle_relocs(unsigned long offset)
  *
  * Consider the following factors:
  * 1. max_physmem_end - end of physical memory online or standby.
- *    Always <= end of the last online memory block (get_mem_detect_end()).
+ *    Always >= end of the last online memory range (get_physmem_online_end()).
  * 2. CONFIG_MAX_PHYSMEM_BITS - the maximum size of physical memory the
  *    kernel is able to support.
  * 3. "mem=" kernel command line option which limits physical memory usage.
@@ -160,10 +160,10 @@ static void setup_ident_map_size(unsigned long max_physmem_end)
 #ifdef CONFIG_CRASH_DUMP
        if (oldmem_data.start) {
-               kaslr_enabled = 0;
+               __kaslr_enabled = 0;
                ident_map_size = min(ident_map_size, oldmem_data.size);
        } else if (ipl_block_valid && is_ipl_block_dump()) {
-               kaslr_enabled = 0;
+               __kaslr_enabled = 0;
                if (!sclp_early_get_hsa_size(&hsa_size) && hsa_size)
                        ident_map_size = min(ident_map_size, hsa_size);
        }
@@ -235,9 +235,9 @@ static unsigned long setup_kernel_memory_layout(void)
 /*
  * This function clears the BSS section of the decompressed Linux kernel and NOT the decompressor's.
  */
-static void clear_bss_section(void)
+static void clear_bss_section(unsigned long vmlinux_lma)
 {
-       memset((void *)vmlinux.default_lma + vmlinux.image_size, 0, vmlinux.bss_size);
+       memset((void *)vmlinux_lma + vmlinux.image_size, 0, vmlinux.bss_size);
 }

 /*
@@ -256,7 +256,6 @@ static void setup_vmalloc_size(void)
 static void offset_vmlinux_info(unsigned long offset)
 {
-       vmlinux.default_lma += offset;
        *(unsigned long *)(&vmlinux.entry) += offset;
        vmlinux.bootdata_off += offset;
        vmlinux.bootdata_preserved_off += offset;
@@ -266,60 +265,83 @@ static void offset_vmlinux_info(unsigned long offset)
        vmlinux.init_mm_off += offset;
        vmlinux.swapper_pg_dir_off += offset;
        vmlinux.invalid_pg_dir_off += offset;
-}
-
-static unsigned long reserve_amode31(unsigned long safe_addr)
-{
-       __amode31_base = PAGE_ALIGN(safe_addr);
-       return __amode31_base + vmlinux.amode31_size;
+#ifdef CONFIG_KASAN
+       vmlinux.kasan_early_shadow_page_off += offset;
+       vmlinux.kasan_early_shadow_pte_off += offset;
+       vmlinux.kasan_early_shadow_pmd_off += offset;
+       vmlinux.kasan_early_shadow_pud_off += offset;
+       vmlinux.kasan_early_shadow_p4d_off += offset;
+#endif
 }

 void startup_kernel(void)
 {
        unsigned long max_physmem_end;
-       unsigned long random_lma;
-       unsigned long safe_addr;
+       unsigned long vmlinux_lma = 0;
+       unsigned long amode31_lma = 0;
        unsigned long asce_limit;
+       unsigned long safe_addr;
        void *img;
        psw_t psw;

-       initrd_data.start = parmarea.initrd_start;
-       initrd_data.size = parmarea.initrd_size;
+       setup_lpp();
+       safe_addr = mem_safe_offset();
+
+       /*
+        * reserve decompressor memory together with decompression heap, buffer and
+        * memory which might be occupied by uncompressed kernel at default 1Mb
+        * position (if KASLR is off or failed).
+        */
+       physmem_reserve(RR_DECOMPRESSOR, 0, safe_addr);
+       if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && parmarea.initrd_size)
+               physmem_reserve(RR_INITRD, parmarea.initrd_start, parmarea.initrd_size);
        oldmem_data.start = parmarea.oldmem_base;
        oldmem_data.size = parmarea.oldmem_size;

-       setup_lpp();
        store_ipl_parmblock();
-       safe_addr = mem_safe_offset();
-       safe_addr = reserve_amode31(safe_addr);
-       safe_addr = read_ipl_report(safe_addr);
+       read_ipl_report();
        uv_query_info();
-       safe_addr = rescue_initrd(safe_addr);
        sclp_early_read_info();
        setup_boot_command_line();
        parse_boot_command_line();
        detect_facilities();
        sanitize_prot_virt_host();
-       max_physmem_end = detect_memory(&safe_addr);
+       max_physmem_end = detect_max_physmem_end();
        setup_ident_map_size(max_physmem_end);
        setup_vmalloc_size();
        asce_limit = setup_kernel_memory_layout();
-       mem_detect_set_usable_limit(ident_map_size);
+       /* got final ident_map_size, physmem allocations could be performed now */
+       physmem_set_usable_limit(ident_map_size);
+       detect_physmem_online_ranges(max_physmem_end);
+       save_ipl_cert_comp_list();
+       rescue_initrd(safe_addr, ident_map_size);

-       if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && kaslr_enabled) {
-               random_lma = get_random_base(safe_addr);
-               if (random_lma) {
-                       __kaslr_offset = random_lma - vmlinux.default_lma;
-                       img = (void *)vmlinux.default_lma;
+       if (kaslr_enabled()) {
+               vmlinux_lma = randomize_within_range(vmlinux.image_size + vmlinux.bss_size,
+                                                    THREAD_SIZE, vmlinux.default_lma,
+                                                    ident_map_size);
+               if (vmlinux_lma) {
+                       __kaslr_offset = vmlinux_lma - vmlinux.default_lma;
                        offset_vmlinux_info(__kaslr_offset);
                }
        }
+       vmlinux_lma = vmlinux_lma ?: vmlinux.default_lma;
+       physmem_reserve(RR_VMLINUX, vmlinux_lma, vmlinux.image_size + vmlinux.bss_size);

        if (!IS_ENABLED(CONFIG_KERNEL_UNCOMPRESSED)) {
                img = decompress_kernel();
-               memmove((void *)vmlinux.default_lma, img, vmlinux.image_size);
-       } else if (__kaslr_offset)
-               memcpy((void *)vmlinux.default_lma, img, vmlinux.image_size);
+               memmove((void *)vmlinux_lma, img, vmlinux.image_size);
+       } else if (__kaslr_offset) {
+               img = (void *)vmlinux.default_lma;
+               memmove((void *)vmlinux_lma, img, vmlinux.image_size);
+               memset(img, 0, vmlinux.image_size);
+       }
+
+       /* vmlinux decompression is done, shrink reserved low memory */
+       physmem_reserve(RR_DECOMPRESSOR, 0, (unsigned long)_decompressor_end);
+       if (kaslr_enabled())
+               amode31_lma = randomize_within_range(vmlinux.amode31_size, PAGE_SIZE, 0, SZ_2G);
+       amode31_lma = amode31_lma ?: vmlinux.default_lma - vmlinux.amode31_size;
+       physmem_reserve(RR_AMODE31, amode31_lma, vmlinux.amode31_size);

        /*
         * The order of the following operations is important:
@@ -334,21 +356,16 @@ void startup_kernel(void)
         * - copy_bootdata() must follow setup_vmem() to propagate changes to
         *   bootdata made by setup_vmem()
         */
-       clear_bss_section();
+       clear_bss_section(vmlinux_lma);
        handle_relocs(__kaslr_offset);
        setup_vmem(asce_limit);
        copy_bootdata();

-       if (__kaslr_offset) {
-               /*
-                * Save KASLR offset for early dumps, before vmcore_info is set.
-                * Mark as uneven to distinguish from real vmcore_info pointer.
-                */
-               S390_lowcore.vmcore_info = __kaslr_offset | 0x1UL;
-               /* Clear non-relocated kernel */
-               if (IS_ENABLED(CONFIG_KERNEL_UNCOMPRESSED))
-                       memset(img, 0, vmlinux.image_size);
-       }
+       /*
+        * Save KASLR offset for early dumps, before vmcore_info is set.
+        * Mark as uneven to distinguish from real vmcore_info pointer.
+        */
+       S390_lowcore.vmcore_info = __kaslr_offset ? __kaslr_offset | 0x1UL : 0;

        /*
         * Jump to the decompressed kernel entry point and switch DAT mode on.
...
@@ -93,6 +93,8 @@ SECTIONS
                _decompressor_syms_end = .;
        }

+       _decompressor_end = .;
+
 #ifdef CONFIG_KERNEL_UNCOMPRESSED
        . = 0x100000;
 #else
...
@@ -13,27 +13,28 @@
 #define SP     %r15
 #define FRAME  (16 * 8 + 4 * 8)

 .data
-       .align  32
-.Lsigma:
-       .long   0x61707865,0x3320646e,0x79622d32,0x6b206574    # endian-neutral
-       .long   1,0,0,0
-       .long   2,0,0,0
-       .long   3,0,0,0
-       .long   0x03020100,0x07060504,0x0b0a0908,0x0f0e0d0c    # byte swap
-       .long   0,1,2,3
-       .long   0x61707865,0x61707865,0x61707865,0x61707865    # smashed sigma
-       .long   0x3320646e,0x3320646e,0x3320646e,0x3320646e
-       .long   0x79622d32,0x79622d32,0x79622d32,0x79622d32
-       .long   0x6b206574,0x6b206574,0x6b206574,0x6b206574
-.previous
+       .balign 32
+
+SYM_DATA_START_LOCAL(sigma)
+       .long   0x61707865,0x3320646e,0x79622d32,0x6b206574    # endian-neutral
+       .long   1,0,0,0
+       .long   2,0,0,0
+       .long   3,0,0,0
+       .long   0x03020100,0x07060504,0x0b0a0908,0x0f0e0d0c    # byte swap
+       .long   0,1,2,3
+       .long   0x61707865,0x61707865,0x61707865,0x61707865    # smashed sigma
+       .long   0x3320646e,0x3320646e,0x3320646e,0x3320646e
+       .long   0x79622d32,0x79622d32,0x79622d32,0x79622d32
+       .long   0x6b206574,0x6b206574,0x6b206574,0x6b206574
+SYM_DATA_END(sigma)
+.previous

        GEN_BR_THUNK %r14

 .text

 #############################################################################
 # void chacha20_vx_4x(u8 *out, counst u8 *inp, size_t len,
@@ -78,10 +79,10 @@
 #define XT2    %v29
 #define XT3    %v30

-ENTRY(chacha20_vx_4x)
+SYM_FUNC_START(chacha20_vx_4x)
        stmg    %r6,%r7,6*8(SP)

-       larl    %r7,.Lsigma
+       larl    %r7,sigma
        lhi     %r0,10
        lhi     %r1,0
@@ -403,7 +404,7 @@ ENTRY(chacha20_vx_4x)

        lmg     %r6,%r7,6*8(SP)
        BR_EX   %r14
-ENDPROC(chacha20_vx_4x)
+SYM_FUNC_END(chacha20_vx_4x)

 #undef OUT
 #undef INP
@@ -471,7 +472,7 @@ ENDPROC(chacha20_vx_4x)
 #define T2     %v29
 #define T3     %v30

-ENTRY(chacha20_vx)
+SYM_FUNC_START(chacha20_vx)
        clgfi   LEN,256
        jle     chacha20_vx_4x
        stmg    %r6,%r7,6*8(SP)
@@ -481,7 +482,7 @@ ENTRY(chacha20_vx)
        la      SP,0(%r1,SP)
        stg     %r0,0(SP)       # back-chain

-       larl    %r7,.Lsigma
+       larl    %r7,sigma
        lhi     %r0,10

        VLM     K1,K2,0,KEY,0   # load key
@@ -902,6 +903,6 @@ ENTRY(chacha20_vx)
        lmg     %r6,%r7,FRAME+6*8(SP)
        la      SP,FRAME(SP)
        BR_EX   %r14
-ENDPROC(chacha20_vx)
+SYM_FUNC_END(chacha20_vx)

 .previous
...
...@@ -24,8 +24,8 @@ ...@@ -24,8 +24,8 @@
#define CONST_RU_POLY %v13 #define CONST_RU_POLY %v13
#define CONST_CRC_POLY %v14 #define CONST_CRC_POLY %v14
.data .data
.align 8 .balign 8
/* /*
* The CRC-32 constant block contains reduction constants to fold and * The CRC-32 constant block contains reduction constants to fold and
...@@ -58,19 +58,20 @@ ...@@ -58,19 +58,20 @@
* P'(x) = 0xEDB88320 * P'(x) = 0xEDB88320
*/ */
.Lconstants_CRC_32_BE: SYM_DATA_START_LOCAL(constants_CRC_32_BE)
.quad 0x08833794c, 0x0e6228b11 # R1, R2 .quad 0x08833794c, 0x0e6228b11 # R1, R2
.quad 0x0c5b9cd4c, 0x0e8a45605 # R3, R4 .quad 0x0c5b9cd4c, 0x0e8a45605 # R3, R4
.quad 0x0f200aa66, 1 << 32 # R5, x32 .quad 0x0f200aa66, 1 << 32 # R5, x32
.quad 0x0490d678d, 1 # R6, 1 .quad 0x0490d678d, 1 # R6, 1
.quad 0x104d101df, 0 # u .quad 0x104d101df, 0 # u
.quad 0x104C11DB7, 0 # P(x) .quad 0x104C11DB7, 0 # P(x)
SYM_DATA_END(constants_CRC_32_BE)
.previous .previous
GEN_BR_THUNK %r14 GEN_BR_THUNK %r14
.text .text
/* /*
* The CRC-32 function(s) use these calling conventions: * The CRC-32 function(s) use these calling conventions:
* *
...@@ -90,9 +91,9 @@ ...@@ -90,9 +91,9 @@
* *
* V9..V14: CRC-32 constants. * V9..V14: CRC-32 constants.
*/ */
ENTRY(crc32_be_vgfm_16) SYM_FUNC_START(crc32_be_vgfm_16)
/* Load CRC-32 constants */ /* Load CRC-32 constants */
larl %r5,.Lconstants_CRC_32_BE larl %r5,constants_CRC_32_BE
VLM CONST_R1R2,CONST_CRC_POLY,0,%r5 VLM CONST_R1R2,CONST_CRC_POLY,0,%r5
/* Load the initial CRC value into the leftmost word of V0. */ /* Load the initial CRC value into the leftmost word of V0. */
...@@ -207,6 +208,6 @@ ENTRY(crc32_be_vgfm_16) ...@@ -207,6 +208,6 @@ ENTRY(crc32_be_vgfm_16)
.Ldone: .Ldone:
VLGVF %r2,%v2,3 VLGVF %r2,%v2,3
BR_EX %r14 BR_EX %r14
ENDPROC(crc32_be_vgfm_16) SYM_FUNC_END(crc32_be_vgfm_16)
.previous .previous
...@@ -25,8 +25,8 @@ ...@@ -25,8 +25,8 @@
#define CONST_RU_POLY %v13 #define CONST_RU_POLY %v13
#define CONST_CRC_POLY %v14 #define CONST_CRC_POLY %v14
.data .data
.align 8 .balign 8
/* /*
* The CRC-32 constant block contains reduction constants to fold and * The CRC-32 constant block contains reduction constants to fold and
...@@ -59,27 +59,29 @@ ...@@ -59,27 +59,29 @@
* P'(x) = 0x82F63B78 * P'(x) = 0x82F63B78
*/ */
.Lconstants_CRC_32_LE: SYM_DATA_START_LOCAL(constants_CRC_32_LE)
.octa 0x0F0E0D0C0B0A09080706050403020100 # BE->LE mask .octa 0x0F0E0D0C0B0A09080706050403020100 # BE->LE mask
.quad 0x1c6e41596, 0x154442bd4 # R2, R1 .quad 0x1c6e41596, 0x154442bd4 # R2, R1
.quad 0x0ccaa009e, 0x1751997d0 # R4, R3 .quad 0x0ccaa009e, 0x1751997d0 # R4, R3
.octa 0x163cd6124 # R5 .octa 0x163cd6124 # R5
.octa 0x1F7011641 # u' .octa 0x1F7011641 # u'
.octa 0x1DB710641 # P'(x) << 1 .octa 0x1DB710641 # P'(x) << 1
SYM_DATA_END(constants_CRC_32_LE)
.Lconstants_CRC_32C_LE: SYM_DATA_START_LOCAL(constants_CRC_32C_LE)
.octa 0x0F0E0D0C0B0A09080706050403020100 # BE->LE mask .octa 0x0F0E0D0C0B0A09080706050403020100 # BE->LE mask
.quad 0x09e4addf8, 0x740eef02 # R2, R1 .quad 0x09e4addf8, 0x740eef02 # R2, R1
.quad 0x14cd00bd6, 0xf20c0dfe # R4, R3 .quad 0x14cd00bd6, 0xf20c0dfe # R4, R3
.octa 0x0dd45aab8 # R5 .octa 0x0dd45aab8 # R5
.octa 0x0dea713f1 # u' .octa 0x0dea713f1 # u'
.octa 0x105ec76f0 # P'(x) << 1 .octa 0x105ec76f0 # P'(x) << 1
SYM_DATA_END(constants_CRC_32C_LE)
.previous .previous
GEN_BR_THUNK %r14 GEN_BR_THUNK %r14
.text .text
/* /*
* The CRC-32 functions use these calling conventions: * The CRC-32 functions use these calling conventions:
...@@ -102,17 +104,17 @@ ...@@ -102,17 +104,17 @@
* V10..V14: CRC-32 constants. * V10..V14: CRC-32 constants.
*/ */
ENTRY(crc32_le_vgfm_16) SYM_FUNC_START(crc32_le_vgfm_16)
larl %r5,.Lconstants_CRC_32_LE larl %r5,constants_CRC_32_LE
j crc32_le_vgfm_generic j crc32_le_vgfm_generic
ENDPROC(crc32_le_vgfm_16) SYM_FUNC_END(crc32_le_vgfm_16)
ENTRY(crc32c_le_vgfm_16) SYM_FUNC_START(crc32c_le_vgfm_16)
larl %r5,.Lconstants_CRC_32C_LE larl %r5,constants_CRC_32C_LE
j crc32_le_vgfm_generic j crc32_le_vgfm_generic
ENDPROC(crc32c_le_vgfm_16) SYM_FUNC_END(crc32c_le_vgfm_16)
ENTRY(crc32_le_vgfm_generic) SYM_FUNC_START(crc32_le_vgfm_generic)
/* Load CRC-32 constants */ /* Load CRC-32 constants */
VLM CONST_PERM_LE2BE,CONST_CRC_POLY,0,%r5 VLM CONST_PERM_LE2BE,CONST_CRC_POLY,0,%r5
...@@ -268,6 +270,6 @@ ENTRY(crc32_le_vgfm_generic) ...@@ -268,6 +270,6 @@ ENTRY(crc32_le_vgfm_generic)
.Ldone: .Ldone:
VLGVF %r2,%v2,2 VLGVF %r2,%v2,2
BR_EX %r14 BR_EX %r14
ENDPROC(crc32_le_vgfm_generic) SYM_FUNC_END(crc32_le_vgfm_generic)
.previous .previous
...@@ -43,10 +43,11 @@ struct ap_queue_status { ...@@ -43,10 +43,11 @@ struct ap_queue_status {
unsigned int queue_empty : 1; unsigned int queue_empty : 1;
unsigned int replies_waiting : 1; unsigned int replies_waiting : 1;
unsigned int queue_full : 1; unsigned int queue_full : 1;
unsigned int _pad1 : 4; unsigned int : 3;
unsigned int async : 1;
unsigned int irq_enabled : 1; unsigned int irq_enabled : 1;
unsigned int response_code : 8; unsigned int response_code : 8;
unsigned int _pad2 : 16; unsigned int : 16;
}; };
/* /*
...@@ -86,6 +87,42 @@ static inline bool ap_instructions_available(void) ...@@ -86,6 +87,42 @@ static inline bool ap_instructions_available(void)
return reg1 != 0; return reg1 != 0;
} }
/* TAPQ register GR2 response struct */
struct ap_tapq_gr2 {
union {
unsigned long value;
struct {
unsigned int fac : 32; /* facility bits */
unsigned int apinfo : 32; /* ap type, ... */
};
struct {
unsigned int s : 1; /* APSC */
unsigned int m : 1; /* AP4KM */
unsigned int c : 1; /* AP4KC */
unsigned int mode : 3;
unsigned int n : 1; /* APXA */
unsigned int : 1;
unsigned int class : 8;
unsigned int bs : 2; /* SE bind/assoc */
unsigned int : 14;
unsigned int at : 8; /* ap type */
unsigned int nd : 8; /* nr of domains */
unsigned int : 4;
unsigned int ml : 4; /* apxl ml */
unsigned int : 4;
unsigned int qd : 4; /* queue depth */
};
};
};
/*
* Convenience defines to be used with the bs field from struct ap_tapq_gr2
*/
#define AP_BS_Q_USABLE 0
#define AP_BS_Q_USABLE_NO_SECURE_KEY 1
#define AP_BS_Q_AVAIL_FOR_BINDING 2
#define AP_BS_Q_UNUSABLE 3
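A minimal usage sketch for the new bs field (not part of the commit; ap_queue_is_bindable() is a hypothetical helper, all other identifiers come from this header):

	/*
	 * Sketch, assuming the definitions above: query a queue's SE bind
	 * state via TAPQ with the T bit set and inspect the bs field.
	 */
	static inline bool ap_queue_is_bindable(ap_qid_t qid)
	{
		struct ap_tapq_gr2 info;
		struct ap_queue_status status;

		status = ap_test_queue(qid, 1, &info);
		if (status.response_code)
			return false;
		return info.bs == AP_BS_Q_AVAIL_FOR_BINDING;
	}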
/** /**
* ap_tapq(): Test adjunct processor queue. * ap_tapq(): Test adjunct processor queue.
* @qid: The AP queue number * @qid: The AP queue number
...@@ -93,7 +130,7 @@ static inline bool ap_instructions_available(void) ...@@ -93,7 +130,7 @@ static inline bool ap_instructions_available(void)
* *
* Returns AP queue status structure. * Returns AP queue status structure.
*/ */
static inline struct ap_queue_status ap_tapq(ap_qid_t qid, unsigned long *info) static inline struct ap_queue_status ap_tapq(ap_qid_t qid, struct ap_tapq_gr2 *info)
{ {
union ap_queue_status_reg reg1; union ap_queue_status_reg reg1;
unsigned long reg2; unsigned long reg2;
...@@ -108,7 +145,7 @@ static inline struct ap_queue_status ap_tapq(ap_qid_t qid, unsigned long *info) ...@@ -108,7 +145,7 @@ static inline struct ap_queue_status ap_tapq(ap_qid_t qid, unsigned long *info)
: [qid] "d" (qid) : [qid] "d" (qid)
: "cc", "0", "1", "2"); : "cc", "0", "1", "2");
if (info) if (info)
*info = reg2; info->value = reg2;
return reg1.status; return reg1.status;
} }
...@@ -116,13 +153,12 @@ static inline struct ap_queue_status ap_tapq(ap_qid_t qid, unsigned long *info) ...@@ -116,13 +153,12 @@ static inline struct ap_queue_status ap_tapq(ap_qid_t qid, unsigned long *info)
* ap_test_queue(): Test adjunct processor queue. * ap_test_queue(): Test adjunct processor queue.
* @qid: The AP queue number * @qid: The AP queue number
* @tbit: Test facilities bit * @tbit: Test facilities bit
* @info: Pointer to queue descriptor * @info: Ptr to tapq gr2 struct
* *
* Returns AP queue status structure. * Returns AP queue status structure.
*/ */
static inline struct ap_queue_status ap_test_queue(ap_qid_t qid, static inline struct ap_queue_status ap_test_queue(ap_qid_t qid, int tbit,
int tbit, struct ap_tapq_gr2 *info)
unsigned long *info)
{ {
if (tbit) if (tbit)
qid |= 1UL << 23; /* set T bit*/ qid |= 1UL << 23; /* set T bit*/
...@@ -132,14 +168,18 @@ static inline struct ap_queue_status ap_test_queue(ap_qid_t qid, ...@@ -132,14 +168,18 @@ static inline struct ap_queue_status ap_test_queue(ap_qid_t qid,
/** /**
* ap_pqap_rapq(): Reset adjunct processor queue. * ap_pqap_rapq(): Reset adjunct processor queue.
* @qid: The AP queue number * @qid: The AP queue number
* @fbit: if != 0 set F bit
* *
* Returns AP queue status structure. * Returns AP queue status structure.
*/ */
static inline struct ap_queue_status ap_rapq(ap_qid_t qid) static inline struct ap_queue_status ap_rapq(ap_qid_t qid, int fbit)
{ {
unsigned long reg0 = qid | (1UL << 24); /* fc 1UL is RAPQ */ unsigned long reg0 = qid | (1UL << 24); /* fc 1UL is RAPQ */
union ap_queue_status_reg reg1; union ap_queue_status_reg reg1;
if (fbit)
reg0 |= 1UL << 22;
asm volatile( asm volatile(
" lgr 0,%[reg0]\n" /* qid arg into gr0 */ " lgr 0,%[reg0]\n" /* qid arg into gr0 */
" .insn rre,0xb2af0000,0,0\n" /* PQAP(RAPQ) */ " .insn rre,0xb2af0000,0,0\n" /* PQAP(RAPQ) */
...@@ -153,14 +193,18 @@ static inline struct ap_queue_status ap_rapq(ap_qid_t qid) ...@@ -153,14 +193,18 @@ static inline struct ap_queue_status ap_rapq(ap_qid_t qid)
/** /**
* ap_pqap_zapq(): Reset and zeroize adjunct processor queue. * ap_pqap_zapq(): Reset and zeroize adjunct processor queue.
* @qid: The AP queue number * @qid: The AP queue number
* @fbit: if != 0 set F bit
* *
* Returns AP queue status structure. * Returns AP queue status structure.
*/ */
static inline struct ap_queue_status ap_zapq(ap_qid_t qid) static inline struct ap_queue_status ap_zapq(ap_qid_t qid, int fbit)
{ {
unsigned long reg0 = qid | (2UL << 24); /* fc 2UL is ZAPQ */ unsigned long reg0 = qid | (2UL << 24); /* fc 2UL is ZAPQ */
union ap_queue_status_reg reg1; union ap_queue_status_reg reg1;
if (fbit)
reg0 |= 1UL << 22;
asm volatile( asm volatile(
" lgr 0,%[reg0]\n" /* qid arg into gr0 */ " lgr 0,%[reg0]\n" /* qid arg into gr0 */
" .insn rre,0xb2af0000,0,0\n" /* PQAP(ZAPQ) */ " .insn rre,0xb2af0000,0,0\n" /* PQAP(ZAPQ) */
...@@ -180,15 +224,16 @@ struct ap_config_info { ...@@ -180,15 +224,16 @@ struct ap_config_info {
unsigned int apxa : 1; /* N bit */ unsigned int apxa : 1; /* N bit */
unsigned int qact : 1; /* C bit */ unsigned int qact : 1; /* C bit */
unsigned int rc8a : 1; /* R bit */ unsigned int rc8a : 1; /* R bit */
unsigned char _reserved1 : 4; unsigned int : 4;
unsigned char _reserved2[3]; unsigned int apsb : 1; /* B bit */
unsigned char Na; /* max # of APs - 1 */ unsigned int : 23;
unsigned char Nd; /* max # of Domains - 1 */ unsigned char na; /* max # of APs - 1 */
unsigned char _reserved3[10]; unsigned char nd; /* max # of Domains - 1 */
unsigned char _reserved0[10];
unsigned int apm[8]; /* AP ID mask */ unsigned int apm[8]; /* AP ID mask */
unsigned int aqm[8]; /* AP (usage) queue mask */ unsigned int aqm[8]; /* AP (usage) queue mask */
unsigned int adm[8]; /* AP (control) domain mask */ unsigned int adm[8]; /* AP (control) domain mask */
unsigned char _reserved4[16]; unsigned char _reserved1[16];
} __aligned(8); } __aligned(8);
/** /**
...@@ -318,6 +363,59 @@ static inline struct ap_queue_status ap_qact(ap_qid_t qid, int ifbit, ...@@ -318,6 +363,59 @@ static inline struct ap_queue_status ap_qact(ap_qid_t qid, int ifbit,
return reg1.status; return reg1.status;
} }
/*
* ap_bapq(): SE bind AP queue.
* @qid: The AP queue number
*
* Returns AP queue status structure.
*
* Invoking this function in a non-SE environment
* may cause a specification exception.
*/
static inline struct ap_queue_status ap_bapq(ap_qid_t qid)
{
unsigned long reg0 = qid | (7UL << 24); /* fc 7 is BAPQ */
union ap_queue_status_reg reg1;
asm volatile(
" lgr 0,%[reg0]\n" /* qid arg into gr0 */
" .insn rre,0xb2af0000,0,0\n" /* PQAP(BAPQ) */
" lgr %[reg1],1\n" /* gr1 (status) into reg1 */
: [reg1] "=&d" (reg1.value)
: [reg0] "d" (reg0)
: "cc", "0", "1");
return reg1.status;
}
/*
* ap_aapq(): SE associate AP queue.
* @qid: The AP queue number
* @sec_idx: The secret index
*
* Returns AP queue status structure.
*
* Invoking this function in a non-SE environment
* may cause a specification exception.
*/
static inline struct ap_queue_status ap_aapq(ap_qid_t qid, unsigned int sec_idx)
{
unsigned long reg0 = qid | (8UL << 24); /* fc 8 is AAPQ */
unsigned long reg2 = sec_idx;
union ap_queue_status_reg reg1;
asm volatile(
" lgr 0,%[reg0]\n" /* qid arg into gr0 */
" lgr 2,%[reg2]\n" /* secret index into gr2 */
" .insn rre,0xb2af0000,0,0\n" /* PQAP(AAPQ) */
" lgr %[reg1],1\n" /* gr1 (status) into reg1 */
: [reg1] "=&d" (reg1.value)
: [reg0] "d" (reg0), [reg2] "d" (reg2)
: "cc", "0", "1", "2");
return reg1.status;
}
/** /**
* ap_nqap(): Send message to adjunct processor queue. * ap_nqap(): Send message to adjunct processor queue.
* @qid: The AP queue number * @qid: The AP queue number
...@@ -359,10 +457,11 @@ static inline struct ap_queue_status ap_nqap(ap_qid_t qid, ...@@ -359,10 +457,11 @@ static inline struct ap_queue_status ap_nqap(ap_qid_t qid,
* ap_dqap(): Receive message from adjunct processor queue. * ap_dqap(): Receive message from adjunct processor queue.
* @qid: The AP queue number * @qid: The AP queue number
* @psmid: Pointer to program supplied message identifier * @psmid: Pointer to program supplied message identifier
* @msg: The message text * @msg: Pointer to message buffer
* @length: The message length * @msglen: Message buffer size
* @reslength: Resitual length on return * @length: Pointer to length of actually written bytes
* @resgr0: input: gr0 value (only used if != 0), output: resitual gr0 content * @reslength: Residual length on return
* @resgr0: input: gr0 value (only used if != 0), output: residual gr0 content
* *
* Returns AP queue status structure. * Returns AP queue status structure.
* Condition code 1 on DQAP means the receive has taken place * Condition code 1 on DQAP means the receive has taken place
...@@ -386,8 +485,9 @@ static inline struct ap_queue_status ap_nqap(ap_qid_t qid, ...@@ -386,8 +485,9 @@ static inline struct ap_queue_status ap_nqap(ap_qid_t qid,
* *resgr0 is to be used instead of qid to further process this entry. * *resgr0 is to be used instead of qid to further process this entry.
*/ */
static inline struct ap_queue_status ap_dqap(ap_qid_t qid, static inline struct ap_queue_status ap_dqap(ap_qid_t qid,
unsigned long long *psmid, unsigned long *psmid,
void *msg, size_t length, void *msg, size_t msglen,
size_t *length,
size_t *reslength, size_t *reslength,
unsigned long *resgr0) unsigned long *resgr0)
{ {
...@@ -399,7 +499,7 @@ static inline struct ap_queue_status ap_dqap(ap_qid_t qid, ...@@ -399,7 +499,7 @@ static inline struct ap_queue_status ap_dqap(ap_qid_t qid,
rp1.even = 0UL; rp1.even = 0UL;
rp1.odd = 0UL; rp1.odd = 0UL;
rp2.even = (unsigned long)msg; rp2.even = (unsigned long)msg;
rp2.odd = (unsigned long)length; rp2.odd = (unsigned long)msglen;
asm volatile( asm volatile(
" lgr 0,%[reg0]\n" /* qid param into gr0 */ " lgr 0,%[reg0]\n" /* qid param into gr0 */
...@@ -429,11 +529,15 @@ static inline struct ap_queue_status ap_dqap(ap_qid_t qid, ...@@ -429,11 +529,15 @@ static inline struct ap_queue_status ap_dqap(ap_qid_t qid,
if (resgr0) if (resgr0)
*resgr0 = reg0; *resgr0 = reg0;
} else { } else {
*psmid = (((unsigned long long)rp1.even) << 32) + rp1.odd; *psmid = (rp1.even << 32) + rp1.odd;
if (resgr0) if (resgr0)
*resgr0 = 0; *resgr0 = 0;
} }
/* update *length with the nr of bytes stored into the msg buffer */
if (length)
*length = msglen - rp2.odd;
return reg1.status; return reg1.status;
} }
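A hedged sketch of the reworked ap_dqap() parameter wiring (demo_recv(), qid, and the buffer are illustrative only; the point is the new msglen/length split):

	/* Sketch: receive one reply and learn how many bytes were stored. */
	static inline long demo_recv(ap_qid_t qid, void *buf, size_t buflen)
	{
		struct ap_queue_status status;
		unsigned long psmid, resgr0 = 0;
		size_t written = 0, resl = 0;

		status = ap_dqap(qid, &psmid, buf, buflen,
				 &written, &resl, &resgr0);
		if (status.response_code)
			return -1;	/* real callers map this to an errno */
		return written;		/* bytes actually copied into buf */
	}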
......
...@@ -12,13 +12,7 @@ ...@@ -12,13 +12,7 @@
#ifndef _S390_CHECKSUM_H #ifndef _S390_CHECKSUM_H
#define _S390_CHECKSUM_H #define _S390_CHECKSUM_H
#ifdef CONFIG_GENERIC_CSUM #include <linux/kasan-checks.h>
#include <asm-generic/checksum.h>
#else /* CONFIG_GENERIC_CSUM */
#include <linux/uaccess.h>
#include <linux/in6.h> #include <linux/in6.h>
/* /*
...@@ -40,6 +34,7 @@ static inline __wsum csum_partial(const void *buff, int len, __wsum sum) ...@@ -40,6 +34,7 @@ static inline __wsum csum_partial(const void *buff, int len, __wsum sum)
.odd = (unsigned long) len, .odd = (unsigned long) len,
}; };
kasan_check_read(buff, len);
asm volatile( asm volatile(
"0: cksm %[sum],%[rp]\n" "0: cksm %[sum],%[rp]\n"
" jo 0b\n" " jo 0b\n"
...@@ -135,5 +130,4 @@ static inline __sum16 csum_ipv6_magic(const struct in6_addr *saddr, ...@@ -135,5 +130,4 @@ static inline __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
return csum_fold((__force __wsum)(sum >> 32)); return csum_fold((__force __wsum)(sum >> 32));
} }
#endif /* CONFIG_GENERIC_CSUM */
#endif /* _S390_CHECKSUM_H */ #endif /* _S390_CHECKSUM_H */
...@@ -90,7 +90,7 @@ struct diag8c { ...@@ -90,7 +90,7 @@ struct diag8c {
u8 num_partitions; u8 num_partitions;
u16 width; u16 width;
u16 height; u16 height;
u8 data[0]; u8 data[];
} __packed __aligned(4); } __packed __aligned(4);
extern int diag8c(struct diag8c *out, struct ccw_dev_id *devno); extern int diag8c(struct diag8c *out, struct ccw_dev_id *devno);
......
...@@ -60,9 +60,4 @@ static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs, ...@@ -60,9 +60,4 @@ static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs,
#define arch_exit_to_user_mode_prepare arch_exit_to_user_mode_prepare #define arch_exit_to_user_mode_prepare arch_exit_to_user_mode_prepare
static inline bool on_thread_stack(void)
{
return !(((unsigned long)(current->stack) ^ current_stack_pointer) & ~(THREAD_SIZE - 1));
}
#endif #endif
...@@ -286,7 +286,7 @@ struct tccb_tcat { ...@@ -286,7 +286,7 @@ struct tccb_tcat {
*/ */
struct tccb { struct tccb {
struct tccb_tcah tcah; struct tccb_tcah tcah;
u8 tca[0]; u8 tca[];
} __attribute__ ((packed, aligned(8))); } __attribute__ ((packed, aligned(8)));
struct tcw *tcw_get_intrg(struct tcw *tcw); struct tcw *tcw_get_intrg(struct tcw *tcw);
......
...@@ -2,7 +2,7 @@ ...@@ -2,7 +2,7 @@
#ifndef __ASM_KASAN_H #ifndef __ASM_KASAN_H
#define __ASM_KASAN_H #define __ASM_KASAN_H
#include <asm/pgtable.h> #include <linux/const.h>
#ifdef CONFIG_KASAN #ifdef CONFIG_KASAN
...@@ -13,35 +13,6 @@ ...@@ -13,35 +13,6 @@
#define KASAN_SHADOW_START KASAN_SHADOW_OFFSET #define KASAN_SHADOW_START KASAN_SHADOW_OFFSET
#define KASAN_SHADOW_END (KASAN_SHADOW_START + KASAN_SHADOW_SIZE) #define KASAN_SHADOW_END (KASAN_SHADOW_START + KASAN_SHADOW_SIZE)
extern void kasan_early_init(void);
/*
* Estimate kasan memory requirements, which it will reserve
* at the very end of available physical memory. To estimate
* that, we take into account that kasan would require
* 1/8 of available physical memory (for shadow memory) +
* creating page tables for the shadow memory region.
* To keep page tables estimates simple take the double of
* combined ptes size.
*
* physmem parameter has to be already adjusted if not entire physical memory
* would be used (e.g. due to effect of "mem=" option).
*/
static inline unsigned long kasan_estimate_memory_needs(unsigned long physmem)
{
unsigned long kasan_needs;
unsigned long pages;
/* for shadow memory */
kasan_needs = round_up(physmem / 8, PAGE_SIZE);
/* for paging structures */
pages = DIV_ROUND_UP(kasan_needs, PAGE_SIZE);
kasan_needs += DIV_ROUND_UP(pages, _PAGE_ENTRIES) * _PAGE_TABLE_SIZE * 2;
return kasan_needs;
}
#else
static inline void kasan_early_init(void) { }
static inline unsigned long kasan_estimate_memory_needs(unsigned long physmem) { return 0; }
#endif #endif
#endif #endif
...@@ -4,7 +4,7 @@ ...@@ -4,7 +4,7 @@
#include <linux/stringify.h> #include <linux/stringify.h>
#define __ALIGN .align 16, 0x07 #define __ALIGN .balign CONFIG_FUNCTION_ALIGNMENT, 0x07
#define __ALIGN_STR __stringify(__ALIGN) #define __ALIGN_STR __stringify(__ALIGN)
#endif #endif
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_S390_MEM_DETECT_H
#define _ASM_S390_MEM_DETECT_H
#include <linux/types.h>
enum mem_info_source {
MEM_DETECT_NONE = 0,
MEM_DETECT_SCLP_STOR_INFO,
MEM_DETECT_DIAG260,
MEM_DETECT_SCLP_READ_INFO,
MEM_DETECT_BIN_SEARCH
};
struct mem_detect_block {
u64 start;
u64 end;
};
/*
* Storage element id is defined as 1 byte (up to 256 storage elements).
* In practice only storage element ids 0 and 1 are used.
* According to the architecture one storage element could have as many as
* 1020 subincrements. 255 mem_detect_blocks are embedded in mem_detect_info.
* If more mem_detect_blocks are required, a block of memory from already
* known mem_detect_block is taken (entries_extended points to it).
*/
#define MEM_INLINED_ENTRIES 255 /* (PAGE_SIZE - 16) / 16 */
struct mem_detect_info {
u32 count;
u8 info_source;
unsigned long usable;
struct mem_detect_block entries[MEM_INLINED_ENTRIES];
struct mem_detect_block *entries_extended;
};
extern struct mem_detect_info mem_detect;
void add_mem_detect_block(u64 start, u64 end);
static inline int __get_mem_detect_block(u32 n, unsigned long *start,
unsigned long *end, bool respect_usable_limit)
{
if (n >= mem_detect.count) {
*start = 0;
*end = 0;
return -1;
}
if (n < MEM_INLINED_ENTRIES) {
*start = (unsigned long)mem_detect.entries[n].start;
*end = (unsigned long)mem_detect.entries[n].end;
} else {
*start = (unsigned long)mem_detect.entries_extended[n - MEM_INLINED_ENTRIES].start;
*end = (unsigned long)mem_detect.entries_extended[n - MEM_INLINED_ENTRIES].end;
}
if (respect_usable_limit && mem_detect.usable) {
if (*start >= mem_detect.usable)
return -1;
if (*end > mem_detect.usable)
*end = mem_detect.usable;
}
return 0;
}
/**
* for_each_mem_detect_usable_block - early online memory range iterator
* @i: an integer used as loop variable
* @p_start: ptr to unsigned long for start address of the range
* @p_end: ptr to unsigned long for end address of the range
*
* Walks over detected online memory ranges below usable limit.
*/
#define for_each_mem_detect_usable_block(i, p_start, p_end) \
for (i = 0; !__get_mem_detect_block(i, p_start, p_end, true); i++)
/* Walks over all detected online memory ranges disregarding usable limit. */
#define for_each_mem_detect_block(i, p_start, p_end) \
for (i = 0; !__get_mem_detect_block(i, p_start, p_end, false); i++)
static inline unsigned long get_mem_detect_usable_total(void)
{
unsigned long start, end, total = 0;
int i;
for_each_mem_detect_usable_block(i, &start, &end)
total += end - start;
return total;
}
static inline void get_mem_detect_reserved(unsigned long *start,
unsigned long *size)
{
*start = (unsigned long)mem_detect.entries_extended;
if (mem_detect.count > MEM_INLINED_ENTRIES)
*size = (mem_detect.count - MEM_INLINED_ENTRIES) * sizeof(struct mem_detect_block);
else
*size = 0;
}
static inline unsigned long get_mem_detect_end(void)
{
unsigned long start;
unsigned long end;
if (mem_detect.usable)
return mem_detect.usable;
if (mem_detect.count) {
__get_mem_detect_block(mem_detect.count - 1, &start, &end, false);
return end;
}
return 0;
}
#endif
...@@ -2,6 +2,7 @@ ...@@ -2,6 +2,7 @@
#ifndef _ASM_S390_NOSPEC_ASM_H #ifndef _ASM_S390_NOSPEC_ASM_H
#define _ASM_S390_NOSPEC_ASM_H #define _ASM_S390_NOSPEC_ASM_H
#include <linux/linkage.h>
#include <asm/dwarf.h> #include <asm/dwarf.h>
#ifdef __ASSEMBLY__ #ifdef __ASSEMBLY__
...@@ -16,7 +17,7 @@ ...@@ -16,7 +17,7 @@
.macro __THUNK_PROLOG_NAME name .macro __THUNK_PROLOG_NAME name
#ifdef CONFIG_EXPOLINE_EXTERN #ifdef CONFIG_EXPOLINE_EXTERN
.pushsection .text,"ax",@progbits .pushsection .text,"ax",@progbits
.align 16,0x07 __ALIGN
#else #else
.pushsection .text.\name,"axG",@progbits,\name,comdat .pushsection .text.\name,"axG",@progbits,\name,comdat
#endif #endif
......
...@@ -60,7 +60,6 @@ struct perf_sf_sde_regs { ...@@ -60,7 +60,6 @@ struct perf_sf_sde_regs {
#define PERF_CPUM_SF_DIAG_MODE 0x0002 /* Diagnostic-sampling flag */ #define PERF_CPUM_SF_DIAG_MODE 0x0002 /* Diagnostic-sampling flag */
#define PERF_CPUM_SF_MODE_MASK (PERF_CPUM_SF_BASIC_MODE| \ #define PERF_CPUM_SF_MODE_MASK (PERF_CPUM_SF_BASIC_MODE| \
PERF_CPUM_SF_DIAG_MODE) PERF_CPUM_SF_DIAG_MODE)
#define PERF_CPUM_SF_FULL_BLOCKS 0x0004 /* Process full SDBs only */
#define PERF_CPUM_SF_FREQ_MODE 0x0008 /* Sampling with frequency */ #define PERF_CPUM_SF_FREQ_MODE 0x0008 /* Sampling with frequency */
#define REG_NONE 0 #define REG_NONE 0
...@@ -71,7 +70,6 @@ struct perf_sf_sde_regs { ...@@ -71,7 +70,6 @@ struct perf_sf_sde_regs {
#define SAMPL_RATE(hwc) ((hwc)->event_base) #define SAMPL_RATE(hwc) ((hwc)->event_base)
#define SAMPL_FLAGS(hwc) ((hwc)->config_base) #define SAMPL_FLAGS(hwc) ((hwc)->config_base)
#define SAMPL_DIAG_MODE(hwc) (SAMPL_FLAGS(hwc) & PERF_CPUM_SF_DIAG_MODE) #define SAMPL_DIAG_MODE(hwc) (SAMPL_FLAGS(hwc) & PERF_CPUM_SF_DIAG_MODE)
#define SDB_FULL_BLOCKS(hwc) (SAMPL_FLAGS(hwc) & PERF_CPUM_SF_FULL_BLOCKS)
#define SAMPLE_FREQ_MODE(hwc) (SAMPL_FLAGS(hwc) & PERF_CPUM_SF_FREQ_MODE) #define SAMPLE_FREQ_MODE(hwc) (SAMPL_FLAGS(hwc) & PERF_CPUM_SF_FREQ_MODE)
#define perf_arch_fetch_caller_regs(regs, __ip) do { \ #define perf_arch_fetch_caller_regs(regs, __ip) do { \
......
...@@ -34,7 +34,7 @@ enum { ...@@ -34,7 +34,7 @@ enum {
PG_DIRECT_MAP_MAX PG_DIRECT_MAP_MAX
}; };
extern atomic_long_t direct_pages_count[PG_DIRECT_MAP_MAX]; extern atomic_long_t __bootdata_preserved(direct_pages_count[PG_DIRECT_MAP_MAX]);
static inline void update_page_count(int level, long count) static inline void update_page_count(int level, long count)
{ {
......
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_S390_MEM_DETECT_H
#define _ASM_S390_MEM_DETECT_H
#include <linux/types.h>
enum physmem_info_source {
MEM_DETECT_NONE = 0,
MEM_DETECT_SCLP_STOR_INFO,
MEM_DETECT_DIAG260,
MEM_DETECT_SCLP_READ_INFO,
MEM_DETECT_BIN_SEARCH
};
struct physmem_range {
u64 start;
u64 end;
};
enum reserved_range_type {
RR_DECOMPRESSOR,
RR_INITRD,
RR_VMLINUX,
RR_AMODE31,
RR_IPLREPORT,
RR_CERT_COMP_LIST,
RR_MEM_DETECT_EXTENDED,
RR_VMEM,
RR_MAX
};
struct reserved_range {
unsigned long start;
unsigned long end;
struct reserved_range *chain;
};
/*
* Storage element id is defined as 1 byte (up to 256 storage elements).
* In practice only storage element ids 0 and 1 are used.
* According to the architecture one storage element could have as many as
* 1020 subincrements. 255 physmem_ranges are embedded in physmem_info.
* If more physmem_ranges are required, a block of memory from already
* known physmem_range is taken (online_extended points to it).
*/
#define MEM_INLINED_ENTRIES 255 /* (PAGE_SIZE - 16) / 16 */
struct physmem_info {
u32 range_count;
u8 info_source;
unsigned long usable;
struct reserved_range reserved[RR_MAX];
struct physmem_range online[MEM_INLINED_ENTRIES];
struct physmem_range *online_extended;
};
extern struct physmem_info physmem_info;
void add_physmem_online_range(u64 start, u64 end);
static inline int __get_physmem_range(u32 n, unsigned long *start,
unsigned long *end, bool respect_usable_limit)
{
if (n >= physmem_info.range_count) {
*start = 0;
*end = 0;
return -1;
}
if (n < MEM_INLINED_ENTRIES) {
*start = (unsigned long)physmem_info.online[n].start;
*end = (unsigned long)physmem_info.online[n].end;
} else {
*start = (unsigned long)physmem_info.online_extended[n - MEM_INLINED_ENTRIES].start;
*end = (unsigned long)physmem_info.online_extended[n - MEM_INLINED_ENTRIES].end;
}
if (respect_usable_limit && physmem_info.usable) {
if (*start >= physmem_info.usable)
return -1;
if (*end > physmem_info.usable)
*end = physmem_info.usable;
}
return 0;
}
/**
* for_each_physmem_usable_range - early online memory range iterator
* @i: an integer used as loop variable
* @p_start: ptr to unsigned long for start address of the range
* @p_end: ptr to unsigned long for end address of the range
*
* Walks over detected online memory ranges below usable limit.
*/
#define for_each_physmem_usable_range(i, p_start, p_end) \
for (i = 0; !__get_physmem_range(i, p_start, p_end, true); i++)
/* Walks over all detected online memory ranges disregarding usable limit. */
#define for_each_physmem_online_range(i, p_start, p_end) \
for (i = 0; !__get_physmem_range(i, p_start, p_end, false); i++)
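The iterator mirrors the get_mem_detect_usable_total() helper from the deleted mem_detect.h; for illustration, the same total can be computed on top of it (physmem_usable_total() is a hypothetical name):

	/* Sketch: total usable online memory, built on the iterator above. */
	static inline unsigned long physmem_usable_total(void)
	{
		unsigned long start, end, total = 0;
		int i;

		for_each_physmem_usable_range(i, &start, &end)
			total += end - start;
		return total;
	}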
static inline const char *get_physmem_info_source(void)
{
switch (physmem_info.info_source) {
case MEM_DETECT_SCLP_STOR_INFO:
return "sclp storage info";
case MEM_DETECT_DIAG260:
return "diag260";
case MEM_DETECT_SCLP_READ_INFO:
return "sclp read info";
case MEM_DETECT_BIN_SEARCH:
return "binary search";
}
return "none";
}
#define RR_TYPE_NAME(t) case RR_ ## t: return #t
static inline const char *get_rr_type_name(enum reserved_range_type t)
{
switch (t) {
RR_TYPE_NAME(DECOMPRESSOR);
RR_TYPE_NAME(INITRD);
RR_TYPE_NAME(VMLINUX);
RR_TYPE_NAME(AMODE31);
RR_TYPE_NAME(IPLREPORT);
RR_TYPE_NAME(CERT_COMP_LIST);
RR_TYPE_NAME(MEM_DETECT_EXTENDED);
RR_TYPE_NAME(VMEM);
default:
return "UNKNOWN";
}
}
#define for_each_physmem_reserved_type_range(t, range, p_start, p_end) \
for (range = &physmem_info.reserved[t], *p_start = range->start, *p_end = range->end; \
range && range->end; range = range->chain, \
*p_start = range ? range->start : 0, *p_end = range ? range->end : 0)
static inline struct reserved_range *__physmem_reserved_next(enum reserved_range_type *t,
struct reserved_range *range)
{
if (!range) {
range = &physmem_info.reserved[*t];
if (range->end)
return range;
}
if (range->chain)
return range->chain;
while (++*t < RR_MAX) {
range = &physmem_info.reserved[*t];
if (range->end)
return range;
}
return NULL;
}
#define for_each_physmem_reserved_range(t, range, p_start, p_end) \
for (t = 0, range = __physmem_reserved_next(&t, NULL), \
*p_start = range ? range->start : 0, *p_end = range ? range->end : 0; \
range; range = __physmem_reserved_next(&t, range), \
*p_start = range ? range->start : 0, *p_end = range ? range->end : 0)
static inline unsigned long get_physmem_reserved(enum reserved_range_type type,
unsigned long *addr, unsigned long *size)
{
*addr = physmem_info.reserved[type].start;
*size = physmem_info.reserved[type].end - physmem_info.reserved[type].start;
return *size;
}
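For illustration only (dump_physmem_reserved() is made up, and printk stands in for whatever output facility the call site has), walking every populated reserved range, including chained ones, could look like:

	/* Sketch: dump all reserved ranges via the iterator above. */
	static inline void dump_physmem_reserved(void)
	{
		struct reserved_range *range;
		enum reserved_range_type t;
		unsigned long start, end;

		for_each_physmem_reserved_range(t, range, &start, &end)
			printk("%s: 0x%lx-0x%lx\n",
			       get_rr_type_name(t), start, end);
	}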
#endif
...@@ -99,7 +99,6 @@ void cpu_detect_mhz_feature(void); ...@@ -99,7 +99,6 @@ void cpu_detect_mhz_feature(void);
extern const struct seq_operations cpuinfo_op; extern const struct seq_operations cpuinfo_op;
extern void execve_tail(void); extern void execve_tail(void);
extern void __bpon(void);
unsigned long vdso_size(void); unsigned long vdso_size(void);
/* /*
...@@ -119,6 +118,41 @@ unsigned long vdso_size(void); ...@@ -119,6 +118,41 @@ unsigned long vdso_size(void);
#define HAVE_ARCH_PICK_MMAP_LAYOUT #define HAVE_ARCH_PICK_MMAP_LAYOUT
#define __stackleak_poison __stackleak_poison
static __always_inline void __stackleak_poison(unsigned long erase_low,
unsigned long erase_high,
unsigned long poison)
{
unsigned long tmp, count;
count = erase_high - erase_low;
if (!count)
return;
asm volatile(
" cghi %[count],8\n"
" je 2f\n"
" aghi %[count],-(8+1)\n"
" srlg %[tmp],%[count],8\n"
" ltgr %[tmp],%[tmp]\n"
" jz 1f\n"
"0: stg %[poison],0(%[addr])\n"
" mvc 8(256-8,%[addr]),0(%[addr])\n"
" la %[addr],256(%[addr])\n"
" brctg %[tmp],0b\n"
"1: stg %[poison],0(%[addr])\n"
" larl %[tmp],3f\n"
" ex %[count],0(%[tmp])\n"
" j 4f\n"
"2: stg %[poison],0(%[addr])\n"
" j 4f\n"
"3: mvc 8(1,%[addr]),0(%[addr])\n"
"4:\n"
: [addr] "+&a" (erase_low), [count] "+&d" (count), [tmp] "=&a" (tmp)
: [poison] "d" (poison)
: "memory", "cc"
);
}
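As a readability aid only: the inline assembly above is semantically a poison fill of the erase window in 8-byte steps, done via stg plus overlapping mvc for speed. A plain-C equivalent (which the mvc-based version exists to outperform; the function name is invented) would be:

	/* Reference semantics of the asm: 8-byte stores of the poison word. */
	static inline void stackleak_poison_c_equiv(unsigned long erase_low,
						    unsigned long erase_high,
						    unsigned long poison)
	{
		while (erase_low < erase_high) {
			*(unsigned long *)erase_low = poison;
			erase_low += sizeof(poison);
		}
	}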
/* /*
* Thread structure * Thread structure
*/ */
...@@ -227,6 +261,13 @@ static __always_inline unsigned long __current_stack_pointer(void) ...@@ -227,6 +261,13 @@ static __always_inline unsigned long __current_stack_pointer(void)
return sp; return sp;
} }
static __always_inline bool on_thread_stack(void)
{
unsigned long ksp = S390_lowcore.kernel_stack;
return !((ksp ^ current_stack_pointer) & ~(THREAD_SIZE - 1));
}
static __always_inline unsigned short stap(void) static __always_inline unsigned short stap(void)
{ {
unsigned short cpu_address; unsigned short cpu_address;
...@@ -329,9 +370,6 @@ static __always_inline void __noreturn disabled_wait(void) ...@@ -329,9 +370,6 @@ static __always_inline void __noreturn disabled_wait(void)
#define ARCH_LOW_ADDRESS_LIMIT 0x7fffffffUL #define ARCH_LOW_ADDRESS_LIMIT 0x7fffffffUL
extern int s390_isolate_bp(void);
extern int s390_isolate_bp_guest(void);
static __always_inline bool regs_irqs_disabled(struct pt_regs *regs) static __always_inline bool regs_irqs_disabled(struct pt_regs *regs)
{ {
return arch_irqs_disabled_flags(regs->psw.mask); return arch_irqs_disabled_flags(regs->psw.mask);
......
...@@ -6,11 +6,23 @@ ...@@ -6,11 +6,23 @@
extern struct mutex cpa_mutex; extern struct mutex cpa_mutex;
#define SET_MEMORY_RO 1UL enum {
#define SET_MEMORY_RW 2UL _SET_MEMORY_RO_BIT,
#define SET_MEMORY_NX 4UL _SET_MEMORY_RW_BIT,
#define SET_MEMORY_X 8UL _SET_MEMORY_NX_BIT,
#define SET_MEMORY_4K 16UL _SET_MEMORY_X_BIT,
_SET_MEMORY_4K_BIT,
_SET_MEMORY_INV_BIT,
_SET_MEMORY_DEF_BIT,
};
#define SET_MEMORY_RO BIT(_SET_MEMORY_RO_BIT)
#define SET_MEMORY_RW BIT(_SET_MEMORY_RW_BIT)
#define SET_MEMORY_NX BIT(_SET_MEMORY_NX_BIT)
#define SET_MEMORY_X BIT(_SET_MEMORY_X_BIT)
#define SET_MEMORY_4K BIT(_SET_MEMORY_4K_BIT)
#define SET_MEMORY_INV BIT(_SET_MEMORY_INV_BIT)
#define SET_MEMORY_DEF BIT(_SET_MEMORY_DEF_BIT)
int __set_memory(unsigned long addr, int numpages, unsigned long flags); int __set_memory(unsigned long addr, int numpages, unsigned long flags);
...@@ -34,9 +46,23 @@ static inline int set_memory_x(unsigned long addr, int numpages) ...@@ -34,9 +46,23 @@ static inline int set_memory_x(unsigned long addr, int numpages)
return __set_memory(addr, numpages, SET_MEMORY_X); return __set_memory(addr, numpages, SET_MEMORY_X);
} }
#define set_memory_rox set_memory_rox
static inline int set_memory_rox(unsigned long addr, int numpages)
{
return __set_memory(addr, numpages, SET_MEMORY_RO | SET_MEMORY_X);
}
static inline int set_memory_rwnx(unsigned long addr, int numpages)
{
return __set_memory(addr, numpages, SET_MEMORY_RW | SET_MEMORY_NX);
}
static inline int set_memory_4k(unsigned long addr, int numpages) static inline int set_memory_4k(unsigned long addr, int numpages)
{ {
return __set_memory(addr, numpages, SET_MEMORY_4K); return __set_memory(addr, numpages, SET_MEMORY_4K);
} }
int set_direct_map_invalid_noflush(struct page *page);
int set_direct_map_default_noflush(struct page *page);
#endif #endif
...@@ -74,10 +74,6 @@ extern unsigned int zlib_dfltcc_support; ...@@ -74,10 +74,6 @@ extern unsigned int zlib_dfltcc_support;
extern int noexec_disabled; extern int noexec_disabled;
extern unsigned long ident_map_size; extern unsigned long ident_map_size;
extern unsigned long pgalloc_pos;
extern unsigned long pgalloc_end;
extern unsigned long pgalloc_low;
extern unsigned long __amode31_base;
/* The Write Back bit position in the physaddr is given by the SLPC PCI */ /* The Write Back bit position in the physaddr is given by the SLPC PCI */
extern unsigned long mio_wb_bit_mask; extern unsigned long mio_wb_bit_mask;
...@@ -150,13 +146,13 @@ static inline unsigned long kaslr_offset(void) ...@@ -150,13 +146,13 @@ static inline unsigned long kaslr_offset(void)
return __kaslr_offset; return __kaslr_offset;
} }
extern int is_full_image; extern int __kaslr_enabled;
static inline int kaslr_enabled(void)
struct initrd_data { {
unsigned long start; if (IS_ENABLED(CONFIG_RANDOMIZE_BASE))
unsigned long size; return __kaslr_enabled;
}; return 0;
extern struct initrd_data initrd_data; }
struct oldmem_data { struct oldmem_data {
unsigned long start; unsigned long start;
...@@ -164,7 +160,7 @@ struct oldmem_data { ...@@ -164,7 +160,7 @@ struct oldmem_data {
}; };
extern struct oldmem_data oldmem_data; extern struct oldmem_data oldmem_data;
static inline u32 gen_lpswe(unsigned long addr) static __always_inline u32 gen_lpswe(unsigned long addr)
{ {
BUILD_BUG_ON(addr > 0xfff); BUILD_BUG_ON(addr > 0xfff);
return 0xb2b20000 | addr; return 0xb2b20000 | addr;
......
...@@ -189,17 +189,53 @@ static __always_inline unsigned long get_stack_pointer(struct task_struct *task, ...@@ -189,17 +189,53 @@ static __always_inline unsigned long get_stack_pointer(struct task_struct *task,
(rettype)r2; \ (rettype)r2; \
}) })
#define call_on_stack_noreturn(fn, stack) \ /*
* Use call_nodat() to call a function with DAT disabled.
* Proper sign and zero extension of function arguments is done.
* Usage:
*
* rc = call_nodat(nr, rettype, fn, t1, a1, t2, a2, ...)
*
* - nr specifies the number of function arguments of fn.
* - fn is the function to be called, where fn is a physical address.
* - rettype is the return type of fn.
* - t1, a1, ... are pairs, where t1 must match the type of the first
* argument of fn, t2 the second, etc. a1 is the corresponding
* first function argument (not name), etc.
*
* fn() is called with standard C function call ABI, with the exception
* that no useful stackframe or stackpointer is passed via register 15.
* Therefore the called function must not use r15 to access the stack.
*/
#define call_nodat(nr, rettype, fn, ...) \
({ \ ({ \
void (*__fn)(void) = fn; \ rettype (*__fn)(CALL_PARM_##nr(__VA_ARGS__)) = (fn); \
/* aligned since psw_leave must not cross page boundary */ \
psw_t __aligned(16) psw_leave; \
psw_t psw_enter; \
CALL_LARGS_##nr(__VA_ARGS__); \
CALL_REGS_##nr; \
\ \
CALL_TYPECHECK_##nr(__VA_ARGS__); \
psw_enter.mask = PSW_KERNEL_BITS & ~PSW_MASK_DAT; \
psw_enter.addr = (unsigned long)__fn; \
asm volatile( \ asm volatile( \
" la 15,0(%[_stack])\n" \ " epsw 0,1\n" \
" xc %[_bc](8,15),%[_bc](15)\n" \ " risbg 1,0,0,31,32\n" \
" brasl 14,%[_fn]\n" \ " larl 7,1f\n" \
::[_bc] "i" (offsetof(struct stack_frame, back_chain)), \ " stg 1,%[psw_leave]\n" \
[_stack] "a" (stack), [_fn] "X" (__fn)); \ " stg 7,8+%[psw_leave]\n" \
BUG(); \ " la 7,%[psw_leave]\n" \
" lra 7,0(7)\n" \
" larl 1,0f\n" \
" lra 14,0(1)\n" \
" lpswe %[psw_enter]\n" \
"0: lpswe 0(7)\n" \
"1:\n" \
: CALL_FMT_##nr, [psw_leave] "=Q" (psw_leave) \
: [psw_enter] "Q" (psw_enter) \
: "7", CALL_CLOBBER_##nr); \
(rettype)r2; \
}) })
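The kdump rework later in this merge provides a concrete call site; quoting the single-argument form used there:

	purgatory_t purgatory = (purgatory_t)image->start;
	int rc;

	/* Run int purgatory(int) at its physical address with DAT off. */
	rc = call_nodat(1, int, purgatory, int, 0);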
#endif /* _ASM_S390_STACKTRACE_H */ #endif /* _ASM_S390_STACKTRACE_H */
...@@ -55,18 +55,6 @@ char *strstr(const char *s1, const char *s2); ...@@ -55,18 +55,6 @@ char *strstr(const char *s1, const char *s2);
#if defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__) #if defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__)
extern void *__memcpy(void *dest, const void *src, size_t n);
extern void *__memset(void *s, int c, size_t n);
extern void *__memmove(void *dest, const void *src, size_t n);
/*
* For files that are not instrumented (e.g. mm/slub.c) we
* should use not instrumented version of mem* functions.
*/
#define memcpy(dst, src, len) __memcpy(dst, src, len)
#define memmove(dst, src, len) __memmove(dst, src, len)
#define memset(s, c, n) __memset(s, c, n)
#define strlen(s) __strlen(s) #define strlen(s) __strlen(s)
#define __no_sanitize_prefix_strfunc(x) __##x #define __no_sanitize_prefix_strfunc(x) __##x
...@@ -79,6 +67,9 @@ extern void *__memmove(void *dest, const void *src, size_t n); ...@@ -79,6 +67,9 @@ extern void *__memmove(void *dest, const void *src, size_t n);
#define __no_sanitize_prefix_strfunc(x) x #define __no_sanitize_prefix_strfunc(x) x
#endif /* defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__) */ #endif /* defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__) */
void *__memcpy(void *dest, const void *src, size_t n);
void *__memset(void *s, int c, size_t n);
void *__memmove(void *dest, const void *src, size_t n);
void *__memset16(uint16_t *s, uint16_t v, size_t count); void *__memset16(uint16_t *s, uint16_t v, size_t count);
void *__memset32(uint32_t *s, uint32_t v, size_t count); void *__memset32(uint32_t *s, uint32_t v, size_t count);
void *__memset64(uint64_t *s, uint64_t v, size_t count); void *__memset64(uint64_t *s, uint64_t v, size_t count);
......
...@@ -9,6 +9,9 @@ ...@@ -9,6 +9,9 @@
#define _ASM_THREAD_INFO_H #define _ASM_THREAD_INFO_H
#include <linux/bits.h> #include <linux/bits.h>
#ifndef ASM_OFFSETS_C
#include <asm/asm-offsets.h>
#endif
/* /*
* General size of kernel stacks * General size of kernel stacks
...@@ -21,13 +24,12 @@ ...@@ -21,13 +24,12 @@
#define BOOT_STACK_SIZE (PAGE_SIZE << 2) #define BOOT_STACK_SIZE (PAGE_SIZE << 2)
#define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER) #define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER)
#define STACK_INIT_OFFSET (THREAD_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE)
#ifndef __ASSEMBLY__ #ifndef __ASSEMBLY__
#include <asm/lowcore.h> #include <asm/lowcore.h>
#include <asm/page.h> #include <asm/page.h>
#define STACK_INIT_OFFSET \
(THREAD_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs))
/* /*
* low level task data that entry.S needs immediate access to * low level task data that entry.S needs immediate access to
* - this struct should fit entirely inside of one cache line * - this struct should fit entirely inside of one cache line
...@@ -70,7 +72,6 @@ void arch_setup_new_exec(void); ...@@ -70,7 +72,6 @@ void arch_setup_new_exec(void);
#define TIF_PATCH_PENDING 5 /* pending live patching update */ #define TIF_PATCH_PENDING 5 /* pending live patching update */
#define TIF_PGSTE 6 /* New mm's will use 4K page tables */ #define TIF_PGSTE 6 /* New mm's will use 4K page tables */
#define TIF_NOTIFY_SIGNAL 7 /* signal notifications exist */ #define TIF_NOTIFY_SIGNAL 7 /* signal notifications exist */
#define TIF_ISOLATE_BP 8 /* Run process with isolated BP */
#define TIF_ISOLATE_BP_GUEST 9 /* Run KVM guests with isolated BP */ #define TIF_ISOLATE_BP_GUEST 9 /* Run KVM guests with isolated BP */
#define TIF_PER_TRAP 10 /* Need to handle PER trap on exit to usermode */ #define TIF_PER_TRAP 10 /* Need to handle PER trap on exit to usermode */
...@@ -94,7 +95,6 @@ void arch_setup_new_exec(void); ...@@ -94,7 +95,6 @@ void arch_setup_new_exec(void);
#define _TIF_UPROBE BIT(TIF_UPROBE) #define _TIF_UPROBE BIT(TIF_UPROBE)
#define _TIF_GUARDED_STORAGE BIT(TIF_GUARDED_STORAGE) #define _TIF_GUARDED_STORAGE BIT(TIF_GUARDED_STORAGE)
#define _TIF_PATCH_PENDING BIT(TIF_PATCH_PENDING) #define _TIF_PATCH_PENDING BIT(TIF_PATCH_PENDING)
#define _TIF_ISOLATE_BP BIT(TIF_ISOLATE_BP)
#define _TIF_ISOLATE_BP_GUEST BIT(TIF_ISOLATE_BP_GUEST) #define _TIF_ISOLATE_BP_GUEST BIT(TIF_ISOLATE_BP_GUEST)
#define _TIF_PER_TRAP BIT(TIF_PER_TRAP) #define _TIF_PER_TRAP BIT(TIF_PER_TRAP)
......
...@@ -60,7 +60,7 @@ typedef struct { ...@@ -60,7 +60,7 @@ typedef struct {
* except of floats, and long long (32 bit) * except of floats, and long long (32 bit)
* *
*/ */
long args[0]; long args[];
} debug_sprintf_entry_t; } debug_sprintf_entry_t;
/* internal function prototypes */ /* internal function prototypes */
...@@ -981,16 +981,6 @@ static struct ctl_table s390dbf_table[] = { ...@@ -981,16 +981,6 @@ static struct ctl_table s390dbf_table[] = {
{ } { }
}; };
static struct ctl_table s390dbf_dir_table[] = {
{
.procname = "s390dbf",
.maxlen = 0,
.mode = S_IRUGO | S_IXUGO,
.child = s390dbf_table,
},
{ }
};
static struct ctl_table_header *s390dbf_sysctl_header; static struct ctl_table_header *s390dbf_sysctl_header;
/** /**
...@@ -1574,7 +1564,7 @@ static int debug_sprintf_format_fn(debug_info_t *id, struct debug_view *view, ...@@ -1574,7 +1564,7 @@ static int debug_sprintf_format_fn(debug_info_t *id, struct debug_view *view,
*/ */
static int __init debug_init(void) static int __init debug_init(void)
{ {
s390dbf_sysctl_header = register_sysctl_table(s390dbf_dir_table); s390dbf_sysctl_header = register_sysctl("s390dbf", s390dbf_table);
mutex_lock(&debug_mutex); mutex_lock(&debug_mutex);
debug_debugfs_root_entry = debugfs_create_dir(DEBUG_DIR_ROOT, NULL); debug_debugfs_root_entry = debugfs_create_dir(DEBUG_DIR_ROOT, NULL);
initialized = 1; initialized = 1;
......
...@@ -41,60 +41,50 @@ const char *stack_type_name(enum stack_type type) ...@@ -41,60 +41,50 @@ const char *stack_type_name(enum stack_type type)
EXPORT_SYMBOL_GPL(stack_type_name); EXPORT_SYMBOL_GPL(stack_type_name);
static inline bool in_stack(unsigned long sp, struct stack_info *info, static inline bool in_stack(unsigned long sp, struct stack_info *info,
enum stack_type type, unsigned long low, enum stack_type type, unsigned long stack)
unsigned long high)
{ {
if (sp < low || sp >= high) if (sp < stack || sp >= stack + THREAD_SIZE)
return false; return false;
info->type = type; info->type = type;
info->begin = low; info->begin = stack;
info->end = high; info->end = stack + THREAD_SIZE;
return true; return true;
} }
static bool in_task_stack(unsigned long sp, struct task_struct *task, static bool in_task_stack(unsigned long sp, struct task_struct *task,
struct stack_info *info) struct stack_info *info)
{ {
unsigned long stack; unsigned long stack = (unsigned long)task_stack_page(task);
stack = (unsigned long) task_stack_page(task); return in_stack(sp, info, STACK_TYPE_TASK, stack);
return in_stack(sp, info, STACK_TYPE_TASK, stack, stack + THREAD_SIZE);
} }
static bool in_irq_stack(unsigned long sp, struct stack_info *info) static bool in_irq_stack(unsigned long sp, struct stack_info *info)
{ {
unsigned long frame_size, top; unsigned long stack = S390_lowcore.async_stack - STACK_INIT_OFFSET;
frame_size = STACK_FRAME_OVERHEAD + sizeof(struct pt_regs); return in_stack(sp, info, STACK_TYPE_IRQ, stack);
top = S390_lowcore.async_stack + frame_size;
return in_stack(sp, info, STACK_TYPE_IRQ, top - THREAD_SIZE, top);
} }
static bool in_nodat_stack(unsigned long sp, struct stack_info *info) static bool in_nodat_stack(unsigned long sp, struct stack_info *info)
{ {
unsigned long frame_size, top; unsigned long stack = S390_lowcore.nodat_stack - STACK_INIT_OFFSET;
frame_size = STACK_FRAME_OVERHEAD + sizeof(struct pt_regs); return in_stack(sp, info, STACK_TYPE_NODAT, stack);
top = S390_lowcore.nodat_stack + frame_size;
return in_stack(sp, info, STACK_TYPE_NODAT, top - THREAD_SIZE, top);
} }
static bool in_mcck_stack(unsigned long sp, struct stack_info *info) static bool in_mcck_stack(unsigned long sp, struct stack_info *info)
{ {
unsigned long frame_size, top; unsigned long stack = S390_lowcore.mcck_stack - STACK_INIT_OFFSET;
frame_size = STACK_FRAME_OVERHEAD + sizeof(struct pt_regs); return in_stack(sp, info, STACK_TYPE_MCCK, stack);
top = S390_lowcore.mcck_stack + frame_size;
return in_stack(sp, info, STACK_TYPE_MCCK, top - THREAD_SIZE, top);
} }
static bool in_restart_stack(unsigned long sp, struct stack_info *info) static bool in_restart_stack(unsigned long sp, struct stack_info *info)
{ {
unsigned long frame_size, top; unsigned long stack = S390_lowcore.restart_stack - STACK_INIT_OFFSET;
frame_size = STACK_FRAME_OVERHEAD + sizeof(struct pt_regs); return in_stack(sp, info, STACK_TYPE_RESTART, stack);
top = S390_lowcore.restart_stack + frame_size;
return in_stack(sp, info, STACK_TYPE_RESTART, top - THREAD_SIZE, top);
} }
int get_stack_info(unsigned long sp, struct task_struct *task, int get_stack_info(unsigned long sp, struct task_struct *task,
...@@ -152,7 +142,13 @@ void show_stack(struct task_struct *task, unsigned long *stack, ...@@ -152,7 +142,13 @@ void show_stack(struct task_struct *task, unsigned long *stack,
static void show_last_breaking_event(struct pt_regs *regs) static void show_last_breaking_event(struct pt_regs *regs)
{ {
printk("Last Breaking-Event-Address:\n"); printk("Last Breaking-Event-Address:\n");
printk(" [<%016lx>] %pSR\n", regs->last_break, (void *)regs->last_break); printk(" [<%016lx>] ", regs->last_break);
if (user_mode(regs)) {
print_vma_addr(KERN_CONT, regs->last_break);
pr_cont("\n");
} else {
pr_cont("%pSR\n", (void *)regs->last_break);
}
} }
void show_registers(struct pt_regs *regs) void show_registers(struct pt_regs *regs)
......
...@@ -34,8 +34,6 @@ ...@@ -34,8 +34,6 @@
#include <asm/switch_to.h> #include <asm/switch_to.h>
#include "entry.h" #include "entry.h"
int __bootdata(is_full_image);
#define decompressor_handled_param(param) \ #define decompressor_handled_param(param) \
static int __init ignore_decompressor_param_##param(char *s) \ static int __init ignore_decompressor_param_##param(char *s) \
{ \ { \
...@@ -53,6 +51,14 @@ decompressor_handled_param(nokaslr); ...@@ -53,6 +51,14 @@ decompressor_handled_param(nokaslr);
decompressor_handled_param(prot_virt); decompressor_handled_param(prot_virt);
#endif #endif
static void __init kasan_early_init(void)
{
#ifdef CONFIG_KASAN
init_task.kasan_depth = 0;
sclp_early_printk("KernelAddressSanitizer initialized\n");
#endif
}
static void __init reset_tod_clock(void) static void __init reset_tod_clock(void)
{ {
union tod_clock clk; union tod_clock clk;
...@@ -288,17 +294,6 @@ static void __init setup_boot_command_line(void) ...@@ -288,17 +294,6 @@ static void __init setup_boot_command_line(void)
strscpy(boot_command_line, early_command_line, COMMAND_LINE_SIZE); strscpy(boot_command_line, early_command_line, COMMAND_LINE_SIZE);
} }
static void __init check_image_bootable(void)
{
if (is_full_image)
return;
sclp_early_printk("Linux kernel boot failure: An attempt to boot a vmlinux ELF image failed.\n");
sclp_early_printk("This image does not contain all parts necessary for starting up. Use\n");
sclp_early_printk("bzImage or arch/s390/boot/compressed/vmlinux instead.\n");
disabled_wait();
}
static void __init sort_amode31_extable(void) static void __init sort_amode31_extable(void)
{ {
sort_extable(__start_amode31_ex_table, __stop_amode31_ex_table); sort_extable(__start_amode31_ex_table, __stop_amode31_ex_table);
...@@ -306,8 +301,8 @@ static void __init sort_amode31_extable(void) ...@@ -306,8 +301,8 @@ static void __init sort_amode31_extable(void)
void __init startup_init(void) void __init startup_init(void)
{ {
kasan_early_init();
reset_tod_clock(); reset_tod_clock();
check_image_bootable();
time_early_init(); time_early_init();
init_kernel_storage_key(); init_kernel_storage_key();
lockdep_off(); lockdep_off();
......
...@@ -7,7 +7,7 @@ ...@@ -7,7 +7,7 @@
#include <linux/linkage.h> #include <linux/linkage.h>
#include <asm/asm-offsets.h> #include <asm/asm-offsets.h>
ENTRY(early_pgm_check_handler) SYM_CODE_START(early_pgm_check_handler)
stmg %r8,%r15,__LC_SAVE_AREA_SYNC stmg %r8,%r15,__LC_SAVE_AREA_SYNC
aghi %r15,-(STACK_FRAME_OVERHEAD+__PT_SIZE) aghi %r15,-(STACK_FRAME_OVERHEAD+__PT_SIZE)
la %r11,STACK_FRAME_OVERHEAD(%r15) la %r11,STACK_FRAME_OVERHEAD(%r15)
...@@ -20,4 +20,4 @@ ENTRY(early_pgm_check_handler) ...@@ -20,4 +20,4 @@ ENTRY(early_pgm_check_handler)
mvc __LC_RETURN_PSW(16),STACK_FRAME_OVERHEAD+__PT_PSW(%r15) mvc __LC_RETURN_PSW(16),STACK_FRAME_OVERHEAD+__PT_PSW(%r15)
lmg %r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15) lmg %r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
lpswe __LC_RETURN_PSW lpswe __LC_RETURN_PSW
ENDPROC(early_pgm_check_handler) SYM_CODE_END(early_pgm_check_handler)
...@@ -49,26 +49,6 @@ struct ftrace_insn { ...@@ -49,26 +49,6 @@ struct ftrace_insn {
s32 disp; s32 disp;
} __packed; } __packed;
asm(
" .align 16\n"
"ftrace_shared_hotpatch_trampoline_br:\n"
" lmg %r0,%r1,2(%r1)\n"
" br %r1\n"
"ftrace_shared_hotpatch_trampoline_br_end:\n"
);
#ifdef CONFIG_EXPOLINE
asm(
" .align 16\n"
"ftrace_shared_hotpatch_trampoline_exrl:\n"
" lmg %r0,%r1,2(%r1)\n"
" exrl %r0,0f\n"
" j .\n"
"0: br %r1\n"
"ftrace_shared_hotpatch_trampoline_exrl_end:\n"
);
#endif /* CONFIG_EXPOLINE */
#ifdef CONFIG_MODULES #ifdef CONFIG_MODULES
static char *ftrace_plt; static char *ftrace_plt;
#endif /* CONFIG_MODULES */ #endif /* CONFIG_MODULES */
...@@ -246,7 +226,7 @@ static int __init ftrace_plt_init(void) ...@@ -246,7 +226,7 @@ static int __init ftrace_plt_init(void)
start = ftrace_shared_hotpatch_trampoline(&end); start = ftrace_shared_hotpatch_trampoline(&end);
memcpy(ftrace_plt, start, end - start); memcpy(ftrace_plt, start, end - start);
set_memory_ro((unsigned long)ftrace_plt, 1); set_memory_rox((unsigned long)ftrace_plt, 1);
return 0; return 0;
} }
device_initcall(ftrace_plt_init); device_initcall(ftrace_plt_init);
......
...@@ -16,7 +16,7 @@ ...@@ -16,7 +16,7 @@
#include <asm/ptrace.h> #include <asm/ptrace.h>
__HEAD __HEAD
ENTRY(startup_continue) SYM_CODE_START(startup_continue)
larl %r1,tod_clock_base larl %r1,tod_clock_base
mvc 0(16,%r1),__LC_BOOT_CLOCK mvc 0(16,%r1),__LC_BOOT_CLOCK
# #
...@@ -24,19 +24,17 @@ ENTRY(startup_continue) ...@@ -24,19 +24,17 @@ ENTRY(startup_continue)
# #
larl %r14,init_task larl %r14,init_task
stg %r14,__LC_CURRENT stg %r14,__LC_CURRENT
larl %r15,init_thread_union+THREAD_SIZE-STACK_FRAME_OVERHEAD-__PT_SIZE larl %r15,init_thread_union+STACK_INIT_OFFSET
stg %r15,__LC_KERNEL_STACK
brasl %r14,sclp_early_adjust_va # allow sclp_early_printk brasl %r14,sclp_early_adjust_va # allow sclp_early_printk
#ifdef CONFIG_KASAN
brasl %r14,kasan_early_init
#endif
brasl %r14,startup_init # s390 specific early init brasl %r14,startup_init # s390 specific early init
brasl %r14,start_kernel # common init code brasl %r14,start_kernel # common init code
# #
# We returned from start_kernel ?!? PANIK # We returned from start_kernel ?!? PANIK
# #
basr %r13,0 basr %r13,0
lpswe .Ldw-.(%r13) # load disabled wait psw lpswe dw_psw-.(%r13) # load disabled wait psw
SYM_CODE_END(startup_continue)
.align 16 .align 16
.LPG1: SYM_DATA_LOCAL(dw_psw, .quad 0x0002000180000000,0x0000000000000000)
.Ldw: .quad 0x0002000180000000,0x0000000000000000
...@@ -176,11 +176,11 @@ static bool reipl_fcp_clear; ...@@ -176,11 +176,11 @@ static bool reipl_fcp_clear;
static bool reipl_ccw_clear; static bool reipl_ccw_clear;
static bool reipl_eckd_clear; static bool reipl_eckd_clear;
static inline int __diag308(unsigned long subcode, void *addr) static inline int __diag308(unsigned long subcode, unsigned long addr)
{ {
union register_pair r1; union register_pair r1;
r1.even = (unsigned long) addr; r1.even = addr;
r1.odd = 0; r1.odd = 0;
asm volatile( asm volatile(
" diag %[r1],%[subcode],0x308\n" " diag %[r1],%[subcode],0x308\n"
...@@ -195,7 +195,7 @@ static inline int __diag308(unsigned long subcode, void *addr) ...@@ -195,7 +195,7 @@ static inline int __diag308(unsigned long subcode, void *addr)
int diag308(unsigned long subcode, void *addr) int diag308(unsigned long subcode, void *addr)
{ {
diag_stat_inc(DIAG_STAT_X308); diag_stat_inc(DIAG_STAT_X308);
return __diag308(subcode, addr); return __diag308(subcode, addr ? virt_to_phys(addr) : 0);
} }
EXPORT_SYMBOL_GPL(diag308); EXPORT_SYMBOL_GPL(diag308);
...@@ -649,7 +649,6 @@ static struct kset *ipl_kset; ...@@ -649,7 +649,6 @@ static struct kset *ipl_kset;
static void __ipl_run(void *unused) static void __ipl_run(void *unused)
{ {
__bpon();
diag308(DIAG308_LOAD_CLEAR, NULL); diag308(DIAG308_LOAD_CLEAR, NULL);
} }
......
...@@ -41,7 +41,7 @@ void *alloc_insn_page(void) ...@@ -41,7 +41,7 @@ void *alloc_insn_page(void)
page = module_alloc(PAGE_SIZE); page = module_alloc(PAGE_SIZE);
if (!page) if (!page)
return NULL; return NULL;
__set_memory((unsigned long) page, 1, SET_MEMORY_RO | SET_MEMORY_X); set_memory_rox((unsigned long)page, 1);
return page; return page;
} }
......
...@@ -14,9 +14,9 @@ ...@@ -14,9 +14,9 @@
*/ */
.section .kprobes.text, "ax" .section .kprobes.text, "ax"
.align 4096 .align 4096
ENTRY(kprobes_insn_page) SYM_CODE_START(kprobes_insn_page)
.rept 2048 .rept 2048
.word 0x07fe .word 0x07fe
.endr .endr
ENDPROC(kprobes_insn_page) SYM_CODE_END(kprobes_insn_page)
.previous .previous
...@@ -29,8 +29,8 @@ ...@@ -29,8 +29,8 @@
#include <asm/nmi.h> #include <asm/nmi.h>
#include <asm/sclp.h> #include <asm/sclp.h>
typedef void (*relocate_kernel_t)(kimage_entry_t *, unsigned long, typedef void (*relocate_kernel_t)(unsigned long, unsigned long, unsigned long);
unsigned long); typedef int (*purgatory_t)(int);
extern const unsigned char relocate_kernel[]; extern const unsigned char relocate_kernel[];
extern const unsigned long long relocate_kernel_len; extern const unsigned long long relocate_kernel_len;
...@@ -41,11 +41,14 @@ extern const unsigned long long relocate_kernel_len; ...@@ -41,11 +41,14 @@ extern const unsigned long long relocate_kernel_len;
* Reset the system, copy boot CPU registers to absolute zero, * Reset the system, copy boot CPU registers to absolute zero,
* and jump to the kdump image * and jump to the kdump image
*/ */
static void __do_machine_kdump(void *image) static void __do_machine_kdump(void *data)
{ {
int (*start_kdump)(int); struct kimage *image = data;
purgatory_t purgatory;
unsigned long prefix; unsigned long prefix;
purgatory = (purgatory_t)image->start;
/* store_status() saved the prefix register to lowcore */ /* store_status() saved the prefix register to lowcore */
prefix = (unsigned long) S390_lowcore.prefixreg_save_area; prefix = (unsigned long) S390_lowcore.prefixreg_save_area;
...@@ -58,13 +61,11 @@ static void __do_machine_kdump(void *image) ...@@ -58,13 +61,11 @@ static void __do_machine_kdump(void *image)
* prefix register of this CPU to zero * prefix register of this CPU to zero
*/ */
memcpy(absolute_pointer(__LC_FPREGS_SAVE_AREA), memcpy(absolute_pointer(__LC_FPREGS_SAVE_AREA),
(void *)(prefix + __LC_FPREGS_SAVE_AREA), 512); phys_to_virt(prefix + __LC_FPREGS_SAVE_AREA), 512);
__load_psw_mask(PSW_MASK_BASE | PSW_DEFAULT_KEY | PSW_MASK_EA | PSW_MASK_BA); call_nodat(1, int, purgatory, int, 1);
start_kdump = (void *)((struct kimage *) image)->start;
start_kdump(1);
/* Die if start_kdump returns */ /* Die if kdump returns */
disabled_wait(); disabled_wait();
} }
...@@ -111,18 +112,6 @@ static noinline void __machine_kdump(void *image) ...@@ -111,18 +112,6 @@ static noinline void __machine_kdump(void *image)
store_status(__do_machine_kdump, image); store_status(__do_machine_kdump, image);
} }
static unsigned long do_start_kdump(unsigned long addr)
{
struct kimage *image = (struct kimage *) addr;
int (*start_kdump)(int) = (void *)image->start;
int rc;
__arch_local_irq_stnsm(0xfb); /* disable DAT */
rc = start_kdump(0);
__arch_local_irq_stosm(0x04); /* enable DAT */
return rc;
}
#endif /* CONFIG_CRASH_DUMP */ #endif /* CONFIG_CRASH_DUMP */
/* /*
...@@ -131,12 +120,10 @@ static unsigned long do_start_kdump(unsigned long addr) ...@@ -131,12 +120,10 @@ static unsigned long do_start_kdump(unsigned long addr)
static bool kdump_csum_valid(struct kimage *image) static bool kdump_csum_valid(struct kimage *image)
{ {
#ifdef CONFIG_CRASH_DUMP #ifdef CONFIG_CRASH_DUMP
purgatory_t purgatory = (purgatory_t)image->start;
int rc; int rc;
preempt_disable(); rc = call_nodat(1, int, purgatory, int, 0);
rc = call_on_stack(1, S390_lowcore.nodat_stack, unsigned long, do_start_kdump,
unsigned long, (unsigned long)image);
preempt_enable();
return rc == 0; return rc == 0;
#else #else
return false; return false;
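Both kdump paths now funnel through call_nodat(), whose definition lands elsewhere in this series and is not shown here. A hedged sketch of how the two call sites fit together, with the macro's shape inferred purely from its uses in this diff:

#include <linux/kexec.h>

typedef int (*purgatory_t)(int);

/*
 * Inferred shape (hedged, not the macro's actual definition):
 * call_nodat(n, rettype, fn, arg1_type, arg1, ...) runs fn on the
 * nodat stack with DAT disabled and hands back its return value,
 * replacing the old do_start_kdump()/call_on_stack() pair.
 */
static int example_run_purgatory(struct kimage *image, int really_boot)
{
	purgatory_t purgatory = (purgatory_t)image->start;

	/* 0 = verify checksum only, 1 = actually boot the kdump kernel */
	return call_nodat(1, int, purgatory, int, really_boot);
}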
...@@ -210,7 +197,7 @@ int machine_kexec_prepare(struct kimage *image) ...@@ -210,7 +197,7 @@ int machine_kexec_prepare(struct kimage *image)
return -EINVAL; return -EINVAL;
/* Get the destination where the assembler code should be copied to.*/ /* Get the destination where the assembler code should be copied to.*/
reboot_code_buffer = (void *) page_to_phys(image->control_code_page); reboot_code_buffer = page_to_virt(image->control_code_page);
/* Then copy it */ /* Then copy it */
memcpy(reboot_code_buffer, relocate_kernel, relocate_kernel_len); memcpy(reboot_code_buffer, relocate_kernel, relocate_kernel_len);
...@@ -250,19 +237,20 @@ void machine_crash_shutdown(struct pt_regs *regs) ...@@ -250,19 +237,20 @@ void machine_crash_shutdown(struct pt_regs *regs)
*/ */
static void __do_machine_kexec(void *data) static void __do_machine_kexec(void *data)
{ {
unsigned long diag308_subcode; unsigned long data_mover, entry, diag308_subcode;
relocate_kernel_t data_mover;
struct kimage *image = data; struct kimage *image = data;
s390_reset_system(); data_mover = page_to_phys(image->control_code_page);
data_mover = (relocate_kernel_t) page_to_phys(image->control_code_page); entry = virt_to_phys(&image->head);
__arch_local_irq_stnsm(0xfb); /* disable DAT - avoid no-execute */
/* Call the moving routine */
diag308_subcode = DIAG308_CLEAR_RESET; diag308_subcode = DIAG308_CLEAR_RESET;
if (sclp.has_iplcc) if (sclp.has_iplcc)
diag308_subcode |= DIAG308_FLAG_EI; diag308_subcode |= DIAG308_FLAG_EI;
(*data_mover)(&image->head, image->start, diag308_subcode); s390_reset_system();
call_nodat(3, void, (relocate_kernel_t)data_mover,
unsigned long, entry,
unsigned long, image->start,
unsigned long, diag308_subcode);
/* Die if kexec returns */ /* Die if kexec returns */
disabled_wait(); disabled_wait();
......
...@@ -28,9 +28,9 @@ ...@@ -28,9 +28,9 @@
.section .kprobes.text, "ax" .section .kprobes.text, "ax"
ENTRY(ftrace_stub) SYM_FUNC_START(ftrace_stub)
BR_EX %r14 BR_EX %r14
ENDPROC(ftrace_stub) SYM_FUNC_END(ftrace_stub)
SYM_CODE_START(ftrace_stub_direct_tramp) SYM_CODE_START(ftrace_stub_direct_tramp)
lgr %r1, %r0 lgr %r1, %r0
...@@ -140,10 +140,25 @@ SYM_FUNC_END(return_to_handler) ...@@ -140,10 +140,25 @@ SYM_FUNC_END(return_to_handler)
#endif #endif
#endif /* CONFIG_FUNCTION_TRACER */ #endif /* CONFIG_FUNCTION_TRACER */
#ifdef CONFIG_RETHOOK SYM_CODE_START(ftrace_shared_hotpatch_trampoline_br)
lmg %r0,%r1,2(%r1)
br %r1
SYM_INNER_LABEL(ftrace_shared_hotpatch_trampoline_br_end, SYM_L_GLOBAL)
SYM_CODE_END(ftrace_shared_hotpatch_trampoline_br)
#ifdef CONFIG_EXPOLINE
SYM_CODE_START(ftrace_shared_hotpatch_trampoline_exrl)
lmg %r0,%r1,2(%r1)
exrl %r0,0f
j .
0: br %r1
SYM_INNER_LABEL(ftrace_shared_hotpatch_trampoline_exrl_end, SYM_L_GLOBAL)
SYM_CODE_END(ftrace_shared_hotpatch_trampoline_exrl)
#endif /* CONFIG_EXPOLINE */
SYM_FUNC_START(arch_rethook_trampoline) #ifdef CONFIG_RETHOOK
SYM_CODE_START(arch_rethook_trampoline)
stg %r14,(__SF_GPRS+8*8)(%r15) stg %r14,(__SF_GPRS+8*8)(%r15)
lay %r15,-STACK_FRAME_SIZE(%r15) lay %r15,-STACK_FRAME_SIZE(%r15)
stmg %r0,%r14,STACK_PTREGS_GPRS(%r15) stmg %r0,%r14,STACK_PTREGS_GPRS(%r15)
...@@ -166,7 +181,6 @@ SYM_FUNC_START(arch_rethook_trampoline) ...@@ -166,7 +181,6 @@ SYM_FUNC_START(arch_rethook_trampoline)
mvc __SF_EMPTY(16,%r7),STACK_PTREGS_PSW(%r15) mvc __SF_EMPTY(16,%r7),STACK_PTREGS_PSW(%r15)
lmg %r0,%r15,STACK_PTREGS_GPRS(%r15) lmg %r0,%r15,STACK_PTREGS_GPRS(%r15)
lpswe __SF_EMPTY(%r15) lpswe __SF_EMPTY(%r15)
SYM_CODE_END(arch_rethook_trampoline)
SYM_FUNC_END(arch_rethook_trampoline)
#endif /* CONFIG_RETHOOK */ #endif /* CONFIG_RETHOOK */
...@@ -26,6 +26,7 @@ ...@@ -26,6 +26,7 @@
#include <asm/facility.h> #include <asm/facility.h>
#include <asm/ftrace.lds.h> #include <asm/ftrace.lds.h>
#include <asm/set_memory.h> #include <asm/set_memory.h>
#include <asm/setup.h>
#if 0 #if 0
#define DEBUGP printk #define DEBUGP printk
...@@ -35,6 +36,24 @@ ...@@ -35,6 +36,24 @@
#define PLT_ENTRY_SIZE 22 #define PLT_ENTRY_SIZE 22
static unsigned long get_module_load_offset(void)
{
static DEFINE_MUTEX(module_kaslr_mutex);
static unsigned long module_load_offset;
if (!kaslr_enabled())
return 0;
/*
* Calculate the module_load_offset the first time this code
* is called. Once calculated it stays the same until reboot.
*/
mutex_lock(&module_kaslr_mutex);
if (!module_load_offset)
module_load_offset = get_random_u32_inclusive(1, 1024) * PAGE_SIZE;
mutex_unlock(&module_kaslr_mutex);
return module_load_offset;
}
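A quick sanity check on the range chosen above (hedged; assumes the usual 4 KiB PAGE_SIZE on s390): 1..1024 pages shifts the module area by anywhere from 4 KiB to 4 MiB, roughly 10 bits of entropy, and the lower bound of 1 guarantees the area never starts exactly at MODULES_VADDR.

#include <linux/build_bug.h>
#include <linux/sizes.h>

/* 1024 pages of 4 KiB == the 4 MiB maximum shift claimed above */
static_assert(1024 * 4096 == SZ_4M);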
void *module_alloc(unsigned long size) void *module_alloc(unsigned long size)
{ {
gfp_t gfp_mask = GFP_KERNEL; gfp_t gfp_mask = GFP_KERNEL;
...@@ -42,9 +61,11 @@ void *module_alloc(unsigned long size) ...@@ -42,9 +61,11 @@ void *module_alloc(unsigned long size)
if (PAGE_ALIGN(size) > MODULES_LEN) if (PAGE_ALIGN(size) > MODULES_LEN)
return NULL; return NULL;
p = __vmalloc_node_range(size, MODULE_ALIGN, MODULES_VADDR, MODULES_END, p = __vmalloc_node_range(size, MODULE_ALIGN,
gfp_mask, PAGE_KERNEL_EXEC, VM_DEFER_KMEMLEAK, NUMA_NO_NODE, MODULES_VADDR + get_module_load_offset(),
__builtin_return_address(0)); MODULES_END, gfp_mask, PAGE_KERNEL,
VM_FLUSH_RESET_PERMS | VM_DEFER_KMEMLEAK,
NUMA_NO_NODE, __builtin_return_address(0));
if (p && (kasan_alloc_module_shadow(p, size, gfp_mask) < 0)) { if (p && (kasan_alloc_module_shadow(p, size, gfp_mask) < 0)) {
vfree(p); vfree(p);
return NULL; return NULL;
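Two flag changes above work together: mapping with PAGE_KERNEL instead of PAGE_KERNEL_EXEC means module memory starts out non-executable, and VM_FLUSH_RESET_PERMS makes vfree() undo any later set_memory_*() changes. A hedged sketch of the lifecycle this implies:

#include <linux/set_memory.h>
#include <linux/vmalloc.h>

static void example_module_region_lifecycle(void *p, int numpages)
{
	set_memory_rox((unsigned long)p, numpages); /* publish as code */
	/* ... region is live ... */
	vfree(p); /* VM_FLUSH_RESET_PERMS: perms reset + TLB flushed here */
}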
...@@ -491,7 +512,7 @@ static int module_alloc_ftrace_hotpatch_trampolines(struct module *me, ...@@ -491,7 +512,7 @@ static int module_alloc_ftrace_hotpatch_trampolines(struct module *me,
start = module_alloc(numpages * PAGE_SIZE); start = module_alloc(numpages * PAGE_SIZE);
if (!start) if (!start)
return -ENOMEM; return -ENOMEM;
set_memory_ro((unsigned long)start, numpages); set_memory_rox((unsigned long)start, numpages);
end = start + size; end = start + size;
me->arch.trampolines_start = (struct ftrace_hotpatch_trampoline *)start; me->arch.trampolines_start = (struct ftrace_hotpatch_trampoline *)start;
......
...@@ -882,10 +882,6 @@ static int __hw_perf_event_init(struct perf_event *event) ...@@ -882,10 +882,6 @@ static int __hw_perf_event_init(struct perf_event *event)
SAMPL_FLAGS(hwc) |= PERF_CPUM_SF_DIAG_MODE; SAMPL_FLAGS(hwc) |= PERF_CPUM_SF_DIAG_MODE;
} }
/* Check and set other sampling flags */
if (attr->config1 & PERF_CPUM_SF_FULL_BLOCKS)
SAMPL_FLAGS(hwc) |= PERF_CPUM_SF_FULL_BLOCKS;
err = __hw_perf_event_init_rate(event, &si); err = __hw_perf_event_init_rate(event, &si);
if (err) if (err)
goto out; goto out;
...@@ -1293,11 +1289,8 @@ static inline __uint128_t __cdsg(__uint128_t *ptr, __uint128_t old, __uint128_t ...@@ -1293,11 +1289,8 @@ static inline __uint128_t __cdsg(__uint128_t *ptr, __uint128_t old, __uint128_t
* The sampling buffer position are retrieved and saved in the TEAR_REG * The sampling buffer position are retrieved and saved in the TEAR_REG
* register of the specified perf event. * register of the specified perf event.
* *
* Only full sample-data-blocks are processed. Specify the flash_all flag * Only full sample-data-blocks are processed. Specify the flush_all flag
* to also walk through partially filled sample-data-blocks. It is ignored * to also walk through partially filled sample-data-blocks.
* if PERF_CPUM_SF_FULL_BLOCKS is set. The PERF_CPUM_SF_FULL_BLOCKS flag
* enforces the processing of full sample-data-blocks only (trailer entries
* with the block-full-indicator bit set).
*/ */
static void hw_perf_event_update(struct perf_event *event, int flush_all) static void hw_perf_event_update(struct perf_event *event, int flush_all)
{ {
...@@ -1315,9 +1308,6 @@ static void hw_perf_event_update(struct perf_event *event, int flush_all) ...@@ -1315,9 +1308,6 @@ static void hw_perf_event_update(struct perf_event *event, int flush_all)
if (SAMPL_DIAG_MODE(&event->hw)) if (SAMPL_DIAG_MODE(&event->hw))
return; return;
if (flush_all && SDB_FULL_BLOCKS(hwc))
flush_all = 0;
sdbt = (unsigned long *) TEAR_REG(hwc); sdbt = (unsigned long *) TEAR_REG(hwc);
done = event_overflow = sampl_overflow = num_sdb = 0; done = event_overflow = sampl_overflow = num_sdb = 0;
while (!done) { while (!done) {
......
...@@ -136,12 +136,12 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args) ...@@ -136,12 +136,12 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
p->thread.last_break = 1; p->thread.last_break = 1;
frame->sf.back_chain = 0; frame->sf.back_chain = 0;
frame->sf.gprs[5] = (unsigned long)frame + sizeof(struct stack_frame); frame->sf.gprs[11 - 6] = (unsigned long)&frame->childregs;
frame->sf.gprs[6] = (unsigned long)p; frame->sf.gprs[12 - 6] = (unsigned long)p;
/* new return point is ret_from_fork */ /* new return point is ret_from_fork */
frame->sf.gprs[8] = (unsigned long)ret_from_fork; frame->sf.gprs[14 - 6] = (unsigned long)ret_from_fork;
/* fake return stack for resume(), don't go back to schedule */ /* fake return stack for resume(), don't go back to schedule */
frame->sf.gprs[9] = (unsigned long)frame; frame->sf.gprs[15 - 6] = (unsigned long)frame;
/* Store access registers to kernel stack of new process. */ /* Store access registers to kernel stack of new process. */
if (unlikely(args->fn)) { if (unlikely(args->fn)) {
...@@ -149,8 +149,6 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args) ...@@ -149,8 +149,6 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
memset(&frame->childregs, 0, sizeof(struct pt_regs)); memset(&frame->childregs, 0, sizeof(struct pt_regs));
frame->childregs.psw.mask = PSW_KERNEL_BITS | PSW_MASK_IO | frame->childregs.psw.mask = PSW_KERNEL_BITS | PSW_MASK_IO |
PSW_MASK_EXT | PSW_MASK_MCHECK; PSW_MASK_EXT | PSW_MASK_MCHECK;
frame->childregs.psw.addr =
(unsigned long)__ret_from_fork;
frame->childregs.gprs[9] = (unsigned long)args->fn; frame->childregs.gprs[9] = (unsigned long)args->fn;
frame->childregs.gprs[10] = (unsigned long)args->fn_arg; frame->childregs.gprs[10] = (unsigned long)args->fn_arg;
frame->childregs.orig_gpr2 = -1; frame->childregs.orig_gpr2 = -1;
......
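A hedged reading of the new index arithmetic in copy_thread() above: on s390, struct stack_frame's gprs[] holds only the callee-saved registers %r6..%r15, so slot (n - 6) corresponds to %rn. Spelling the subtraction out, as in gprs[14 - 6] for %r14, documents which register each slot seeds when the new task enters ret_from_fork.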
...@@ -364,21 +364,3 @@ const struct seq_operations cpuinfo_op = { ...@@ -364,21 +364,3 @@ const struct seq_operations cpuinfo_op = {
.stop = c_stop, .stop = c_stop,
.show = show_cpuinfo, .show = show_cpuinfo,
}; };
int s390_isolate_bp(void)
{
if (!test_facility(82))
return -EOPNOTSUPP;
set_thread_flag(TIF_ISOLATE_BP);
return 0;
}
EXPORT_SYMBOL(s390_isolate_bp);
int s390_isolate_bp_guest(void)
{
if (!test_facility(82))
return -EOPNOTSUPP;
set_thread_flag(TIF_ISOLATE_BP_GUEST);
return 0;
}
EXPORT_SYMBOL(s390_isolate_bp_guest);
...@@ -19,7 +19,7 @@ ...@@ -19,7 +19,7 @@
# r2 = Function to be called after store status # r2 = Function to be called after store status
# r3 = Parameter for function # r3 = Parameter for function
# #
ENTRY(store_status) SYM_CODE_START(store_status)
/* Save register one and load save area base */ /* Save register one and load save area base */
stg %r1,__LC_SAVE_AREA_RESTART stg %r1,__LC_SAVE_AREA_RESTART
/* General purpose registers */ /* General purpose registers */
...@@ -61,7 +61,7 @@ ENTRY(store_status) ...@@ -61,7 +61,7 @@ ENTRY(store_status)
stpx 0(%r1) stpx 0(%r1)
/* Clock comparator - seven bytes */ /* Clock comparator - seven bytes */
lghi %r1,__LC_CLOCK_COMP_SAVE_AREA lghi %r1,__LC_CLOCK_COMP_SAVE_AREA
larl %r4,.Lclkcmp larl %r4,clkcmp
stckc 0(%r4) stckc 0(%r4)
mvc 1(7,%r1),1(%r4) mvc 1(7,%r1),1(%r4)
/* Program status word */ /* Program status word */
...@@ -73,9 +73,9 @@ ENTRY(store_status) ...@@ -73,9 +73,9 @@ ENTRY(store_status)
lgr %r9,%r2 lgr %r9,%r2
lgr %r2,%r3 lgr %r2,%r3
BR_EX %r9 BR_EX %r9
ENDPROC(store_status) SYM_CODE_END(store_status)
.section .bss .section .bss
.align 8 .balign 8
.Lclkcmp: .quad 0x0000000000000000 SYM_DATA_LOCAL(clkcmp, .quad 0x0000000000000000)
.previous .previous
...@@ -26,9 +26,9 @@ ...@@ -26,9 +26,9 @@
*/ */
.text .text
ENTRY(relocate_kernel) SYM_CODE_START(relocate_kernel)
basr %r13,0 # base address basr %r13,0 # base address
.base: .base:
lghi %r7,PAGE_SIZE # load PAGE_SIZE in r7 lghi %r7,PAGE_SIZE # load PAGE_SIZE in r7
lghi %r9,PAGE_SIZE # load PAGE_SIZE in r9 lghi %r9,PAGE_SIZE # load PAGE_SIZE in r9
lg %r5,0(%r2) # read another word for indirection page lg %r5,0(%r2) # read another word for indirection page
...@@ -38,25 +38,25 @@ ENTRY(relocate_kernel) ...@@ -38,25 +38,25 @@ ENTRY(relocate_kernel)
lgr %r6,%r5 # r6 = r5 lgr %r6,%r5 # r6 = r5
nill %r6,0xf000 # mask it out and... nill %r6,0xf000 # mask it out and...
j .base # ...next iteration j .base # ...next iteration
.indir_check: .indir_check:
tml %r5,0x2 # is it a indirection page? tml %r5,0x2 # is it a indirection page?
je .done_test # NO, goto "done_test" je .done_test # NO, goto "done_test"
nill %r5,0xf000 # YES, mask out, nill %r5,0xf000 # YES, mask out,
lgr %r2,%r5 # move it into the right register, lgr %r2,%r5 # move it into the right register,
j .base # and read next... j .base # and read next...
.done_test: .done_test:
tml %r5,0x4 # is it the done indicator? tml %r5,0x4 # is it the done indicator?
je .source_test # NO! Well, then it should be the source indicator... je .source_test # NO! Well, then it should be the source indicator...
j .done # ok, lets finish it here... j .done # ok, lets finish it here...
.source_test: .source_test:
tml %r5,0x8 # it should be a source indicator... tml %r5,0x8 # it should be a source indicator...
je .base # NO, ignore it... je .base # NO, ignore it...
lgr %r8,%r5 # r8 = r5 lgr %r8,%r5 # r8 = r5
nill %r8,0xf000 # masking nill %r8,0xf000 # masking
0: mvcle %r6,%r8,0x0 # copy PAGE_SIZE bytes from r8 to r6 - pad with 0 0: mvcle %r6,%r8,0x0 # copy PAGE_SIZE bytes from r8 to r6 - pad with 0
jo 0b jo 0b
j .base j .base
.done: .done:
lgr %r0,%r4 # subcode lgr %r0,%r4 # subcode
cghi %r3,0 cghi %r3,0
je .diag je .diag
...@@ -64,15 +64,13 @@ ENTRY(relocate_kernel) ...@@ -64,15 +64,13 @@ ENTRY(relocate_kernel)
o %r3,4(%r4) # or load address into psw o %r3,4(%r4) # or load address into psw
st %r3,4(%r4) st %r3,4(%r4)
mvc 0(8,%r0),0(%r4) # copy psw to absolute address 0 mvc 0(8,%r0),0(%r4) # copy psw to absolute address 0
.diag: .diag:
diag %r0,%r0,0x308 diag %r0,%r0,0x308
ENDPROC(relocate_kernel) SYM_CODE_END(relocate_kernel)
.align 8 .balign 8
load_psw: SYM_DATA_START_LOCAL(load_psw)
.long 0x00080000,0x80000000 .long 0x00080000,0x80000000
relocate_kernel_end: SYM_DATA_END_LABEL(load_psw, SYM_L_LOCAL, relocate_kernel_end)
.align 8 .balign 8
.globl relocate_kernel_len SYM_DATA(relocate_kernel_len, .quad relocate_kernel_end - relocate_kernel)
relocate_kernel_len:
.quad relocate_kernel_end - relocate_kernel
...@@ -74,7 +74,7 @@ ...@@ -74,7 +74,7 @@
#include <asm/numa.h> #include <asm/numa.h>
#include <asm/alternative.h> #include <asm/alternative.h>
#include <asm/nospec-branch.h> #include <asm/nospec-branch.h>
#include <asm/mem_detect.h> #include <asm/physmem_info.h>
#include <asm/maccess.h> #include <asm/maccess.h>
#include <asm/uv.h> #include <asm/uv.h>
#include <asm/asm-offsets.h> #include <asm/asm-offsets.h>
...@@ -147,14 +147,10 @@ static u32 __amode31_ref *__ctl_duct = __ctl_duct_amode31; ...@@ -147,14 +147,10 @@ static u32 __amode31_ref *__ctl_duct = __ctl_duct_amode31;
int __bootdata(noexec_disabled); int __bootdata(noexec_disabled);
unsigned long __bootdata(ident_map_size); unsigned long __bootdata(ident_map_size);
struct mem_detect_info __bootdata(mem_detect); struct physmem_info __bootdata(physmem_info);
struct initrd_data __bootdata(initrd_data);
unsigned long __bootdata(pgalloc_pos);
unsigned long __bootdata(pgalloc_end);
unsigned long __bootdata(pgalloc_low);
unsigned long __bootdata_preserved(__kaslr_offset); unsigned long __bootdata_preserved(__kaslr_offset);
unsigned long __bootdata(__amode31_base); int __bootdata_preserved(__kaslr_enabled);
unsigned int __bootdata_preserved(zlib_dfltcc_support); unsigned int __bootdata_preserved(zlib_dfltcc_support);
EXPORT_SYMBOL(zlib_dfltcc_support); EXPORT_SYMBOL(zlib_dfltcc_support);
u64 __bootdata_preserved(stfle_fac_list[16]); u64 __bootdata_preserved(stfle_fac_list[16]);
...@@ -385,39 +381,27 @@ void stack_free(unsigned long stack) ...@@ -385,39 +381,27 @@ void stack_free(unsigned long stack)
#endif #endif
} }
int __init arch_early_irq_init(void) void __init __noreturn arch_call_rest_init(void)
{ {
unsigned long stack; smp_reinit_ipl_cpu();
rest_init();
stack = __get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER);
if (!stack)
panic("Couldn't allocate async stack");
S390_lowcore.async_stack = stack + STACK_INIT_OFFSET;
return 0;
} }
void __init __noreturn arch_call_rest_init(void) static unsigned long __init stack_alloc_early(void)
{ {
unsigned long stack; unsigned long stack;
smp_reinit_ipl_cpu(); stack = (unsigned long)memblock_alloc(THREAD_SIZE, THREAD_SIZE);
stack = stack_alloc(); if (!stack) {
if (!stack) panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
panic("Couldn't allocate kernel stack"); __func__, THREAD_SIZE, THREAD_SIZE);
current->stack = (void *) stack; }
#ifdef CONFIG_VMAP_STACK return stack;
current->stack_vm_area = (void *) stack;
#endif
set_task_stack_end_magic(current);
stack += STACK_INIT_OFFSET;
S390_lowcore.kernel_stack = stack;
call_on_stack_noreturn(rest_init, stack);
} }
static void __init setup_lowcore(void) static void __init setup_lowcore(void)
{ {
struct lowcore *lc, *abs_lc; struct lowcore *lc, *abs_lc;
unsigned long mcck_stack;
/* /*
* Setup lowcore for boot cpu * Setup lowcore for boot cpu
...@@ -441,8 +425,6 @@ static void __init setup_lowcore(void) ...@@ -441,8 +425,6 @@ static void __init setup_lowcore(void)
lc->io_new_psw.mask = PSW_KERNEL_BITS | PSW_MASK_MCHECK; lc->io_new_psw.mask = PSW_KERNEL_BITS | PSW_MASK_MCHECK;
lc->io_new_psw.addr = (unsigned long) io_int_handler; lc->io_new_psw.addr = (unsigned long) io_int_handler;
lc->clock_comparator = clock_comparator_max; lc->clock_comparator = clock_comparator_max;
lc->nodat_stack = ((unsigned long) &init_thread_union)
+ THREAD_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
lc->current_task = (unsigned long)&init_task; lc->current_task = (unsigned long)&init_task;
lc->lpp = LPP_MAGIC; lc->lpp = LPP_MAGIC;
lc->machine_flags = S390_lowcore.machine_flags; lc->machine_flags = S390_lowcore.machine_flags;
...@@ -455,17 +437,15 @@ static void __init setup_lowcore(void) ...@@ -455,17 +437,15 @@ static void __init setup_lowcore(void)
lc->steal_timer = S390_lowcore.steal_timer; lc->steal_timer = S390_lowcore.steal_timer;
lc->last_update_timer = S390_lowcore.last_update_timer; lc->last_update_timer = S390_lowcore.last_update_timer;
lc->last_update_clock = S390_lowcore.last_update_clock; lc->last_update_clock = S390_lowcore.last_update_clock;
/* /*
* Allocate the global restart stack which is the same for * Allocate the global restart stack which is the same for
* all CPUs in cast *one* of them does a PSW restart. * all CPUs in case *one* of them does a PSW restart.
*/ */
restart_stack = memblock_alloc(THREAD_SIZE, THREAD_SIZE); restart_stack = (void *)(stack_alloc_early() + STACK_INIT_OFFSET);
if (!restart_stack) lc->mcck_stack = stack_alloc_early() + STACK_INIT_OFFSET;
panic("%s: Failed to allocate %lu bytes align=0x%lx\n", lc->async_stack = stack_alloc_early() + STACK_INIT_OFFSET;
__func__, THREAD_SIZE, THREAD_SIZE); lc->nodat_stack = stack_alloc_early() + STACK_INIT_OFFSET;
restart_stack += STACK_INIT_OFFSET; lc->kernel_stack = S390_lowcore.kernel_stack;
/* /*
* Set up PSW restart to call ipl.c:do_restart(). Copy the relevant * Set up PSW restart to call ipl.c:do_restart(). Copy the relevant
* restart data to the absolute zero lowcore. This is necessary if * restart data to the absolute zero lowcore. This is necessary if
...@@ -476,13 +456,6 @@ static void __init setup_lowcore(void) ...@@ -476,13 +456,6 @@ static void __init setup_lowcore(void)
lc->restart_data = 0; lc->restart_data = 0;
lc->restart_source = -1U; lc->restart_source = -1U;
__ctl_store(lc->cregs_save_area, 0, 15); __ctl_store(lc->cregs_save_area, 0, 15);
mcck_stack = (unsigned long)memblock_alloc(THREAD_SIZE, THREAD_SIZE);
if (!mcck_stack)
panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
__func__, THREAD_SIZE, THREAD_SIZE);
lc->mcck_stack = mcck_stack + STACK_INIT_OFFSET;
lc->spinlock_lockval = arch_spin_lockval(0); lc->spinlock_lockval = arch_spin_lockval(0);
lc->spinlock_index = 0; lc->spinlock_index = 0;
arch_spin_lock_setup(0); arch_spin_lock_setup(0);
...@@ -635,7 +608,11 @@ static struct notifier_block kdump_mem_nb = { ...@@ -635,7 +608,11 @@ static struct notifier_block kdump_mem_nb = {
*/ */
static void __init reserve_pgtables(void) static void __init reserve_pgtables(void)
{ {
memblock_reserve(pgalloc_pos, pgalloc_end - pgalloc_pos); unsigned long start, end;
struct reserved_range *range;
for_each_physmem_reserved_type_range(RR_VMEM, range, &start, &end)
memblock_reserve(start, end - start);
} }
/* /*
...@@ -712,13 +689,13 @@ static void __init reserve_crashkernel(void) ...@@ -712,13 +689,13 @@ static void __init reserve_crashkernel(void)
*/ */
static void __init reserve_initrd(void) static void __init reserve_initrd(void)
{ {
#ifdef CONFIG_BLK_DEV_INITRD unsigned long addr, size;
if (!initrd_data.start || !initrd_data.size)
if (!IS_ENABLED(CONFIG_BLK_DEV_INITRD) || !get_physmem_reserved(RR_INITRD, &addr, &size))
return; return;
initrd_start = (unsigned long)__va(initrd_data.start); initrd_start = (unsigned long)__va(addr);
initrd_end = initrd_start + initrd_data.size; initrd_end = initrd_start + size;
memblock_reserve(initrd_data.start, initrd_data.size); memblock_reserve(addr, size);
#endif
} }
/* /*
...@@ -730,71 +707,39 @@ static void __init reserve_certificate_list(void) ...@@ -730,71 +707,39 @@ static void __init reserve_certificate_list(void)
memblock_reserve(ipl_cert_list_addr, ipl_cert_list_size); memblock_reserve(ipl_cert_list_addr, ipl_cert_list_size);
} }
static void __init reserve_mem_detect_info(void) static void __init reserve_physmem_info(void)
{ {
unsigned long start, size; unsigned long addr, size;
get_mem_detect_reserved(&start, &size); if (get_physmem_reserved(RR_MEM_DETECT_EXTENDED, &addr, &size))
if (size) memblock_reserve(addr, size);
memblock_reserve(start, size);
} }
static void __init free_mem_detect_info(void) static void __init free_physmem_info(void)
{ {
unsigned long start, size; unsigned long addr, size;
get_mem_detect_reserved(&start, &size); if (get_physmem_reserved(RR_MEM_DETECT_EXTENDED, &addr, &size))
if (size) memblock_phys_free(addr, size);
memblock_phys_free(start, size);
} }
static const char * __init get_mem_info_source(void) static void __init memblock_add_physmem_info(void)
{
switch (mem_detect.info_source) {
case MEM_DETECT_SCLP_STOR_INFO:
return "sclp storage info";
case MEM_DETECT_DIAG260:
return "diag260";
case MEM_DETECT_SCLP_READ_INFO:
return "sclp read info";
case MEM_DETECT_BIN_SEARCH:
return "binary search";
}
return "none";
}
static void __init memblock_add_mem_detect_info(void)
{ {
unsigned long start, end; unsigned long start, end;
int i; int i;
pr_debug("physmem info source: %s (%hhd)\n", pr_debug("physmem info source: %s (%hhd)\n",
get_mem_info_source(), mem_detect.info_source); get_physmem_info_source(), physmem_info.info_source);
/* keep memblock lists close to the kernel */ /* keep memblock lists close to the kernel */
memblock_set_bottom_up(true); memblock_set_bottom_up(true);
for_each_mem_detect_usable_block(i, &start, &end) for_each_physmem_usable_range(i, &start, &end)
memblock_add(start, end - start); memblock_add(start, end - start);
for_each_mem_detect_block(i, &start, &end) for_each_physmem_online_range(i, &start, &end)
memblock_physmem_add(start, end - start); memblock_physmem_add(start, end - start);
memblock_set_bottom_up(false); memblock_set_bottom_up(false);
memblock_set_node(0, ULONG_MAX, &memblock.memory, 0); memblock_set_node(0, ULONG_MAX, &memblock.memory, 0);
} }
/*
* Check for initrd being in usable memory
*/
static void __init check_initrd(void)
{
#ifdef CONFIG_BLK_DEV_INITRD
if (initrd_data.start && initrd_data.size &&
!memblock_is_region_memory(initrd_data.start, initrd_data.size)) {
pr_err("The initial RAM disk does not fit into the memory\n");
memblock_phys_free(initrd_data.start, initrd_data.size);
initrd_start = initrd_end = 0;
}
#endif
}
/* /*
* Reserve memory used for lowcore/command line/kernel image. * Reserve memory used for lowcore/command line/kernel image.
*/ */
...@@ -803,7 +748,7 @@ static void __init reserve_kernel(void) ...@@ -803,7 +748,7 @@ static void __init reserve_kernel(void)
memblock_reserve(0, STARTUP_NORMAL_OFFSET); memblock_reserve(0, STARTUP_NORMAL_OFFSET);
memblock_reserve(OLDMEM_BASE, sizeof(unsigned long)); memblock_reserve(OLDMEM_BASE, sizeof(unsigned long));
memblock_reserve(OLDMEM_SIZE, sizeof(unsigned long)); memblock_reserve(OLDMEM_SIZE, sizeof(unsigned long));
memblock_reserve(__amode31_base, __eamode31 - __samode31); memblock_reserve(physmem_info.reserved[RR_AMODE31].start, __eamode31 - __samode31);
memblock_reserve(__pa(sclp_early_sccb), EXT_SCCB_READ_SCP); memblock_reserve(__pa(sclp_early_sccb), EXT_SCCB_READ_SCP);
memblock_reserve(__pa(_stext), _end - _stext); memblock_reserve(__pa(_stext), _end - _stext);
} }
...@@ -825,13 +770,13 @@ static void __init setup_memory(void) ...@@ -825,13 +770,13 @@ static void __init setup_memory(void)
static void __init relocate_amode31_section(void) static void __init relocate_amode31_section(void)
{ {
unsigned long amode31_size = __eamode31 - __samode31; unsigned long amode31_size = __eamode31 - __samode31;
long amode31_offset = __amode31_base - __samode31; long amode31_offset = physmem_info.reserved[RR_AMODE31].start - __samode31;
long *ptr; long *ptr;
pr_info("Relocating AMODE31 section of size 0x%08lx\n", amode31_size); pr_info("Relocating AMODE31 section of size 0x%08lx\n", amode31_size);
/* Move original AMODE31 section to the new one */ /* Move original AMODE31 section to the new one */
memmove((void *)__amode31_base, (void *)__samode31, amode31_size); memmove((void *)physmem_info.reserved[RR_AMODE31].start, (void *)__samode31, amode31_size);
/* Zero out the old AMODE31 section to catch invalid accesses within it */ /* Zero out the old AMODE31 section to catch invalid accesses within it */
memset((void *)__samode31, 0, amode31_size); memset((void *)__samode31, 0, amode31_size);
...@@ -997,14 +942,14 @@ void __init setup_arch(char **cmdline_p) ...@@ -997,14 +942,14 @@ void __init setup_arch(char **cmdline_p)
reserve_kernel(); reserve_kernel();
reserve_initrd(); reserve_initrd();
reserve_certificate_list(); reserve_certificate_list();
reserve_mem_detect_info(); reserve_physmem_info();
memblock_set_current_limit(ident_map_size); memblock_set_current_limit(ident_map_size);
memblock_allow_resize(); memblock_allow_resize();
/* Get information about *all* installed memory */ /* Get information about *all* installed memory */
memblock_add_mem_detect_info(); memblock_add_physmem_info();
free_mem_detect_info(); free_physmem_info();
setup_memory_end(); setup_memory_end();
memblock_dump_all(); memblock_dump_all();
setup_memory(); setup_memory();
...@@ -1017,7 +962,6 @@ void __init setup_arch(char **cmdline_p) ...@@ -1017,7 +962,6 @@ void __init setup_arch(char **cmdline_p)
if (MACHINE_HAS_EDAT2) if (MACHINE_HAS_EDAT2)
hugetlb_cma_reserve(PUD_SHIFT - PAGE_SHIFT); hugetlb_cma_reserve(PUD_SHIFT - PAGE_SHIFT);
check_initrd();
reserve_crashkernel(); reserve_crashkernel();
#ifdef CONFIG_CRASH_DUMP #ifdef CONFIG_CRASH_DUMP
/* /*
......
...@@ -280,9 +280,8 @@ static void pcpu_attach_task(struct pcpu *pcpu, struct task_struct *tsk) ...@@ -280,9 +280,8 @@ static void pcpu_attach_task(struct pcpu *pcpu, struct task_struct *tsk)
cpu = pcpu - pcpu_devices; cpu = pcpu - pcpu_devices;
lc = lowcore_ptr[cpu]; lc = lowcore_ptr[cpu];
lc->kernel_stack = (unsigned long) task_stack_page(tsk) lc->kernel_stack = (unsigned long)task_stack_page(tsk) + STACK_INIT_OFFSET;
+ THREAD_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs); lc->current_task = (unsigned long)tsk;
lc->current_task = (unsigned long) tsk;
lc->lpp = LPP_MAGIC; lc->lpp = LPP_MAGIC;
lc->current_pid = tsk->pid; lc->current_pid = tsk->pid;
lc->user_timer = tsk->thread.user_timer; lc->user_timer = tsk->thread.user_timer;
...@@ -348,7 +347,6 @@ static void pcpu_delegate(struct pcpu *pcpu, ...@@ -348,7 +347,6 @@ static void pcpu_delegate(struct pcpu *pcpu,
abs_lc->restart_source = source_cpu; abs_lc->restart_source = source_cpu;
put_abs_lowcore(abs_lc); put_abs_lowcore(abs_lc);
} }
__bpon();
asm volatile( asm volatile(
"0: sigp 0,%0,%2 # sigp restart to target cpu\n" "0: sigp 0,%0,%2 # sigp restart to target cpu\n"
" brc 2,0b # busy, try again\n" " brc 2,0b # busy, try again\n"
...@@ -986,7 +984,6 @@ void __cpu_die(unsigned int cpu) ...@@ -986,7 +984,6 @@ void __cpu_die(unsigned int cpu)
void __noreturn cpu_die(void) void __noreturn cpu_die(void)
{ {
idle_task_exit(); idle_task_exit();
__bpon();
pcpu_sigp_retry(pcpu_devices + smp_processor_id(), SIGP_STOP, 0); pcpu_sigp_retry(pcpu_devices + smp_processor_id(), SIGP_STOP, 0);
for (;;) ; for (;;) ;
} }
...@@ -1302,9 +1299,9 @@ int __init smp_reinit_ipl_cpu(void) ...@@ -1302,9 +1299,9 @@ int __init smp_reinit_ipl_cpu(void)
local_mcck_enable(); local_mcck_enable();
local_irq_restore(flags); local_irq_restore(flags);
free_pages(lc_ipl->async_stack - STACK_INIT_OFFSET, THREAD_SIZE_ORDER);
memblock_free_late(__pa(lc_ipl->mcck_stack - STACK_INIT_OFFSET), THREAD_SIZE); memblock_free_late(__pa(lc_ipl->mcck_stack - STACK_INIT_OFFSET), THREAD_SIZE);
memblock_free_late(__pa(lc_ipl->async_stack - STACK_INIT_OFFSET), THREAD_SIZE);
memblock_free_late(__pa(lc_ipl->nodat_stack - STACK_INIT_OFFSET), THREAD_SIZE);
memblock_free_late(__pa(lc_ipl), sizeof(*lc_ipl)); memblock_free_late(__pa(lc_ipl), sizeof(*lc_ipl));
return 0; return 0;
} }
...@@ -449,7 +449,7 @@ ...@@ -449,7 +449,7 @@
444 common landlock_create_ruleset sys_landlock_create_ruleset sys_landlock_create_ruleset 444 common landlock_create_ruleset sys_landlock_create_ruleset sys_landlock_create_ruleset
445 common landlock_add_rule sys_landlock_add_rule sys_landlock_add_rule 445 common landlock_add_rule sys_landlock_add_rule sys_landlock_add_rule
446 common landlock_restrict_self sys_landlock_restrict_self sys_landlock_restrict_self 446 common landlock_restrict_self sys_landlock_restrict_self sys_landlock_restrict_self
# 447 reserved for memfd_secret 447 common memfd_secret sys_memfd_secret sys_memfd_secret
448 common process_mrelease sys_process_mrelease sys_process_mrelease 448 common process_mrelease sys_process_mrelease sys_process_mrelease
449 common futex_waitv sys_futex_waitv sys_futex_waitv 449 common futex_waitv sys_futex_waitv sys_futex_waitv
450 common set_mempolicy_home_node sys_set_mempolicy_home_node sys_set_mempolicy_home_node 450 common set_mempolicy_home_node sys_set_mempolicy_home_node sys_set_mempolicy_home_node
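With slot 447 wired up, memfd_secret(2) becomes callable on s390. A minimal userspace smoke test (hedged: glibc has no wrapper, so the raw syscall is used; SYS_memfd_secret needs recent headers, and the kernel must have secretmem enabled, historically behind CONFIG_SECRETMEM and the secretmem.enable boot parameter):

#include <stdio.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	int fd = (int)syscall(SYS_memfd_secret, 0);
	void *p;

	if (fd < 0) {
		perror("memfd_secret");
		return 1;
	}
	if (ftruncate(fd, 4096) == 0) {
		p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
			 MAP_SHARED, fd, 0);
		if (p != MAP_FAILED) {
			/* This page is removed from the kernel direct map. */
			munmap(p, 4096);
		}
	}
	close(fd);
	return 0;
}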
...@@ -27,7 +27,7 @@ ...@@ -27,7 +27,7 @@
/* /*
* int _diag14_amode31(unsigned long rx, unsigned long ry1, unsigned long subcode) * int _diag14_amode31(unsigned long rx, unsigned long ry1, unsigned long subcode)
*/ */
ENTRY(_diag14_amode31) SYM_FUNC_START(_diag14_amode31)
lgr %r1,%r2 lgr %r1,%r2
lgr %r2,%r3 lgr %r2,%r3
lgr %r3,%r4 lgr %r3,%r4
...@@ -42,12 +42,12 @@ ENTRY(_diag14_amode31) ...@@ -42,12 +42,12 @@ ENTRY(_diag14_amode31)
lgfr %r2,%r5 lgfr %r2,%r5
BR_EX_AMODE31_r14 BR_EX_AMODE31_r14
EX_TABLE_AMODE31(.Ldiag14_ex, .Ldiag14_fault) EX_TABLE_AMODE31(.Ldiag14_ex, .Ldiag14_fault)
ENDPROC(_diag14_amode31) SYM_FUNC_END(_diag14_amode31)
/* /*
* int _diag210_amode31(struct diag210 *addr) * int _diag210_amode31(struct diag210 *addr)
*/ */
ENTRY(_diag210_amode31) SYM_FUNC_START(_diag210_amode31)
lgr %r1,%r2 lgr %r1,%r2
lhi %r2,-1 lhi %r2,-1
sam31 sam31
...@@ -60,12 +60,12 @@ ENTRY(_diag210_amode31) ...@@ -60,12 +60,12 @@ ENTRY(_diag210_amode31)
lgfr %r2,%r2 lgfr %r2,%r2
BR_EX_AMODE31_r14 BR_EX_AMODE31_r14
EX_TABLE_AMODE31(.Ldiag210_ex, .Ldiag210_fault) EX_TABLE_AMODE31(.Ldiag210_ex, .Ldiag210_fault)
ENDPROC(_diag210_amode31) SYM_FUNC_END(_diag210_amode31)
/* /*
* int diag8c(struct diag8c *addr, struct ccw_dev_id *devno, size_t len) * int diag8c(struct diag8c *addr, struct ccw_dev_id *devno, size_t len)
*/ */
ENTRY(_diag8c_amode31) SYM_FUNC_START(_diag8c_amode31)
llgf %r3,0(%r3) llgf %r3,0(%r3)
sam31 sam31
diag %r2,%r4,0x8c diag %r2,%r4,0x8c
...@@ -74,11 +74,11 @@ ENTRY(_diag8c_amode31) ...@@ -74,11 +74,11 @@ ENTRY(_diag8c_amode31)
lgfr %r2,%r3 lgfr %r2,%r3
BR_EX_AMODE31_r14 BR_EX_AMODE31_r14
EX_TABLE_AMODE31(.Ldiag8c_ex, .Ldiag8c_ex) EX_TABLE_AMODE31(.Ldiag8c_ex, .Ldiag8c_ex)
ENDPROC(_diag8c_amode31) SYM_FUNC_END(_diag8c_amode31)
/* /*
* int _diag26c_amode31(void *req, void *resp, enum diag26c_sc subcode) * int _diag26c_amode31(void *req, void *resp, enum diag26c_sc subcode)
*/ */
ENTRY(_diag26c_amode31) SYM_FUNC_START(_diag26c_amode31)
lghi %r5,-EOPNOTSUPP lghi %r5,-EOPNOTSUPP
sam31 sam31
diag %r2,%r4,0x26c diag %r2,%r4,0x26c
...@@ -87,42 +87,42 @@ ENTRY(_diag26c_amode31) ...@@ -87,42 +87,42 @@ ENTRY(_diag26c_amode31)
lgfr %r2,%r5 lgfr %r2,%r5
BR_EX_AMODE31_r14 BR_EX_AMODE31_r14
EX_TABLE_AMODE31(.Ldiag26c_ex, .Ldiag26c_ex) EX_TABLE_AMODE31(.Ldiag26c_ex, .Ldiag26c_ex)
ENDPROC(_diag26c_amode31) SYM_FUNC_END(_diag26c_amode31)
/* /*
* void _diag0c_amode31(struct hypfs_diag0c_entry *entry) * void _diag0c_amode31(struct hypfs_diag0c_entry *entry)
*/ */
ENTRY(_diag0c_amode31) SYM_FUNC_START(_diag0c_amode31)
sam31 sam31
diag %r2,%r2,0x0c diag %r2,%r2,0x0c
sam64 sam64
BR_EX_AMODE31_r14 BR_EX_AMODE31_r14
ENDPROC(_diag0c_amode31) SYM_FUNC_END(_diag0c_amode31)
/* /*
* void _diag308_reset_amode31(void) * void _diag308_reset_amode31(void)
* *
* Calls diag 308 subcode 1 and continues execution * Calls diag 308 subcode 1 and continues execution
*/ */
ENTRY(_diag308_reset_amode31) SYM_FUNC_START(_diag308_reset_amode31)
larl %r4,.Lctlregs # Save control registers larl %r4,ctlregs # Save control registers
stctg %c0,%c15,0(%r4) stctg %c0,%c15,0(%r4)
lg %r2,0(%r4) # Disable lowcore protection lg %r2,0(%r4) # Disable lowcore protection
nilh %r2,0xefff nilh %r2,0xefff
larl %r4,.Lctlreg0 larl %r4,ctlreg0
stg %r2,0(%r4) stg %r2,0(%r4)
lctlg %c0,%c0,0(%r4) lctlg %c0,%c0,0(%r4)
larl %r4,.Lfpctl # Floating point control register larl %r4,fpctl # Floating point control register
stfpc 0(%r4) stfpc 0(%r4)
larl %r4,.Lprefix # Save prefix register larl %r4,prefix # Save prefix register
stpx 0(%r4) stpx 0(%r4)
larl %r4,.Lprefix_zero # Set prefix register to 0 larl %r4,prefix_zero # Set prefix register to 0
spx 0(%r4) spx 0(%r4)
larl %r4,.Lcontinue_psw # Save PSW flags larl %r4,continue_psw # Save PSW flags
epsw %r2,%r3 epsw %r2,%r3
stm %r2,%r3,0(%r4) stm %r2,%r3,0(%r4)
larl %r4,.Lrestart_part2 # Setup restart PSW at absolute 0 larl %r4,.Lrestart_part2 # Setup restart PSW at absolute 0
larl %r3,.Lrestart_diag308_psw larl %r3,restart_diag308_psw
og %r4,0(%r3) # Save PSW og %r4,0(%r3) # Save PSW
lghi %r3,0 lghi %r3,0
sturg %r4,%r3 # Use sturg, because of large pages sturg %r4,%r3 # Use sturg, because of large pages
...@@ -134,39 +134,26 @@ ENTRY(_diag308_reset_amode31) ...@@ -134,39 +134,26 @@ ENTRY(_diag308_reset_amode31)
lhi %r1,2 # Use mode 2 = ESAME (dump) lhi %r1,2 # Use mode 2 = ESAME (dump)
sigp %r1,%r0,SIGP_SET_ARCHITECTURE # Switch to ESAME mode sigp %r1,%r0,SIGP_SET_ARCHITECTURE # Switch to ESAME mode
sam64 # Switch to 64 bit addressing mode sam64 # Switch to 64 bit addressing mode
larl %r4,.Lctlregs # Restore control registers larl %r4,ctlregs # Restore control registers
lctlg %c0,%c15,0(%r4) lctlg %c0,%c15,0(%r4)
larl %r4,.Lfpctl # Restore floating point ctl register larl %r4,fpctl # Restore floating point ctl register
lfpc 0(%r4) lfpc 0(%r4)
larl %r4,.Lprefix # Restore prefix register larl %r4,prefix # Restore prefix register
spx 0(%r4) spx 0(%r4)
larl %r4,.Lcontinue_psw # Restore PSW flags larl %r4,continue_psw # Restore PSW flags
larl %r2,.Lcontinue larl %r2,.Lcontinue
stg %r2,8(%r4) stg %r2,8(%r4)
lpswe 0(%r4) lpswe 0(%r4)
.Lcontinue: .Lcontinue:
BR_EX_AMODE31_r14 BR_EX_AMODE31_r14
ENDPROC(_diag308_reset_amode31) SYM_FUNC_END(_diag308_reset_amode31)
.section .amode31.data,"aw",@progbits .section .amode31.data,"aw",@progbits
.align 8 .balign 8
.Lrestart_diag308_psw: SYM_DATA_LOCAL(restart_diag308_psw, .long 0x00080000,0x80000000)
.long 0x00080000,0x80000000 SYM_DATA_LOCAL(continue_psw, .quad 0,0)
SYM_DATA_LOCAL(ctlreg0, .quad 0)
.align 8 SYM_DATA_LOCAL(ctlregs, .fill 16,8,0)
.Lcontinue_psw: SYM_DATA_LOCAL(fpctl, .long 0)
.quad 0,0 SYM_DATA_LOCAL(prefix, .long 0)
SYM_DATA_LOCAL(prefix_zero, .long 0)
.align 8
.Lctlreg0:
.quad 0
.Lctlregs:
.rept 16
.quad 0
.endr
.Lfpctl:
.long 0
.Lprefix:
.long 0
.Lprefix_zero:
.long 0
...@@ -637,16 +637,6 @@ static struct ctl_table topology_ctl_table[] = { ...@@ -637,16 +637,6 @@ static struct ctl_table topology_ctl_table[] = {
{ }, { },
}; };
static struct ctl_table topology_dir_table[] = {
{
.procname = "s390",
.maxlen = 0,
.mode = 0555,
.child = topology_ctl_table,
},
{ },
};
static int __init topology_init(void) static int __init topology_init(void)
{ {
struct device *dev_root; struct device *dev_root;
...@@ -657,7 +647,7 @@ static int __init topology_init(void) ...@@ -657,7 +647,7 @@ static int __init topology_init(void)
set_topology_timer(); set_topology_timer();
else else
topology_update_polarization_simple(); topology_update_polarization_simple();
register_sysctl_table(topology_dir_table); register_sysctl("s390", topology_ctl_table);
dev_root = bus_get_dev_root(&cpu_subsys); dev_root = bus_get_dev_root(&cpu_subsys);
if (dev_root) { if (dev_root) {
......
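Both this hunk and the cmm.c one further down drop the hand-built single-entry directory table in favour of register_sysctl(path, table), which creates the intermediate directory from the path string. A hedged sketch with a hypothetical table (example_table and /proc/sys/s390/example are illustrations, not part of this patch):

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/sysctl.h>

static int example_value;

static struct ctl_table example_table[] = {
	{
		.procname	= "example",
		.data		= &example_value,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{ },
};

static struct ctl_table_header *example_hdr;

static int __init example_sysctl_init(void)
{
	/* Creates /proc/sys/s390/example; no parent ctl_table needed. */
	example_hdr = register_sysctl("s390", example_table);
	return example_hdr ? 0 : -ENOMEM;
}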
/* SPDX-License-Identifier: GPL-2.0 */ /* SPDX-License-Identifier: GPL-2.0 */
#include <linux/linkage.h>
#include <asm/unistd.h> #include <asm/unistd.h>
#include <asm/dwarf.h> #include <asm/dwarf.h>
.macro vdso_syscall func,syscall .macro vdso_syscall func,syscall
.globl __kernel_compat_\func .globl __kernel_compat_\func
.type __kernel_compat_\func,@function .type __kernel_compat_\func,@function
.align 8 __ALIGN
__kernel_compat_\func: __kernel_compat_\func:
CFI_STARTPROC CFI_STARTPROC
svc \syscall svc \syscall
......
/* SPDX-License-Identifier: GPL-2.0 */ /* SPDX-License-Identifier: GPL-2.0 */
#include <linux/linkage.h>
#include <asm/vdso.h> #include <asm/vdso.h>
#include <asm/unistd.h> #include <asm/unistd.h>
#include <asm/asm-offsets.h> #include <asm/asm-offsets.h>
...@@ -16,7 +17,7 @@ ...@@ -16,7 +17,7 @@
.macro vdso_func func .macro vdso_func func
.globl __kernel_\func .globl __kernel_\func
.type __kernel_\func,@function .type __kernel_\func,@function
.align 8 __ALIGN
__kernel_\func: __kernel_\func:
CFI_STARTPROC CFI_STARTPROC
aghi %r15,-WRAPPER_FRAME_SIZE aghi %r15,-WRAPPER_FRAME_SIZE
...@@ -41,7 +42,7 @@ vdso_func getcpu ...@@ -41,7 +42,7 @@ vdso_func getcpu
.macro vdso_syscall func,syscall .macro vdso_syscall func,syscall
.globl __kernel_\func .globl __kernel_\func
.type __kernel_\func,@function .type __kernel_\func,@function
.align 8 __ALIGN
__kernel_\func: __kernel_\func:
CFI_STARTPROC CFI_STARTPROC
svc \syscall svc \syscall
......
...@@ -14,6 +14,8 @@ ...@@ -14,6 +14,8 @@
#define BSS_FIRST_SECTIONS *(.bss..swapper_pg_dir) \ #define BSS_FIRST_SECTIONS *(.bss..swapper_pg_dir) \
*(.bss..invalid_pg_dir) *(.bss..invalid_pg_dir)
#define RO_EXCEPTION_TABLE_ALIGN 16
/* Handle ro_after_init data on our own. */ /* Handle ro_after_init data on our own. */
#define RO_AFTER_INIT_DATA #define RO_AFTER_INIT_DATA
...@@ -66,7 +68,6 @@ SECTIONS ...@@ -66,7 +68,6 @@ SECTIONS
*(.data..ro_after_init) *(.data..ro_after_init)
JUMP_TABLE_DATA JUMP_TABLE_DATA
} :data } :data
EXCEPTION_TABLE(16)
. = ALIGN(PAGE_SIZE); . = ALIGN(PAGE_SIZE);
__end_ro_after_init = .; __end_ro_after_init = .;
...@@ -219,6 +220,13 @@ SECTIONS ...@@ -219,6 +220,13 @@ SECTIONS
QUAD(init_mm) QUAD(init_mm)
QUAD(swapper_pg_dir) QUAD(swapper_pg_dir)
QUAD(invalid_pg_dir) QUAD(invalid_pg_dir)
#ifdef CONFIG_KASAN
QUAD(kasan_early_shadow_page)
QUAD(kasan_early_shadow_pte)
QUAD(kasan_early_shadow_pmd)
QUAD(kasan_early_shadow_pud)
QUAD(kasan_early_shadow_p4d)
#endif
} :NONE } :NONE
/* Debugging sections. */ /* Debugging sections. */
......
...@@ -14,8 +14,7 @@ ...@@ -14,8 +14,7 @@
/* /*
* void *memmove(void *dest, const void *src, size_t n) * void *memmove(void *dest, const void *src, size_t n)
*/ */
WEAK(memmove) SYM_FUNC_START(__memmove)
ENTRY(__memmove)
ltgr %r4,%r4 ltgr %r4,%r4
lgr %r1,%r2 lgr %r1,%r2
jz .Lmemmove_exit jz .Lmemmove_exit
...@@ -48,7 +47,10 @@ ENTRY(__memmove) ...@@ -48,7 +47,10 @@ ENTRY(__memmove)
BR_EX %r14 BR_EX %r14
.Lmemmove_mvc: .Lmemmove_mvc:
mvc 0(1,%r1),0(%r3) mvc 0(1,%r1),0(%r3)
ENDPROC(__memmove) SYM_FUNC_END(__memmove)
EXPORT_SYMBOL(__memmove)
SYM_FUNC_ALIAS(memmove, __memmove)
EXPORT_SYMBOL(memmove) EXPORT_SYMBOL(memmove)
/* /*
...@@ -66,8 +68,7 @@ EXPORT_SYMBOL(memmove) ...@@ -66,8 +68,7 @@ EXPORT_SYMBOL(memmove)
* return __builtin_memset(s, c, n); * return __builtin_memset(s, c, n);
* } * }
*/ */
WEAK(memset) SYM_FUNC_START(__memset)
ENTRY(__memset)
ltgr %r4,%r4 ltgr %r4,%r4
jz .Lmemset_exit jz .Lmemset_exit
ltgr %r3,%r3 ltgr %r3,%r3
...@@ -111,7 +112,10 @@ ENTRY(__memset) ...@@ -111,7 +112,10 @@ ENTRY(__memset)
xc 0(1,%r1),0(%r1) xc 0(1,%r1),0(%r1)
.Lmemset_mvc: .Lmemset_mvc:
mvc 1(1,%r1),0(%r1) mvc 1(1,%r1),0(%r1)
ENDPROC(__memset) SYM_FUNC_END(__memset)
EXPORT_SYMBOL(__memset)
SYM_FUNC_ALIAS(memset, __memset)
EXPORT_SYMBOL(memset) EXPORT_SYMBOL(memset)
/* /*
...@@ -119,8 +123,7 @@ EXPORT_SYMBOL(memset) ...@@ -119,8 +123,7 @@ EXPORT_SYMBOL(memset)
* *
* void *memcpy(void *dest, const void *src, size_t n) * void *memcpy(void *dest, const void *src, size_t n)
*/ */
WEAK(memcpy) SYM_FUNC_START(__memcpy)
ENTRY(__memcpy)
ltgr %r4,%r4 ltgr %r4,%r4
jz .Lmemcpy_exit jz .Lmemcpy_exit
aghi %r4,-1 aghi %r4,-1
...@@ -141,7 +144,10 @@ ENTRY(__memcpy) ...@@ -141,7 +144,10 @@ ENTRY(__memcpy)
j .Lmemcpy_remainder j .Lmemcpy_remainder
.Lmemcpy_mvc: .Lmemcpy_mvc:
mvc 0(1,%r1),0(%r3) mvc 0(1,%r1),0(%r3)
ENDPROC(__memcpy) SYM_FUNC_END(__memcpy)
EXPORT_SYMBOL(__memcpy)
SYM_FUNC_ALIAS(memcpy, __memcpy)
EXPORT_SYMBOL(memcpy) EXPORT_SYMBOL(memcpy)
/* /*
...@@ -152,7 +158,7 @@ EXPORT_SYMBOL(memcpy) ...@@ -152,7 +158,7 @@ EXPORT_SYMBOL(memcpy)
* void *__memset64(uint64_t *s, uint64_t v, size_t count) * void *__memset64(uint64_t *s, uint64_t v, size_t count)
*/ */
.macro __MEMSET bits,bytes,insn .macro __MEMSET bits,bytes,insn
ENTRY(__memset\bits) SYM_FUNC_START(__memset\bits)
ltgr %r4,%r4 ltgr %r4,%r4
jz .L__memset_exit\bits jz .L__memset_exit\bits
cghi %r4,\bytes cghi %r4,\bytes
...@@ -178,7 +184,7 @@ ENTRY(__memset\bits) ...@@ -178,7 +184,7 @@ ENTRY(__memset\bits)
BR_EX %r14 BR_EX %r14
.L__memset_mvc\bits: .L__memset_mvc\bits:
mvc \bytes(1,%r1),0(%r1) mvc \bytes(1,%r1),0(%r1)
ENDPROC(__memset\bits) SYM_FUNC_END(__memset\bits)
.endm .endm
__MEMSET 16,2,sth __MEMSET 16,2,sth
......
...@@ -10,6 +10,3 @@ obj-$(CONFIG_CMM) += cmm.o ...@@ -10,6 +10,3 @@ obj-$(CONFIG_CMM) += cmm.o
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
obj-$(CONFIG_PTDUMP_CORE) += dump_pagetables.o obj-$(CONFIG_PTDUMP_CORE) += dump_pagetables.o
obj-$(CONFIG_PGSTE) += gmap.o obj-$(CONFIG_PGSTE) += gmap.o
KASAN_SANITIZE_kasan_init.o := n
obj-$(CONFIG_KASAN) += kasan_init.o
...@@ -335,16 +335,6 @@ static struct ctl_table cmm_table[] = { ...@@ -335,16 +335,6 @@ static struct ctl_table cmm_table[] = {
{ } { }
}; };
static struct ctl_table cmm_dir_table[] = {
{
.procname = "vm",
.maxlen = 0,
.mode = 0555,
.child = cmm_table,
},
{ }
};
#ifdef CONFIG_CMM_IUCV #ifdef CONFIG_CMM_IUCV
#define SMSG_PREFIX "CMM" #define SMSG_PREFIX "CMM"
static void cmm_smsg_target(const char *from, char *msg) static void cmm_smsg_target(const char *from, char *msg)
...@@ -389,7 +379,7 @@ static int __init cmm_init(void) ...@@ -389,7 +379,7 @@ static int __init cmm_init(void)
{ {
int rc = -ENOMEM; int rc = -ENOMEM;
cmm_sysctl_header = register_sysctl_table(cmm_dir_table); cmm_sysctl_header = register_sysctl("vm", cmm_table);
if (!cmm_sysctl_header) if (!cmm_sysctl_header)
goto out_sysctl; goto out_sysctl;
#ifdef CONFIG_CMM_IUCV #ifdef CONFIG_CMM_IUCV
......
...@@ -176,9 +176,8 @@ void __init mem_init(void) ...@@ -176,9 +176,8 @@ void __init mem_init(void)
void free_initmem(void) void free_initmem(void)
{ {
__set_memory((unsigned long)_sinittext, set_memory_rwnx((unsigned long)_sinittext,
(unsigned long)(_einittext - _sinittext) >> PAGE_SHIFT, (unsigned long)(_einittext - _sinittext) >> PAGE_SHIFT);
SET_MEMORY_RW | SET_MEMORY_NX);
free_initmem_default(POISON_FREE_INITMEM); free_initmem_default(POISON_FREE_INITMEM);
} }
......
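set_memory_rwnx(), as used in the hunk above, is the counterpart to set_memory_rox() seen earlier, folding SET_MEMORY_RW | SET_MEMORY_NX into one s390 helper. A hedged sketch of the retirement pattern it serves here:

#include <linux/set_memory.h>

/* Make a text range ordinary data again before its pages are freed,
 * so recycled pages never carry stale execute permissions. */
static void example_retire_text(unsigned long start, unsigned long end)
{
	set_memory_rwnx(start, (end - start) >> PAGE_SHIFT);
	/* pages can now go back to the allocator, e.g. via
	 * free_initmem_default() for init text */
}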
...@@ -204,7 +204,7 @@ struct read_storage_sccb { ...@@ -204,7 +204,7 @@ struct read_storage_sccb {
u16 assigned; u16 assigned;
u16 standby; u16 standby;
u16 :16; u16 :16;
u32 entries[0]; u32 entries[];
} __packed; } __packed;
static inline void sclp_fill_core_info(struct sclp_core_info *info, static inline void sclp_fill_core_info(struct sclp_core_info *info,
......
...@@ -241,7 +241,7 @@ struct attach_storage_sccb { ...@@ -241,7 +241,7 @@ struct attach_storage_sccb {
u16 :16; u16 :16;
u16 assigned; u16 assigned;
u32 :32; u32 :32;
u32 entries[0]; u32 entries[];
} __packed; } __packed;
static int sclp_attach_storage(u8 id) static int sclp_attach_storage(u8 id)
......
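These entries[0] to entries[] conversions complete the flexible-array migration called out in the merge description. With a true flexible array member, struct_size() computes allocation sizes with overflow checking. A hedged sketch using a hypothetical struct (example_sccb is an illustration, not the real SCCB layout):

#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/types.h>

struct example_sccb {
	u16 count;
	u32 entries[];		/* was: u32 entries[0]; */
} __packed;

static struct example_sccb *example_alloc(u16 n)
{
	struct example_sccb *s;

	/* struct_size() = sizeof(*s) + n * sizeof(s->entries[0]),
	 * saturating on overflow instead of wrapping. */
	s = kzalloc(struct_size(s, entries, n), GFP_KERNEL);
	if (s)
		s->count = n;
	return s;
}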