Commit 45365a06 authored by Linus Torvalds

Merge tag 's390-5.9-1' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux

Pull s390 updates from Heiko Carstens:

 - Add support for function error injection (a usage sketch follows this
   list).

 - Add support for custom exception handlers, as required by
   BPF_PROBE_MEM.

 - Add support for BPF_PROBE_MEM.

 - Add trace events for idle enter / exit for the s390 specific idle
   implementation.

 - Remove unused zcore memmap device.

 - Remove unused "raw view" from s390 debug feature.

 - AP bus + zcrypt device driver code refactoring.

 - Provide cex4 cca sysfs attributes for cex3 for zcrypt device driver.

 - Expose only minimal interface to walk physmem for mm/memblock. This
   is a common code change and it has been agreed on with Mike Rapoport
   and Andrew Morton that this can go upstream via the s390 tree.

 - Rework of the s390 vmem/vmemmap code to allow for future memory hot
   remove.

 - Get rid of FORCE_MAX_ZONEORDER to finally allow for order-10
   allocations again, instead of only order-8 allocations.

 - Various small improvements and fixes.
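
To make the error-injection item concrete, here is a minimal, hedged sketch
(the function name is hypothetical, not from this series) of how a kernel
function opts in once HAVE_FUNCTION_ERROR_INJECTION is selected; on s390 the
actual override is the new override_function_with_return() helper shown later
in this diff, which emulates 'br %r14':

#include <linux/errno.h>
#include <linux/error-injection.h>

/* Hypothetical function for illustration; real setup work elided. */
static int demo_hw_init(void)
{
	return 0;
}
/* Opt in: bpf-based fault injection may now force an errno return. */
ALLOW_ERROR_INJECTION(demo_hw_init, ERRNO);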

* tag 's390-5.9-1' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux: (48 commits)
  s390/vmemmap: coding style updates
  s390/vmemmap: avoid memset(PAGE_UNUSED) when adding consecutive sections
  s390/vmemmap: remember unused sub-pmd ranges
  s390/vmemmap: fallback to PTEs if mapping large PMD fails
  s390/vmem: cleanup empty page tables
  s390/vmemmap: take the vmem_mutex when populating/freeing
  s390/vmemmap: cleanup when vmemmap_populate() fails
  s390/vmemmap: extend modify_pagetable() to handle vmemmap
  s390/vmem: consolidate vmem_add_range() and vmem_remove_range()
  s390/vmem: rename vmem_add_mem() to vmem_add_range()
  s390: enable HAVE_FUNCTION_ERROR_INJECTION
  s390/pci: clarify comment in s390_mmio_read/write
  s390/time: improve comparison for tod steering
  s390/time: select CLOCKSOURCE_VALIDATE_LAST_CYCLE
  s390/time: use CLOCKSOURCE_MASK
  s390/bpf: implement BPF_PROBE_MEM
  s390/kernel: expand exception table logic to allow new handling options
  s390/kernel: unify EX_TABLE* implementations
  s390/mm: allow order 10 allocations
  s390/mm: avoid trimming to MAX_ORDER
  ...
parents cdc8fcb4 9a996c67
......@@ -67,7 +67,7 @@ corresponding component. The debugfs normally should be mounted to
The content of the directories are files which represent different views
to the debug log. Each component can decide which views should be
used through registering them with the function :c:func:`debug_register_view()`.
Predefined views for hex/ascii, sprintf and raw binary data are provided.
Predefined views for hex/ascii and sprintf data are provided.
It is also possible to define other views. The content of
a view can be inspected simply by reading the corresponding debugfs file.
......@@ -119,8 +119,6 @@ Predefined views:
extern struct debug_view debug_hex_ascii_view;
extern struct debug_view debug_raw_view;
extern struct debug_view debug_sprintf_view;
Examples
......@@ -129,7 +127,7 @@ Examples
.. code-block:: c
/*
* hex_ascii- + raw-view Example
* hex_ascii-view Example
*/
#include <linux/init.h>
......@@ -143,7 +141,6 @@ Examples
debug_info = debug_register("test", 1, 4, 4 );
debug_register_view(debug_info, &debug_hex_ascii_view);
debug_register_view(debug_info, &debug_raw_view);
debug_text_event(debug_info, 4 , "one ");
debug_int_exception(debug_info, 4, 4711);
......@@ -201,7 +198,7 @@ debugfs-files:
Example::
> ls /sys/kernel/debug/s390dbf/dasd
flush hex_ascii level pages raw
flush hex_ascii level pages
> cat /sys/kernel/debug/s390dbf/dasd/hex_ascii | sort -k2,2 -s
00 00974733272:680099 2 - 02 0006ad7e 07 ea 4a 90 | ....
00 00974733272:682210 2 - 02 0006ade6 46 52 45 45 | FREE
......@@ -298,10 +295,9 @@ order to see the debug entries well formatted.
Predefined Views
----------------
There are three predefined views: hex_ascii, raw and sprintf.
There are two predefined views: hex_ascii and sprintf.
The hex_ascii view shows the data field in hex and ascii representation
(e.g. ``45 43 4b 44 | ECKD``).
The raw view returns a bytestream as the debug areas are stored in memory.
The sprintf view formats the debug entries in the same way as the sprintf
function would do. The sprintf event/exception functions write to the
......@@ -334,11 +330,6 @@ The format of the hex_ascii and sprintf view is as follows:
- Return Address to caller
- data field
The format of the raw view is:
- Header as described in debug.h
- datafield
A typical line of the hex_ascii view will look like the following (first line
is only for explanation and will not be displayed when 'cating' the view)::
......
......@@ -102,7 +102,6 @@ config S390
select ARCH_INLINE_WRITE_UNLOCK_BH
select ARCH_INLINE_WRITE_UNLOCK_IRQ
select ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE
select ARCH_KEEP_MEMBLOCK
select ARCH_STACKWALK
select ARCH_SUPPORTS_ATOMIC_RMW
select ARCH_SUPPORTS_NUMA_BALANCING
......@@ -126,6 +125,7 @@ config S390
select HAVE_ARCH_JUMP_LABEL_RELATIVE
select HAVE_ARCH_KASAN
select HAVE_ARCH_KASAN_VMALLOC
select CLOCKSOURCE_VALIDATE_LAST_CYCLE
select CPU_NO_EFFICIENT_FFS if !HAVE_MARCH_Z9_109_FEATURES
select HAVE_ARCH_SECCOMP_FILTER
select HAVE_ARCH_SOFT_DIRTY
......@@ -145,6 +145,7 @@ config S390
select HAVE_EFFICIENT_UNALIGNED_ACCESS
select HAVE_FENTRY
select HAVE_FTRACE_MCOUNT_RECORD
select HAVE_FUNCTION_ERROR_INJECTION
select HAVE_FUNCTION_GRAPH_TRACER
select HAVE_FUNCTION_TRACER
select HAVE_FUTEX_CMPXCHG if FUTEX
......@@ -626,10 +627,6 @@ config ARCH_ENABLE_MEMORY_HOTREMOVE
config ARCH_ENABLE_SPLIT_PMD_PTLOCK
def_bool y
config FORCE_MAX_ZONEORDER
int
default "9"
config MAX_PHYSMEM_BITS
int "Maximum size of supported physical memory in bits (42-53)"
range 42 53
......
......@@ -129,8 +129,7 @@ static void appldata_get_os_data(void *data)
os_data->nr_cpus = j;
new_size = sizeof(struct appldata_os_data) +
(os_data->nr_cpus * sizeof(struct appldata_os_per_cpu));
new_size = struct_size(os_data, os_cpu, os_data->nr_cpus);
if (ops.size != new_size) {
if (ops.active) {
rc = appldata_diag(APPLDATA_RECORD_OS_ID,
......@@ -165,8 +164,7 @@ static int __init appldata_os_init(void)
{
int rc, max_size;
max_size = sizeof(struct appldata_os_data) +
(num_possible_cpus() * sizeof(struct appldata_os_per_cpu));
max_size = struct_size(appldata_os_data, os_cpu, num_possible_cpus());
if (max_size > APPLDATA_MAX_REC_SIZE) {
pr_err("Maximum OS record size %i exceeds the maximum "
"record size %i\n", max_size, APPLDATA_MAX_REC_SIZE);
......
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_S390_ASM_CONST_H
#define _ASM_S390_ASM_CONST_H
#ifdef __ASSEMBLY__
# define stringify_in_c(...) __VA_ARGS__
#else
/* This version of stringify will deal with commas... */
# define __stringify_in_c(...) #__VA_ARGS__
# define stringify_in_c(...) __stringify_in_c(__VA_ARGS__) " "
#endif
#endif /* _ASM_S390_ASM_CONST_H */
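
A hedged, self-contained illustration (plain userspace C, not from this
header) of why the variadic form matters: a single-parameter stringify macro
would split an argument at its comma, while __VA_ARGS__ keeps the whole
directive intact:

#include <stdio.h>

#define __stringify_in_c(...) #__VA_ARGS__
#define stringify_in_c(...) __stringify_in_c(__VA_ARGS__) " "

int main(void)
{
	/* The comma inside the directive survives as one string. */
	printf("%s\n", stringify_in_c(.quad 0, 1;));
	return 0;
}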
......@@ -12,7 +12,7 @@
#include <linux/kernel.h>
#include <linux/time.h>
#include <linux/refcount.h>
#include <uapi/asm/debug.h>
#include <linux/fs.h>
#define DEBUG_MAX_LEVEL 6 /* debug levels range from 0 to 6 */
#define DEBUG_OFF_LEVEL -1 /* level where debug is switched off */
......@@ -26,6 +26,21 @@
#define DEBUG_DATA(entry) (char *)(entry + 1) /* data is stored behind */
/* the entry information */
#define __DEBUG_FEATURE_VERSION 2 /* version of debug feature */
struct __debug_entry {
union {
struct {
unsigned long clock : 52;
unsigned long exception : 1;
unsigned long level : 3;
unsigned long cpuid : 8;
} fields;
unsigned long stck;
} id;
void *caller;
} __packed;
typedef struct __debug_entry debug_entry_t;
struct debug_view;
......@@ -82,7 +97,6 @@ struct debug_view {
};
extern struct debug_view debug_hex_ascii_view;
extern struct debug_view debug_raw_view;
extern struct debug_view debug_sprintf_view;
/* do NOT use the _common functions */
......
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __S390_EXTABLE_H
#define __S390_EXTABLE_H
#include <asm/ptrace.h>
#include <linux/compiler.h>
/*
* The exception table consists of pairs of addresses: the first is the
* address of an instruction that is allowed to fault, and the second is
* the address at which the program should continue. No registers are
* modified, so it is entirely up to the continuation code to figure out
* what to do.
* The exception table consists of three addresses:
*
* - Address of an instruction that is allowed to fault.
* - Address at which the program should continue.
* - Optional address of handler that takes pt_regs * argument and runs in
* interrupt context.
*
* No registers are modified, so it is entirely up to the continuation code
* to figure out what to do.
*
* All the routines below use bits of fixup code that are out of line
* with the main instruction path. This means when everything is well,
......@@ -17,6 +25,7 @@
struct exception_table_entry
{
int insn, fixup;
long handler;
};
extern struct exception_table_entry *__start_dma_ex_table;
......@@ -29,6 +38,39 @@ static inline unsigned long extable_fixup(const struct exception_table_entry *x)
return (unsigned long)&x->fixup + x->fixup;
}
typedef bool (*ex_handler_t)(const struct exception_table_entry *,
struct pt_regs *);
static inline ex_handler_t
ex_fixup_handler(const struct exception_table_entry *x)
{
if (likely(!x->handler))
return NULL;
return (ex_handler_t)((unsigned long)&x->handler + x->handler);
}
static inline bool ex_handle(const struct exception_table_entry *x,
struct pt_regs *regs)
{
ex_handler_t handler = ex_fixup_handler(x);
if (unlikely(handler))
return handler(x, regs);
regs->psw.addr = extable_fixup(x);
return true;
}
#define ARCH_HAS_RELATIVE_EXTABLE
static inline void swap_ex_entry_fixup(struct exception_table_entry *a,
struct exception_table_entry *b,
struct exception_table_entry tmp,
int delta)
{
a->fixup = b->fixup + delta;
b->fixup = tmp.fixup - delta;
a->handler = b->handler + delta;
b->handler = tmp.handler - delta;
}
#endif
......@@ -2,38 +2,27 @@
#ifndef __ASM_LINKAGE_H
#define __ASM_LINKAGE_H
#include <asm/asm-const.h>
#include <linux/stringify.h>
#define __ALIGN .align 4, 0x07
#define __ALIGN_STR __stringify(__ALIGN)
#ifndef __ASSEMBLY__
/*
* Helper macro for exception table entries
*/
#define EX_TABLE(_fault, _target) \
".section __ex_table,\"a\"\n" \
".align 4\n" \
".long (" #_fault ") - .\n" \
".long (" #_target ") - .\n" \
".previous\n"
#else /* __ASSEMBLY__ */
#define EX_TABLE(_fault, _target) \
.section __ex_table,"a" ; \
.align 4 ; \
.long (_fault) - . ; \
.long (_target) - . ; \
.previous
#define __EX_TABLE(_section, _fault, _target) \
stringify_in_c(.section _section,"a";) \
stringify_in_c(.align 8;) \
stringify_in_c(.long (_fault) - .;) \
stringify_in_c(.long (_target) - .;) \
stringify_in_c(.quad 0;) \
stringify_in_c(.previous)
#define EX_TABLE_DMA(_fault, _target) \
.section .dma.ex_table, "a" ; \
.align 4 ; \
.long (_fault) - . ; \
.long (_target) - . ; \
.previous
#define EX_TABLE(_fault, _target) \
__EX_TABLE(__ex_table, _fault, _target)
#define EX_TABLE_DMA(_fault, _target) \
__EX_TABLE(.dma.ex_table, _fault, _target)
#endif /* __ASSEMBLY__ */
#endif
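
For reference, a hedged sketch (compilable as plain C, reusing the macro body
from this patch) of the string the unified macro now produces in C context.
Note that each entry gained a .quad handler slot, zero meaning "no custom
handler", and the alignment grew to 8 to match the 16-byte
struct exception_table_entry:

#include <stdio.h>

#define __stringify_in_c(...) #__VA_ARGS__
#define stringify_in_c(...) __stringify_in_c(__VA_ARGS__) " "

#define __EX_TABLE(_section, _fault, _target) \
	stringify_in_c(.section _section,"a";) \
	stringify_in_c(.align 8;) \
	stringify_in_c(.long (_fault) - .;) \
	stringify_in_c(.long (_target) - .;) \
	stringify_in_c(.quad 0;) \
	stringify_in_c(.previous)

int main(void)
{
	/* Prints the inline-asm text EX_TABLE(0b, 1b) would emit. */
	puts(__EX_TABLE(__ex_table, 0b, 1b));
	return 0;
}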
......@@ -131,12 +131,6 @@ static inline void validate_st_entry(unsigned long *entry)
*entry |= ZPCI_TABLE_VALID;
}
static inline void invalidate_table_entry(unsigned long *entry)
{
*entry &= ~ZPCI_TABLE_VALID_MASK;
*entry |= ZPCI_TABLE_INVALID;
}
static inline void invalidate_pt_entry(unsigned long *entry)
{
WARN_ON_ONCE((*entry & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_INVALID);
......@@ -173,11 +167,6 @@ static inline int pt_entry_isvalid(unsigned long entry)
return (entry & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID;
}
static inline int entry_isprotected(unsigned long entry)
{
return (entry & ZPCI_TABLE_PROT_MASK) == ZPCI_TABLE_PROTECTED;
}
static inline unsigned long *get_rt_sto(unsigned long entry)
{
return ((entry & ZPCI_TABLE_TYPE_MASK) == ZPCI_TABLE_TYPE_RTX)
......
......@@ -1669,7 +1669,7 @@ static inline swp_entry_t __swp_entry(unsigned long type, unsigned long offset)
#define kern_addr_valid(addr) (1)
extern int vmem_add_mapping(unsigned long start, unsigned long size);
extern int vmem_remove_mapping(unsigned long start, unsigned long size);
extern void vmem_remove_mapping(unsigned long start, unsigned long size);
extern int s390_enable_sie(void);
extern int s390_enable_skey(void);
extern void s390_reset_cmma(struct mm_struct *mm);
......
......@@ -184,5 +184,10 @@ static inline unsigned long kernel_stack_pointer(struct pt_regs *regs)
return regs->gprs[15];
}
static inline void regs_set_return_value(struct pt_regs *regs, unsigned long rc)
{
regs->gprs[2] = rc;
}
#endif /* __ASSEMBLY__ */
#endif /* _S390_PTRACE_H */
......@@ -54,6 +54,10 @@ static inline int smp_get_base_cpu(int cpu)
return cpu - (cpu % (smp_cpu_mtid + 1));
}
static inline void smp_cpus_done(unsigned int max_cpus)
{
}
extern int smp_rescan_cpus(void);
extern void __noreturn cpu_die(void);
extern void __cpu_die(unsigned int cpu);
......
......@@ -30,7 +30,7 @@
})
#define __S390_SYS_STUBx(x, name, ...) \
asmlinkage long __s390_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__))\
asmlinkage long __s390_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__));\
ALLOW_ERROR_INJECTION(__s390_sys##name, ERRNO); \
asmlinkage long __s390_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__))\
{ \
......@@ -46,7 +46,7 @@
#define COMPAT_SYSCALL_DEFINE0(sname) \
SYSCALL_METADATA(_##sname, 0); \
asmlinkage long __s390_compat_sys_##sname(void); \
ALLOW_ERROR_INJECTION(__s390_compat__sys_##sname, ERRNO); \
ALLOW_ERROR_INJECTION(__s390_compat_sys_##sname, ERRNO); \
asmlinkage long __s390_compat_sys_##sname(void)
#define SYSCALL_DEFINE0(sname) \
......@@ -72,7 +72,7 @@
asmlinkage long __s390_compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)); \
asmlinkage long __s390_compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)) \
__attribute__((alias(__stringify(__se_compat_sys##name)))); \
ALLOW_ERROR_INJECTION(compat_sys##name, ERRNO); \
ALLOW_ERROR_INJECTION(__s390_compat_sys##name, ERRNO); \
static inline long __do_compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__));\
asmlinkage long __se_compat_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)); \
asmlinkage long __se_compat_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)) \
......
......@@ -49,11 +49,6 @@ static inline void set_clock_comparator(__u64 time)
asm volatile("sckc %0" : : "Q" (time));
}
static inline void store_clock_comparator(__u64 *time)
{
asm volatile("stckc %0" : "=Q" (*time));
}
void clock_comparator_work(void);
void __init time_early_init(void);
......
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
* S/390 debug facility
*
* Copyright IBM Corp. 1999, 2000
*/
#ifndef _UAPIDEBUG_H
#define _UAPIDEBUG_H
#include <linux/fs.h>
/* Note:
* struct __debug_entry must be defined outside of #ifdef __KERNEL__
* in order to allow a user program to analyze the 'raw'-view.
*/
struct __debug_entry{
union {
struct {
unsigned long long clock:52;
unsigned long long exception:1;
unsigned long long level:3;
unsigned long long cpuid:8;
} fields;
unsigned long long stck;
} id;
void* caller;
} __attribute__((packed));
#define __DEBUG_FEATURE_VERSION 2 /* version of debug feature */
#endif /* _UAPIDEBUG_H */
......@@ -36,12 +36,12 @@
* - length(n_modulus) = inputdatalength
*/
struct ica_rsa_modexpo {
char __user *inputdata;
unsigned int inputdatalength;
char __user *outputdata;
unsigned int outputdatalength;
char __user *b_key;
char __user *n_modulus;
__u8 __user *inputdata;
__u32 inputdatalength;
__u8 __user *outputdata;
__u32 outputdatalength;
__u8 __user *b_key;
__u8 __user *n_modulus;
};
/**
......@@ -59,15 +59,15 @@ struct ica_rsa_modexpo {
* - length(u_mult_inv) = inputdatalength/2 + 8
*/
struct ica_rsa_modexpo_crt {
char __user *inputdata;
unsigned int inputdatalength;
char __user *outputdata;
unsigned int outputdatalength;
char __user *bp_key;
char __user *bq_key;
char __user *np_prime;
char __user *nq_prime;
char __user *u_mult_inv;
__u8 __user *inputdata;
__u32 inputdatalength;
__u8 __user *outputdata;
__u32 outputdatalength;
__u8 __user *bp_key;
__u8 __user *bq_key;
__u8 __user *np_prime;
__u8 __user *nq_prime;
__u8 __user *u_mult_inv;
};
/**
......@@ -83,67 +83,67 @@ struct ica_rsa_modexpo_crt {
* key block
*/
struct CPRBX {
unsigned short cprb_len; /* CPRB length 220 */
unsigned char cprb_ver_id; /* CPRB version id. 0x02 */
unsigned char pad_000[3]; /* Alignment pad bytes */
unsigned char func_id[2]; /* function id 0x5432 */
unsigned char cprb_flags[4]; /* Flags */
unsigned int req_parml; /* request parameter buffer len */
unsigned int req_datal; /* request data buffer */
unsigned int rpl_msgbl; /* reply message block length */
unsigned int rpld_parml; /* replied parameter block len */
unsigned int rpl_datal; /* reply data block len */
unsigned int rpld_datal; /* replied data block len */
unsigned int req_extbl; /* request extension block len */
unsigned char pad_001[4]; /* reserved */
unsigned int rpld_extbl; /* replied extension block len */
unsigned char padx000[16 - sizeof(char *)];
unsigned char *req_parmb; /* request parm block 'address' */
unsigned char padx001[16 - sizeof(char *)];
unsigned char *req_datab; /* request data block 'address' */
unsigned char padx002[16 - sizeof(char *)];
unsigned char *rpl_parmb; /* reply parm block 'address' */
unsigned char padx003[16 - sizeof(char *)];
unsigned char *rpl_datab; /* reply data block 'address' */
unsigned char padx004[16 - sizeof(char *)];
unsigned char *req_extb; /* request extension block 'addr'*/
unsigned char padx005[16 - sizeof(char *)];
unsigned char *rpl_extb; /* reply extension block 'address'*/
unsigned short ccp_rtcode; /* server return code */
unsigned short ccp_rscode; /* server reason code */
unsigned int mac_data_len; /* Mac Data Length */
unsigned char logon_id[8]; /* Logon Identifier */
unsigned char mac_value[8]; /* Mac Value */
unsigned char mac_content_flgs;/* Mac content flag byte */
unsigned char pad_002; /* Alignment */
unsigned short domain; /* Domain */
unsigned char usage_domain[4];/* Usage domain */
unsigned char cntrl_domain[4];/* Control domain */
unsigned char S390enf_mask[4];/* S/390 enforcement mask */
unsigned char pad_004[36]; /* reserved */
__u16 cprb_len; /* CPRB length 220 */
__u8 cprb_ver_id; /* CPRB version id. 0x02 */
__u8 pad_000[3]; /* Alignment pad bytes */
__u8 func_id[2]; /* function id 0x5432 */
__u8 cprb_flags[4]; /* Flags */
__u32 req_parml; /* request parameter buffer len */
__u32 req_datal; /* request data buffer */
__u32 rpl_msgbl; /* reply message block length */
__u32 rpld_parml; /* replied parameter block len */
__u32 rpl_datal; /* reply data block len */
__u32 rpld_datal; /* replied data block len */
__u32 req_extbl; /* request extension block len */
__u8 pad_001[4]; /* reserved */
__u32 rpld_extbl; /* replied extension block len */
__u8 padx000[16 - sizeof(__u8 *)];
__u8 __user *req_parmb; /* request parm block 'address' */
__u8 padx001[16 - sizeof(__u8 *)];
__u8 __user *req_datab; /* request data block 'address' */
__u8 padx002[16 - sizeof(__u8 *)];
__u8 __user *rpl_parmb; /* reply parm block 'address' */
__u8 padx003[16 - sizeof(__u8 *)];
__u8 __user *rpl_datab; /* reply data block 'address' */
__u8 padx004[16 - sizeof(__u8 *)];
__u8 __user *req_extb; /* request extension block 'addr'*/
__u8 padx005[16 - sizeof(__u8 *)];
__u8 __user *rpl_extb; /* reply extension block 'address'*/
__u16 ccp_rtcode; /* server return code */
__u16 ccp_rscode; /* server reason code */
__u32 mac_data_len; /* Mac Data Length */
__u8 logon_id[8]; /* Logon Identifier */
__u8 mac_value[8]; /* Mac Value */
__u8 mac_content_flgs; /* Mac content flag byte */
__u8 pad_002; /* Alignment */
__u16 domain; /* Domain */
__u8 usage_domain[4]; /* Usage domain */
__u8 cntrl_domain[4]; /* Control domain */
__u8 S390enf_mask[4]; /* S/390 enforcement mask */
__u8 pad_004[36]; /* reserved */
} __attribute__((packed));
/**
* xcRB
*/
struct ica_xcRB {
unsigned short agent_ID;
unsigned int user_defined;
unsigned short request_ID;
unsigned int request_control_blk_length;
unsigned char padding1[16 - sizeof(char *)];
char __user *request_control_blk_addr;
unsigned int request_data_length;
char padding2[16 - sizeof(char *)];
char __user *request_data_address;
unsigned int reply_control_blk_length;
char padding3[16 - sizeof(char *)];
char __user *reply_control_blk_addr;
unsigned int reply_data_length;
char padding4[16 - sizeof(char *)];
char __user *reply_data_addr;
unsigned short priority_window;
unsigned int status;
__u16 agent_ID;
__u32 user_defined;
__u16 request_ID;
__u32 request_control_blk_length;
__u8 _padding1[16 - sizeof(__u8 *)];
__u8 __user *request_control_blk_addr;
__u32 request_data_length;
__u8 _padding2[16 - sizeof(__u8 *)];
__u8 __user *request_data_address;
__u32 reply_control_blk_length;
__u8 _padding3[16 - sizeof(__u8 *)];
__u8 __user *reply_control_blk_addr;
__u32 reply_data_length;
__u8 __padding4[16 - sizeof(__u8 *)];
__u8 __user *reply_data_addr;
__u16 priority_window;
__u32 status;
} __attribute__((packed));
/**
......
......@@ -549,8 +549,7 @@ static int get_mem_chunk_cnt(void)
int cnt = 0;
u64 idx;
for_each_mem_range(idx, &memblock.physmem, &oldmem_type, NUMA_NO_NODE,
MEMBLOCK_NONE, NULL, NULL, NULL)
for_each_physmem_range(idx, &oldmem_type, NULL, NULL)
cnt++;
return cnt;
}
......@@ -563,8 +562,7 @@ static void loads_init(Elf64_Phdr *phdr, u64 loads_offset)
phys_addr_t start, end;
u64 idx;
for_each_mem_range(idx, &memblock.physmem, &oldmem_type, NUMA_NO_NODE,
MEMBLOCK_NONE, &start, &end, NULL) {
for_each_physmem_range(idx, &oldmem_type, &start, &end) {
phdr->p_filesz = end - start;
phdr->p_type = PT_LOAD;
phdr->p_offset = start;
......
......@@ -90,27 +90,11 @@ static int debug_input_flush_fn(debug_info_t *id, struct debug_view *view,
size_t user_buf_size, loff_t *offset);
static int debug_hex_ascii_format_fn(debug_info_t *id, struct debug_view *view,
char *out_buf, const char *in_buf);
static int debug_raw_format_fn(debug_info_t *id,
struct debug_view *view, char *out_buf,
const char *in_buf);
static int debug_raw_header_fn(debug_info_t *id, struct debug_view *view,
int area, debug_entry_t *entry, char *out_buf);
static int debug_sprintf_format_fn(debug_info_t *id, struct debug_view *view,
char *out_buf, debug_sprintf_entry_t *curr_event);
/* globals */
struct debug_view debug_raw_view = {
"raw",
NULL,
&debug_raw_header_fn,
&debug_raw_format_fn,
NULL,
NULL
};
EXPORT_SYMBOL(debug_raw_view);
struct debug_view debug_hex_ascii_view = {
"hex_ascii",
NULL,
......@@ -1385,32 +1369,6 @@ static int debug_input_flush_fn(debug_info_t *id, struct debug_view *view,
return rc; /* number of input characters */
}
/*
* prints debug header in raw format
*/
static int debug_raw_header_fn(debug_info_t *id, struct debug_view *view,
int area, debug_entry_t *entry, char *out_buf)
{
int rc;
rc = sizeof(debug_entry_t);
memcpy(out_buf, entry, sizeof(debug_entry_t));
return rc;
}
/*
* prints debug data in raw format
*/
static int debug_raw_format_fn(debug_info_t *id, struct debug_view *view,
char *out_buf, const char *in_buf)
{
int rc;
rc = id->buf_size;
memcpy(out_buf, in_buf, id->buf_size);
return rc;
}
/*
* prints debug data in hex/ascii format
*/
......
......@@ -370,7 +370,7 @@ EXPORT_SYMBOL(sie_exit)
/*
* SVC interrupt handler routine. System calls are synchronous events and
* are executed with interrupts enabled.
* are entered with interrupts disabled.
*/
ENTRY(system_call)
......
......@@ -14,6 +14,7 @@
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/sched/cputime.h>
#include <trace/events/power.h>
#include <asm/nmi.h>
#include <asm/smp.h>
#include "entry.h"
......@@ -32,11 +33,12 @@ void enabled_wait(void)
PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK;
clear_cpu_flag(CIF_NOHZ_DELAY);
trace_cpu_idle_rcuidle(1, smp_processor_id());
local_irq_save(flags);
/* Call the assembler magic in entry.S */
psw_idle(idle, psw_mask);
local_irq_restore(flags);
trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
/* Account time spent with enabled wait psw loaded as idle time. */
write_seqcount_begin(&idle->seqcount);
......
......@@ -523,10 +523,8 @@ static int kprobe_trap_handler(struct pt_regs *regs, int trapnr)
* zero, try to fix up.
*/
entry = s390_search_extables(regs->psw.addr);
if (entry) {
regs->psw.addr = extable_fixup(entry);
if (entry && ex_handle(entry, regs))
return 1;
}
/*
* fixup_exception() could not handle it,
......
......@@ -167,7 +167,7 @@ static struct timer_list lgr_timer;
*/
static void lgr_timer_set(void)
{
mod_timer(&lgr_timer, jiffies + LGR_TIMER_INTERVAL_SECS * HZ);
mod_timer(&lgr_timer, jiffies + msecs_to_jiffies(LGR_TIMER_INTERVAL_SECS * MSEC_PER_SEC));
}
/*
......
......@@ -1127,14 +1127,6 @@ void __init setup_arch(char **cmdline_p)
free_mem_detect_info();
remove_oldmem();
/*
* Make sure all chunks are MAX_ORDER aligned so we don't need the
* extra checks that HOLES_IN_ZONE would require.
*
* Is this still required?
*/
memblock_trim_memory(1UL << (MAX_ORDER - 1 + PAGE_SHIFT));
if (is_prot_virt_host())
setup_uv();
setup_memory_end();
......
......@@ -1012,10 +1012,6 @@ void __init smp_prepare_boot_cpu(void)
smp_cpu_set_polarization(0, POLARIZATION_UNKNOWN);
}
void __init smp_cpus_done(unsigned int max_cpus)
{
}
void __init smp_setup_processor_id(void)
{
pcpu_devices[0].address = stap();
......@@ -1145,6 +1141,7 @@ static int smp_cpu_online(unsigned int cpu)
return sysfs_create_group(&s->kobj, &cpu_online_attr_group);
}
static int smp_cpu_pre_down(unsigned int cpu)
{
struct device *s = &per_cpu(cpu_device, cpu)->dev;
......
......@@ -237,7 +237,7 @@ static u64 read_tod_clock(struct clocksource *cs)
preempt_disable(); /* protect from changes to steering parameters */
now = get_tod_clock();
adj = tod_steering_end - now;
if (unlikely((s64) adj >= 0))
if (unlikely((s64) adj > 0))
/*
* manually steer by 1 cycle every 2^16 cycles. This
* corresponds to shifting the tod delta by 15. 1s is
......@@ -253,7 +253,7 @@ static struct clocksource clocksource_tod = {
.name = "tod",
.rating = 400,
.read = read_tod_clock,
.mask = -1ULL,
.mask = CLOCKSOURCE_MASK(64),
.mult = 1000,
.shift = 12,
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
......@@ -669,7 +669,7 @@ static void stp_work_fn(struct work_struct *work)
* There is a usable clock but the synchonization failed.
* Retry after a second.
*/
mod_timer(&stp_timer, jiffies + HZ);
mod_timer(&stp_timer, jiffies + msecs_to_jiffies(MSEC_PER_SEC));
out_unlock:
mutex_unlock(&stp_work_mutex);
......@@ -683,7 +683,7 @@ static struct bus_type stp_subsys = {
.dev_name = "stp",
};
static ssize_t stp_ctn_id_show(struct device *dev,
static ssize_t ctn_id_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
......@@ -693,9 +693,9 @@ static ssize_t stp_ctn_id_show(struct device *dev,
*(unsigned long long *) stp_info.ctnid);
}
static DEVICE_ATTR(ctn_id, 0400, stp_ctn_id_show, NULL);
static DEVICE_ATTR_RO(ctn_id);
static ssize_t stp_ctn_type_show(struct device *dev,
static ssize_t ctn_type_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
......@@ -704,9 +704,9 @@ static ssize_t stp_ctn_type_show(struct device *dev,
return sprintf(buf, "%i\n", stp_info.ctn);
}
static DEVICE_ATTR(ctn_type, 0400, stp_ctn_type_show, NULL);
static DEVICE_ATTR_RO(ctn_type);
static ssize_t stp_dst_offset_show(struct device *dev,
static ssize_t dst_offset_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
......@@ -715,9 +715,9 @@ static ssize_t stp_dst_offset_show(struct device *dev,
return sprintf(buf, "%i\n", (int)(s16) stp_info.dsto);
}
static DEVICE_ATTR(dst_offset, 0400, stp_dst_offset_show, NULL);
static DEVICE_ATTR_RO(dst_offset);
static ssize_t stp_leap_seconds_show(struct device *dev,
static ssize_t leap_seconds_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
......@@ -726,9 +726,9 @@ static ssize_t stp_leap_seconds_show(struct device *dev,
return sprintf(buf, "%i\n", (int)(s16) stp_info.leaps);
}
static DEVICE_ATTR(leap_seconds, 0400, stp_leap_seconds_show, NULL);
static DEVICE_ATTR_RO(leap_seconds);
static ssize_t stp_stratum_show(struct device *dev,
static ssize_t stratum_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
......@@ -737,9 +737,9 @@ static ssize_t stp_stratum_show(struct device *dev,
return sprintf(buf, "%i\n", (int)(s16) stp_info.stratum);
}
static DEVICE_ATTR(stratum, 0400, stp_stratum_show, NULL);
static DEVICE_ATTR_RO(stratum);
static ssize_t stp_time_offset_show(struct device *dev,
static ssize_t time_offset_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
......@@ -748,9 +748,9 @@ static ssize_t stp_time_offset_show(struct device *dev,
return sprintf(buf, "%i\n", (int) stp_info.tto);
}
static DEVICE_ATTR(time_offset, 0400, stp_time_offset_show, NULL);
static DEVICE_ATTR_RO(time_offset);
static ssize_t stp_time_zone_offset_show(struct device *dev,
static ssize_t time_zone_offset_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
......@@ -759,10 +759,9 @@ static ssize_t stp_time_zone_offset_show(struct device *dev,
return sprintf(buf, "%i\n", (int)(s16) stp_info.tzo);
}
static DEVICE_ATTR(time_zone_offset, 0400,
stp_time_zone_offset_show, NULL);
static DEVICE_ATTR_RO(time_zone_offset);
static ssize_t stp_timing_mode_show(struct device *dev,
static ssize_t timing_mode_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
......@@ -771,9 +770,9 @@ static ssize_t stp_timing_mode_show(struct device *dev,
return sprintf(buf, "%i\n", stp_info.tmd);
}
static DEVICE_ATTR(timing_mode, 0400, stp_timing_mode_show, NULL);
static DEVICE_ATTR_RO(timing_mode);
static ssize_t stp_timing_state_show(struct device *dev,
static ssize_t timing_state_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
......@@ -782,16 +781,16 @@ static ssize_t stp_timing_state_show(struct device *dev,
return sprintf(buf, "%i\n", stp_info.tst);
}
static DEVICE_ATTR(timing_state, 0400, stp_timing_state_show, NULL);
static DEVICE_ATTR_RO(timing_state);
static ssize_t stp_online_show(struct device *dev,
static ssize_t online_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
return sprintf(buf, "%i\n", stp_online);
}
static ssize_t stp_online_store(struct device *dev,
static ssize_t online_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
......@@ -817,18 +816,14 @@ static ssize_t stp_online_store(struct device *dev,
* Can't use DEVICE_ATTR because the attribute should be named
* stp/online but dev_attr_online already exists in this file ..
*/
static struct device_attribute dev_attr_stp_online = {
.attr = { .name = "online", .mode = 0600 },
.show = stp_online_show,
.store = stp_online_store,
};
static DEVICE_ATTR_RW(online);
static struct device_attribute *stp_attributes[] = {
&dev_attr_ctn_id,
&dev_attr_ctn_type,
&dev_attr_dst_offset,
&dev_attr_leap_seconds,
&dev_attr_stp_online,
&dev_attr_online,
&dev_attr_stratum,
&dev_attr_time_offset,
&dev_attr_time_zone_offset,
......
......@@ -356,9 +356,9 @@ static atomic_t topology_poll = ATOMIC_INIT(0);
static void set_topology_timer(void)
{
if (atomic_add_unless(&topology_poll, -1, 0))
mod_timer(&topology_timer, jiffies + HZ / 10);
mod_timer(&topology_timer, jiffies + msecs_to_jiffies(100));
else
mod_timer(&topology_timer, jiffies + HZ * 60);
mod_timer(&topology_timer, jiffies + msecs_to_jiffies(60 * MSEC_PER_SEC));
}
void topology_expect_change(void)
......
......@@ -50,11 +50,8 @@ void do_report_trap(struct pt_regs *regs, int si_signo, int si_code, char *str)
} else {
const struct exception_table_entry *fixup;
fixup = s390_search_extables(regs->psw.addr);
if (fixup)
regs->psw.addr = extable_fixup(fixup);
else {
if (!fixup || !ex_handle(fixup, regs))
die(regs, str);
}
}
}
......@@ -251,7 +248,7 @@ void monitor_event_exception(struct pt_regs *regs)
case BUG_TRAP_TYPE_NONE:
fixup = s390_search_extables(regs->psw.addr);
if (fixup)
regs->psw.addr = extable_fixup(fixup);
ex_handle(fixup, regs);
break;
case BUG_TRAP_TYPE_WARN:
break;
......
......@@ -14,3 +14,5 @@ KASAN_SANITIZE_uaccess.o := n
obj-$(CONFIG_S390_UNWIND_SELFTEST) += test_unwind.o
CFLAGS_test_unwind.o += -fno-optimize-sibling-calls
lib-$(CONFIG_FUNCTION_ERROR_INJECTION) += error-inject.o
// SPDX-License-Identifier: GPL-2.0+
#include <asm/ptrace.h>
#include <linux/error-injection.h>
#include <linux/kprobes.h>
void override_function_with_return(struct pt_regs *regs)
{
/*
* Emulate 'br 14'. 'regs' is captured by kprobes on entry to some
* kernel function.
*/
regs->psw.addr = regs->gprs[14];
}
NOKPROBE_SYMBOL(override_function_with_return);
......@@ -189,7 +189,7 @@ static void cmm_set_timer(void)
del_timer(&cmm_timer);
return;
}
mod_timer(&cmm_timer, jiffies + cmm_timeout_seconds * HZ);
mod_timer(&cmm_timer, jiffies + msecs_to_jiffies(cmm_timeout_seconds * MSEC_PER_SEC));
}
static void cmm_timer_fn(struct timer_list *unused)
......
......@@ -313,15 +313,10 @@ __segment_load (char *name, int do_nonshared, unsigned long *addr, unsigned long
goto out_free;
}
rc = vmem_add_mapping(seg->start_addr, seg->end - seg->start_addr + 1);
if (rc)
goto out_free;
seg->res = kzalloc(sizeof(struct resource), GFP_KERNEL);
if (seg->res == NULL) {
rc = -ENOMEM;
goto out_shared;
goto out_free;
}
seg->res->flags = IORESOURCE_BUSY | IORESOURCE_MEM;
seg->res->start = seg->start_addr;
......@@ -335,12 +330,17 @@ __segment_load (char *name, int do_nonshared, unsigned long *addr, unsigned long
if (rc == SEG_TYPE_SC ||
((rc == SEG_TYPE_SR || rc == SEG_TYPE_ER) && !do_nonshared))
seg->res->flags |= IORESOURCE_READONLY;
/* Check for overlapping resources before adding the mapping. */
if (request_resource(&iomem_resource, seg->res)) {
rc = -EBUSY;
kfree(seg->res);
goto out_shared;
goto out_free_resource;
}
rc = vmem_add_mapping(seg->start_addr, seg->end - seg->start_addr + 1);
if (rc)
goto out_resource;
if (do_nonshared)
diag_cc = dcss_diag(&loadnsr_scode, seg->dcss_name,
&start_addr, &end_addr);
......@@ -351,14 +351,14 @@ __segment_load (char *name, int do_nonshared, unsigned long *addr, unsigned long
dcss_diag(&purgeseg_scode, seg->dcss_name,
&dummy, &dummy);
rc = diag_cc;
goto out_resource;
goto out_mapping;
}
if (diag_cc > 1) {
pr_warn("Loading DCSS %s failed with rc=%ld\n", name, end_addr);
rc = dcss_diag_translate_rc(end_addr);
dcss_diag(&purgeseg_scode, seg->dcss_name,
&dummy, &dummy);
goto out_resource;
goto out_mapping;
}
seg->start_addr = start_addr;
seg->end = end_addr;
......@@ -377,11 +377,12 @@ __segment_load (char *name, int do_nonshared, unsigned long *addr, unsigned long
(void*) seg->end, segtype_string[seg->vm_segtype]);
}
goto out;
out_mapping:
vmem_remove_mapping(seg->start_addr, seg->end - seg->start_addr + 1);
out_resource:
release_resource(seg->res);
out_free_resource:
kfree(seg->res);
out_shared:
vmem_remove_mapping(seg->start_addr, seg->end - seg->start_addr + 1);
out_free:
kfree(seg);
out:
......@@ -400,8 +401,7 @@ __segment_load (char *name, int do_nonshared, unsigned long *addr, unsigned long
* -EIO : could not perform query or load diagnose
* -ENOENT : no such segment
* -EOPNOTSUPP: multi-part segment cannot be used with linux
* -ENOSPC : segment cannot be used (overlaps with storage)
* -EBUSY : segment can temporarily not be used (overlaps with dcss)
* -EBUSY : segment cannot be used (overlaps with dcss or storage)
* -ERANGE : segment cannot be used (exceeds kernel mapping range)
* -EPERM : segment is currently loaded with incompatible permissions
* -ENOMEM : out of memory
......@@ -626,10 +626,6 @@ void segment_warning(int rc, char *seg_name)
pr_err("DCSS %s has multiple page ranges and cannot be "
"loaded or queried\n", seg_name);
break;
case -ENOSPC:
pr_err("DCSS %s overlaps with used storage and cannot "
"be loaded\n", seg_name);
break;
case -EBUSY:
pr_err("%s needs used memory resources and cannot be "
"loaded or queried\n", seg_name);
......
......@@ -255,10 +255,8 @@ static noinline void do_no_context(struct pt_regs *regs)
/* Are we prepared to handle this kernel fault? */
fixup = s390_search_extables(regs->psw.addr);
if (fixup) {
regs->psw.addr = extable_fixup(fixup);
if (fixup && ex_handle(fixup, regs))
return;
}
/*
* Oops. The kernel tried to access some bad page. We'll have to
......@@ -376,7 +374,7 @@ static noinline void do_fault_error(struct pt_regs *regs, int access,
* routines.
*
* interruption code (int_code):
* 04 Protection -> Write-Protection (suprression)
* 04 Protection -> Write-Protection (suppression)
* 10 Segment translation -> Not present (nullification)
* 11 Page translation -> Not present (nullification)
* 3b Region third trans. -> Not present (nullification)
......
......@@ -49,6 +49,7 @@ struct bpf_jit {
int r1_thunk_ip; /* Address of expoline thunk for 'br %r1' */
int r14_thunk_ip; /* Address of expoline thunk for 'br %r14' */
int tail_call_start; /* Tail call start offset */
int excnt; /* Number of exception table entries */
int labels[1]; /* Labels for local jumps */
};
......@@ -588,6 +589,84 @@ static void bpf_jit_epilogue(struct bpf_jit *jit, u32 stack_depth)
}
}
static int get_probe_mem_regno(const u8 *insn)
{
/*
* insn must point to llgc, llgh, llgf or lg, which have destination
* register at the same position.
*/
if (insn[0] != 0xe3) /* common llgc, llgh, llgf and lg prefix */
return -1;
if (insn[5] != 0x90 && /* llgc */
insn[5] != 0x91 && /* llgh */
insn[5] != 0x16 && /* llgf */
insn[5] != 0x04) /* lg */
return -1;
return insn[1] >> 4;
}
static bool ex_handler_bpf(const struct exception_table_entry *x,
struct pt_regs *regs)
{
int regno;
u8 *insn;
regs->psw.addr = extable_fixup(x);
insn = (u8 *)__rewind_psw(regs->psw, regs->int_code >> 16);
regno = get_probe_mem_regno(insn);
if (WARN_ON_ONCE(regno < 0))
/* JIT bug - unexpected instruction. */
return false;
regs->gprs[regno] = 0;
return true;
}
static int bpf_jit_probe_mem(struct bpf_jit *jit, struct bpf_prog *fp,
int probe_prg, int nop_prg)
{
struct exception_table_entry *ex;
s64 delta;
u8 *insn;
int prg;
int i;
if (!fp->aux->extable)
/* Do nothing during early JIT passes. */
return 0;
insn = jit->prg_buf + probe_prg;
if (WARN_ON_ONCE(get_probe_mem_regno(insn) < 0))
/* JIT bug - unexpected probe instruction. */
return -1;
if (WARN_ON_ONCE(probe_prg + insn_length(*insn) != nop_prg))
/* JIT bug - gap between probe and nop instructions. */
return -1;
for (i = 0; i < 2; i++) {
if (WARN_ON_ONCE(jit->excnt >= fp->aux->num_exentries))
/* Verifier bug - not enough entries. */
return -1;
ex = &fp->aux->extable[jit->excnt];
/* Add extable entries for probe and nop instructions. */
prg = i == 0 ? probe_prg : nop_prg;
delta = jit->prg_buf + prg - (u8 *)&ex->insn;
if (WARN_ON_ONCE(delta < INT_MIN || delta > INT_MAX))
/* JIT bug - code and extable must be close. */
return -1;
ex->insn = delta;
/*
* Always land on the nop. Note that extable infrastructure
* ignores fixup field, it is handled by ex_handler_bpf().
*/
delta = jit->prg_buf + nop_prg - (u8 *)&ex->fixup;
if (WARN_ON_ONCE(delta < INT_MIN || delta > INT_MAX))
/* JIT bug - landing pad and extable must be close. */
return -1;
ex->fixup = delta;
ex->handler = (u8 *)ex_handler_bpf - (u8 *)&ex->handler;
jit->excnt++;
}
return 0;
}
/*
* Compile one eBPF instruction into s390x code
*
......@@ -604,7 +683,14 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
u32 *addrs = jit->addrs;
s32 imm = insn->imm;
s16 off = insn->off;
int probe_prg = -1;
unsigned int mask;
int nop_prg;
int err;
if (BPF_CLASS(insn->code) == BPF_LDX &&
BPF_MODE(insn->code) == BPF_PROBE_MEM)
probe_prg = jit->prg;
switch (insn->code) {
/*
......@@ -1119,6 +1205,7 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
* BPF_LDX
*/
case BPF_LDX | BPF_MEM | BPF_B: /* dst = *(u8 *)(ul) (src + off) */
case BPF_LDX | BPF_PROBE_MEM | BPF_B:
/* llgc %dst,0(off,%src) */
EMIT6_DISP_LH(0xe3000000, 0x0090, dst_reg, src_reg, REG_0, off);
jit->seen |= SEEN_MEM;
......@@ -1126,6 +1213,7 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
insn_count = 2;
break;
case BPF_LDX | BPF_MEM | BPF_H: /* dst = *(u16 *)(ul) (src + off) */
case BPF_LDX | BPF_PROBE_MEM | BPF_H:
/* llgh %dst,0(off,%src) */
EMIT6_DISP_LH(0xe3000000, 0x0091, dst_reg, src_reg, REG_0, off);
jit->seen |= SEEN_MEM;
......@@ -1133,6 +1221,7 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
insn_count = 2;
break;
case BPF_LDX | BPF_MEM | BPF_W: /* dst = *(u32 *)(ul) (src + off) */
case BPF_LDX | BPF_PROBE_MEM | BPF_W:
/* llgf %dst,off(%src) */
jit->seen |= SEEN_MEM;
EMIT6_DISP_LH(0xe3000000, 0x0016, dst_reg, src_reg, REG_0, off);
......@@ -1140,6 +1229,7 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
insn_count = 2;
break;
case BPF_LDX | BPF_MEM | BPF_DW: /* dst = *(u64 *)(ul) (src + off) */
case BPF_LDX | BPF_PROBE_MEM | BPF_DW:
/* lg %dst,0(off,%src) */
jit->seen |= SEEN_MEM;
EMIT6_DISP_LH(0xe3000000, 0x0004, dst_reg, src_reg, REG_0, off);
......@@ -1485,6 +1575,23 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
pr_err("Unknown opcode %02x\n", insn->code);
return -1;
}
if (probe_prg != -1) {
/*
* Handlers of certain exceptions leave psw.addr pointing to
* the instruction directly after the failing one. Therefore,
* create two exception table entries and also add a nop in
* case two probing instructions come directly after each
* other.
*/
nop_prg = jit->prg;
/* bcr 0,%0 */
_EMIT2(0x0700);
err = bpf_jit_probe_mem(jit, fp, probe_prg, nop_prg);
if (err < 0)
return err;
}
return insn_count;
}
......@@ -1527,6 +1634,7 @@ static int bpf_jit_prog(struct bpf_jit *jit, struct bpf_prog *fp,
jit->lit32 = jit->lit32_start;
jit->lit64 = jit->lit64_start;
jit->prg = 0;
jit->excnt = 0;
bpf_jit_prologue(jit, stack_depth);
if (bpf_set_addr(jit, 0) < 0)
......@@ -1551,6 +1659,12 @@ static int bpf_jit_prog(struct bpf_jit *jit, struct bpf_prog *fp,
jit->lit64_start = ALIGN(jit->lit64_start, 8);
jit->size = jit->lit64_start + lit64_size;
jit->size_prg = jit->prg;
if (WARN_ON_ONCE(fp->aux->extable &&
jit->excnt != fp->aux->num_exentries))
/* Verifier bug - too many entries. */
return -1;
return 0;
}
......@@ -1565,6 +1679,29 @@ struct s390_jit_data {
int pass;
};
static struct bpf_binary_header *bpf_jit_alloc(struct bpf_jit *jit,
struct bpf_prog *fp)
{
struct bpf_binary_header *header;
u32 extable_size;
u32 code_size;
/* We need two entries per insn. */
fp->aux->num_exentries *= 2;
code_size = roundup(jit->size,
__alignof__(struct exception_table_entry));
extable_size = fp->aux->num_exentries *
sizeof(struct exception_table_entry);
header = bpf_jit_binary_alloc(code_size + extable_size, &jit->prg_buf,
8, jit_fill_hole);
if (!header)
return NULL;
fp->aux->extable = (struct exception_table_entry *)
(jit->prg_buf + code_size);
return header;
}
/*
* Compile eBPF program "fp"
*/
......@@ -1631,7 +1768,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
/*
* Final pass: Allocate and generate program
*/
header = bpf_jit_binary_alloc(jit.size, &jit.prg_buf, 8, jit_fill_hole);
header = bpf_jit_alloc(&jit, fp);
if (!header) {
fp = orig_fp;
goto free_addrs;
......
......@@ -155,10 +155,12 @@ SYSCALL_DEFINE3(s390_pci_mmio_write, unsigned long, mmio_addr,
return -EINVAL;
/*
* Only support read access to MIO capable devices on a MIO enabled
* system. Otherwise we would have to check for every address if it is
* a special ZPCI_ADDR and we would have to do a get_pfn() which we
* don't need for MIO capable devices.
* We only support write access to MIO capable devices if we are on
* a MIO enabled system. Otherwise we would have to check for every
* address if it is a special ZPCI_ADDR and would have to do
* a get_pfn() which we don't need for MIO capable devices. Currently
* ISM devices are the only devices without MIO support and there is no
* known need for accessing these from userspace.
*/
if (static_branch_likely(&have_mio)) {
ret = __memcpy_toio_inuser((void __iomem *) mmio_addr,
......@@ -282,10 +284,12 @@ SYSCALL_DEFINE3(s390_pci_mmio_read, unsigned long, mmio_addr,
return -EINVAL;
/*
* Only support write access to MIO capable devices on a MIO enabled
* system. Otherwise we would have to check for every address if it is
* a special ZPCI_ADDR and we would have to do a get_pfn() which we
* don't need for MIO capable devices.
* We only support read access to MIO capable devices if we are on
* a MIO enabled system. Otherwise we would have to check for every
* address if it is a special ZPCI_ADDR and would have to do
* a get_pfn() which we don't need for MIO capable devices. Currently
* ISM devices are the only devices without MIO support and there is no
* known need for accessing these from userspace.
*/
if (static_branch_likely(&have_mio)) {
ret = __memcpy_fromio_inuser(
......
......@@ -556,8 +556,9 @@ tty3270_scroll_backward(struct kbd_data *kbd)
* Pass input line to tty.
*/
static void
tty3270_read_tasklet(struct raw3270_request *rrq)
tty3270_read_tasklet(unsigned long data)
{
struct raw3270_request *rrq = (struct raw3270_request *)data;
static char kreset_data = TW_KR;
struct tty3270 *tp = container_of(rrq->view, struct tty3270, view);
char *input;
......@@ -652,8 +653,9 @@ tty3270_issue_read(struct tty3270 *tp, int lock)
* Hang up the tty
*/
static void
tty3270_hangup_tasklet(struct tty3270 *tp)
tty3270_hangup_tasklet(unsigned long data)
{
struct tty3270 *tp = (struct tty3270 *)data;
tty_port_tty_hangup(&tp->port, true);
raw3270_put_view(&tp->view);
}
......@@ -752,11 +754,9 @@ tty3270_alloc_view(void)
tty_port_init(&tp->port);
timer_setup(&tp->timer, tty3270_update, 0);
tasklet_init(&tp->readlet,
(void (*)(unsigned long)) tty3270_read_tasklet,
tasklet_init(&tp->readlet, tty3270_read_tasklet,
(unsigned long) tp->read);
tasklet_init(&tp->hanglet,
(void (*)(unsigned long)) tty3270_hangup_tasklet,
tasklet_init(&tp->hanglet, tty3270_hangup_tasklet,
(unsigned long) tp);
INIT_WORK(&tp->resize_work, tty3270_resize_work);
......
// SPDX-License-Identifier: GPL-1.0+
/*
* zcore module to export memory content and register sets for creating system
* dumps on SCSI disks (zfcpdump). The "zcore/mem" debugfs file shows the same
* dump format as s390 standalone dumps.
* dumps on SCSI disks (zfcpdump).
*
* For more information please refer to Documentation/s390/zfcpdump.rst
*
......@@ -16,7 +15,6 @@
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/debugfs.h>
#include <linux/memblock.h>
#include <asm/asm-offsets.h>
#include <asm/ipl.h>
......@@ -33,8 +31,6 @@
#define TRACE(x...) debug_sprintf_event(zcore_dbf, 1, x)
#define CHUNK_INFO_SIZE 34 /* 2 16-byte char, each followed by blank */
enum arch_id {
ARCH_S390 = 0,
ARCH_S390X = 1,
......@@ -48,7 +44,6 @@ struct ipib_info {
static struct debug_info *zcore_dbf;
static int hsa_available;
static struct dentry *zcore_dir;
static struct dentry *zcore_memmap_file;
static struct dentry *zcore_reipl_file;
static struct dentry *zcore_hsa_file;
static struct ipl_parameter_block *zcore_ipl_block;
......@@ -139,46 +134,6 @@ static void release_hsa(void)
hsa_available = 0;
}
static ssize_t zcore_memmap_read(struct file *filp, char __user *buf,
size_t count, loff_t *ppos)
{
return simple_read_from_buffer(buf, count, ppos, filp->private_data,
memblock.memory.cnt * CHUNK_INFO_SIZE);
}
static int zcore_memmap_open(struct inode *inode, struct file *filp)
{
struct memblock_region *reg;
char *buf;
int i = 0;
buf = kcalloc(memblock.memory.cnt, CHUNK_INFO_SIZE, GFP_KERNEL);
if (!buf) {
return -ENOMEM;
}
for_each_memblock(memory, reg) {
sprintf(buf + (i++ * CHUNK_INFO_SIZE), "%016llx %016llx ",
(unsigned long long) reg->base,
(unsigned long long) reg->size);
}
filp->private_data = buf;
return nonseekable_open(inode, filp);
}
static int zcore_memmap_release(struct inode *inode, struct file *filp)
{
kfree(filp->private_data);
return 0;
}
static const struct file_operations zcore_memmap_fops = {
.owner = THIS_MODULE,
.read = zcore_memmap_read,
.open = zcore_memmap_open,
.release = zcore_memmap_release,
.llseek = no_llseek,
};
static ssize_t zcore_reipl_write(struct file *filp, const char __user *buf,
size_t count, loff_t *ppos)
{
......@@ -335,17 +290,11 @@ static int __init zcore_init(void)
rc = -ENOMEM;
goto fail;
}
zcore_memmap_file = debugfs_create_file("memmap", S_IRUSR, zcore_dir,
NULL, &zcore_memmap_fops);
if (!zcore_memmap_file) {
rc = -ENOMEM;
goto fail_dir;
}
zcore_reipl_file = debugfs_create_file("reipl", S_IRUSR, zcore_dir,
NULL, &zcore_reipl_fops);
if (!zcore_reipl_file) {
rc = -ENOMEM;
goto fail_memmap_file;
goto fail_dir;
}
zcore_hsa_file = debugfs_create_file("hsa", S_IRUSR|S_IWUSR, zcore_dir,
NULL, &zcore_hsa_fops);
......@@ -357,8 +306,6 @@ static int __init zcore_init(void)
fail_reipl_file:
debugfs_remove(zcore_reipl_file);
fail_memmap_file:
debugfs_remove(zcore_memmap_file);
fail_dir:
debugfs_remove(zcore_dir);
fail:
......
......@@ -15,7 +15,6 @@
#define QDIO_BUSY_BIT_PATIENCE (100 << 12) /* 100 microseconds */
#define QDIO_BUSY_BIT_RETRY_DELAY 10 /* 10 milliseconds */
#define QDIO_BUSY_BIT_RETRIES 1000 /* = 10s retry time */
#define QDIO_INPUT_THRESHOLD (500 << 12) /* 500 microseconds */
enum qdio_irq_states {
QDIO_IRQ_STATE_INACTIVE,
......@@ -166,11 +165,7 @@ struct qdio_dev_perf_stat {
} ____cacheline_aligned;
struct qdio_queue_perf_stat {
/*
* Sorted into order-2 buckets: 1, 2-3, 4-7, ... 64-127, 128.
* Since max. 127 SBALs are scanned reuse entry for 128 as queue full
* aka 127 SBALs found.
*/
/* Sorted into order-2 buckets: 1, 2-3, 4-7, ... 64-127, 128. */
unsigned int nr_sbals[8];
unsigned int nr_sbal_error;
unsigned int nr_sbal_nop;
......@@ -185,8 +180,6 @@ struct qdio_input_q {
/* Batch of SBALs that we processed while polling the queue: */
unsigned int batch_start;
unsigned int batch_count;
/* last time of noticing incoming data */
u64 timestamp;
};
struct qdio_output_q {
......
......@@ -165,7 +165,7 @@ static int qstat_show(struct seq_file *m, void *v)
}
seq_printf(m, "\n1 2.. 4.. 8.. "
"16.. 32.. 64.. 127\n");
"16.. 32.. 64.. 128\n");
for (i = 0; i < ARRAY_SIZE(q->q_stats.nr_sbals); i++)
seq_printf(m, "%-10u ", q->q_stats.nr_sbals[i]);
seq_printf(m, "\nError NOP Total\n%-10u %-10u %-10u\n\n",
......
......@@ -413,15 +413,8 @@ static inline void qdio_stop_polling(struct qdio_q *q)
static inline void account_sbals(struct qdio_q *q, unsigned int count)
{
int pos;
q->q_stats.nr_sbal_total += count;
if (count == QDIO_MAX_BUFFERS_MASK) {
q->q_stats.nr_sbals[7]++;
return;
}
pos = ilog2(count);
q->q_stats.nr_sbals[pos]++;
q->q_stats.nr_sbals[ilog2(count)]++;
}
static void process_buffer_error(struct qdio_q *q, unsigned int start,
......@@ -464,11 +457,7 @@ static int get_inbound_buffer_frontier(struct qdio_q *q, unsigned int start)
q->timestamp = get_tod_clock_fast();
/*
* Don't check 128 buffers, as otherwise qdio_inbound_q_moved
* would return 0.
*/
count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK);
count = atomic_read(&q->nr_buf_used);
if (!count)
return 0;
......@@ -521,14 +510,7 @@ static int get_inbound_buffer_frontier(struct qdio_q *q, unsigned int start)
static int qdio_inbound_q_moved(struct qdio_q *q, unsigned int start)
{
int count;
count = get_inbound_buffer_frontier(q, start);
if (count && !is_thinint_irq(q->irq_ptr) && MACHINE_IS_LPAR)
q->u.in.timestamp = get_tod_clock();
return count;
return get_inbound_buffer_frontier(q, start);
}
static inline int qdio_inbound_q_done(struct qdio_q *q, unsigned int start)
......@@ -546,22 +528,7 @@ static inline int qdio_inbound_q_done(struct qdio_q *q, unsigned int start)
/* more work coming */
return 0;
if (is_thinint_irq(q->irq_ptr))
return 1;
/* don't poll under z/VM */
if (MACHINE_IS_VM)
return 1;
/*
* At this point we know, that inbound first_to_check
* has (probably) not moved (see qdio_inbound_processing).
*/
if (get_tod_clock_fast() > q->u.in.timestamp + QDIO_INPUT_THRESHOLD) {
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in done:%02x", start);
return 1;
} else
return 0;
return 1;
}
static inline void qdio_handle_aobs(struct qdio_q *q, int start, int count)
......
......@@ -83,39 +83,39 @@ static inline int ap_test_bit(unsigned int *ptr, unsigned int nr)
#define AP_INTR_ENABLED 1 /* AP interrupt enabled */
/*
* AP device states
* AP queue state machine states
*/
enum ap_state {
AP_STATE_RESET_START,
AP_STATE_RESET_WAIT,
AP_STATE_SETIRQ_WAIT,
AP_STATE_IDLE,
AP_STATE_WORKING,
AP_STATE_QUEUE_FULL,
AP_STATE_REMOVE, /* about to be removed from driver */
AP_STATE_UNBOUND, /* momentary not bound to a driver */
AP_STATE_BORKED, /* broken */
NR_AP_STATES
enum ap_sm_state {
AP_SM_STATE_RESET_START,
AP_SM_STATE_RESET_WAIT,
AP_SM_STATE_SETIRQ_WAIT,
AP_SM_STATE_IDLE,
AP_SM_STATE_WORKING,
AP_SM_STATE_QUEUE_FULL,
AP_SM_STATE_REMOVE, /* about to be removed from driver */
AP_SM_STATE_UNBOUND, /* momentary not bound to a driver */
AP_SM_STATE_BORKED, /* broken */
NR_AP_SM_STATES
};
/*
* AP device events
* AP queue state machine events
*/
enum ap_event {
AP_EVENT_POLL,
AP_EVENT_TIMEOUT,
NR_AP_EVENTS
enum ap_sm_event {
AP_SM_EVENT_POLL,
AP_SM_EVENT_TIMEOUT,
NR_AP_SM_EVENTS
};
/*
* AP wait behaviour
* AP queue state wait behaviour
*/
enum ap_wait {
AP_WAIT_AGAIN, /* retry immediately */
AP_WAIT_TIMEOUT, /* wait for timeout */
AP_WAIT_INTERRUPT, /* wait for thin interrupt (if available) */
AP_WAIT_NONE, /* no wait */
NR_AP_WAIT
enum ap_sm_wait {
AP_SM_WAIT_AGAIN, /* retry immediately */
AP_SM_WAIT_TIMEOUT, /* wait for timeout */
AP_SM_WAIT_INTERRUPT, /* wait for thin interrupt (if available) */
AP_SM_WAIT_NONE, /* no wait */
NR_AP_SM_WAIT
};
struct ap_device;
......@@ -172,7 +172,7 @@ struct ap_queue {
ap_qid_t qid; /* AP queue id. */
int interrupt; /* indicate if interrupts are enabled */
int queue_count; /* # messages currently on AP queue. */
enum ap_state state; /* State of the AP device. */
enum ap_sm_state sm_state; /* ap queue state machine state */
int pendingq_count; /* # requests on pendingq list. */
int requestq_count; /* # requests on requestq list. */
u64 total_request_count; /* # requests ever for this AP device.*/
......@@ -185,22 +185,23 @@ struct ap_queue {
#define to_ap_queue(x) container_of((x), struct ap_queue, ap_dev.device)
typedef enum ap_wait (ap_func_t)(struct ap_queue *queue);
typedef enum ap_sm_wait (ap_func_t)(struct ap_queue *queue);
struct ap_message {
struct list_head list; /* Request queueing. */
unsigned long long psmid; /* Message id. */
void *message; /* Pointer to message buffer. */
size_t length; /* Message length. */
void *msg; /* Pointer to message buffer. */
unsigned int len; /* Message length. */
u32 flags; /* Flags, see AP_MSG_FLAG_xxx */
int rc; /* Return code for this message */
void *private; /* ap driver private pointer. */
unsigned int special:1; /* Used for special commands. */
/* receive is called from tasklet context */
void (*receive)(struct ap_queue *, struct ap_message *,
struct ap_message *);
};
#define AP_MSG_FLAG_SPECIAL (1 << 16) /* flag msg as 'special' with NQAP */
/**
* ap_init_message() - Initialize ap_message.
* Initialize a message before using. Otherwise this might result in
......@@ -218,7 +219,7 @@ static inline void ap_init_message(struct ap_message *ap_msg)
*/
static inline void ap_release_message(struct ap_message *ap_msg)
{
kzfree(ap_msg->message);
kzfree(ap_msg->msg);
kzfree(ap_msg->private);
}
......@@ -230,15 +231,15 @@ static inline void ap_release_message(struct ap_message *ap_msg)
int ap_send(ap_qid_t, unsigned long long, void *, size_t);
int ap_recv(ap_qid_t, unsigned long long *, void *, size_t);
enum ap_wait ap_sm_event(struct ap_queue *aq, enum ap_event event);
enum ap_wait ap_sm_event_loop(struct ap_queue *aq, enum ap_event event);
enum ap_sm_wait ap_sm_event(struct ap_queue *aq, enum ap_sm_event event);
enum ap_sm_wait ap_sm_event_loop(struct ap_queue *aq, enum ap_sm_event event);
void ap_queue_message(struct ap_queue *aq, struct ap_message *ap_msg);
void ap_cancel_message(struct ap_queue *aq, struct ap_message *ap_msg);
void ap_flush_queue(struct ap_queue *aq);
void *ap_airq_ptr(void);
void ap_wait(enum ap_wait wait);
void ap_wait(enum ap_sm_wait wait);
void ap_request_timeout(struct timer_list *t);
void ap_bus_force_rescan(void);
......
......@@ -1603,8 +1603,8 @@ static ssize_t pkey_ccacipher_aes_attr_read(enum pkey_key_size keybits,
if (rc == 0)
break;
}
if (rc)
return rc;
if (rc)
return rc;
if (is_xts) {
keysize = CCACIPHERTOKENSIZE;
......
......@@ -634,7 +634,7 @@ static long zcrypt_rsa_modexpo(struct ap_perms *perms,
{
struct zcrypt_card *zc, *pref_zc;
struct zcrypt_queue *zq, *pref_zq;
unsigned int weight, pref_weight;
unsigned int weight = 0, pref_weight = 0;
unsigned int func_code;
int qid = 0, rc = -ENODEV;
struct module *mod;
......@@ -718,7 +718,7 @@ static long zcrypt_rsa_crt(struct ap_perms *perms,
{
struct zcrypt_card *zc, *pref_zc;
struct zcrypt_queue *zq, *pref_zq;
unsigned int weight, pref_weight;
unsigned int weight = 0, pref_weight = 0;
unsigned int func_code;
int qid = 0, rc = -ENODEV;
struct module *mod;
......@@ -803,7 +803,7 @@ static long _zcrypt_send_cprb(struct ap_perms *perms,
struct zcrypt_card *zc, *pref_zc;
struct zcrypt_queue *zq, *pref_zq;
struct ap_message ap_msg;
unsigned int weight, pref_weight;
unsigned int weight = 0, pref_weight = 0;
unsigned int func_code;
unsigned short *domain, tdom;
int qid = 0, rc = -ENODEV;
......@@ -822,7 +822,7 @@ static long _zcrypt_send_cprb(struct ap_perms *perms,
* domain but a control only domain, use the default domain as target.
*/
tdom = *domain;
if (tdom >= 0 && tdom < AP_DOMAINS &&
if (tdom < AP_DOMAINS &&
!ap_test_config_usage_domain(tdom) &&
ap_test_config_ctrl_domain(tdom) &&
ap_domain_index >= 0)
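Condensed form of the fixed check (a sketch; the fallback assignment is presumed from context): tdom is an unsigned short, so the dropped tdom >= 0 comparison was always true and only triggered compiler warnings.

unsigned short tdom = *domain;

if (tdom < AP_DOMAINS &&                    /* >= 0 is tautological for unsigned */
    !ap_test_config_usage_domain(tdom) &&
    ap_test_config_ctrl_domain(tdom) &&
    ap_domain_index >= 0)
        tdom = ap_domain_index;             /* fall back to the default domain */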
......@@ -931,7 +931,7 @@ static long _zcrypt_send_ep11_cprb(struct ap_perms *perms,
struct zcrypt_queue *zq, *pref_zq;
struct ep11_target_dev *targets;
unsigned short target_num;
unsigned int weight, pref_weight;
unsigned int weight = 0, pref_weight = 0;
unsigned int func_code;
struct ap_message ap_msg;
int qid = 0, rc = -ENODEV;
......@@ -1040,7 +1040,7 @@ static long zcrypt_rng(char *buffer)
{
struct zcrypt_card *zc, *pref_zc;
struct zcrypt_queue *zq, *pref_zq;
unsigned int weight, pref_weight;
unsigned int weight = 0, pref_weight = 0;
unsigned int func_code;
struct ap_message ap_msg;
unsigned int domain;
......@@ -1298,99 +1298,119 @@ static int zcrypt_requestq_count(void)
return requestq_count;
}
static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg)
static int icarsamodexpo_ioctl(struct ap_perms *perms, unsigned long arg)
{
int rc;
struct ap_perms *perms =
(struct ap_perms *) filp->private_data;
struct ica_rsa_modexpo mex;
struct ica_rsa_modexpo __user *umex = (void __user *) arg;
rc = zcrypt_check_ioctl(perms, cmd);
if (rc)
return rc;
switch (cmd) {
case ICARSAMODEXPO: {
struct ica_rsa_modexpo __user *umex = (void __user *) arg;
struct ica_rsa_modexpo mex;
if (copy_from_user(&mex, umex, sizeof(mex)))
return -EFAULT;
if (copy_from_user(&mex, umex, sizeof(mex)))
return -EFAULT;
do {
rc = zcrypt_rsa_modexpo(perms, &mex);
} while (rc == -EAGAIN);
/* on failure: retry once again after a requested rescan */
if ((rc == -ENODEV) && (zcrypt_process_rescan()))
do {
rc = zcrypt_rsa_modexpo(perms, &mex);
} while (rc == -EAGAIN);
/* on failure: retry once again after a requested rescan */
if ((rc == -ENODEV) && (zcrypt_process_rescan()))
do {
rc = zcrypt_rsa_modexpo(perms, &mex);
} while (rc == -EAGAIN);
if (rc) {
ZCRYPT_DBF(DBF_DEBUG, "ioctl ICARSAMODEXPO rc=%d\n", rc);
return rc;
}
return put_user(mex.outputdatalength, &umex->outputdatalength);
if (rc) {
ZCRYPT_DBF(DBF_DEBUG, "ioctl ICARSAMODEXPO rc=%d\n", rc);
return rc;
}
case ICARSACRT: {
struct ica_rsa_modexpo_crt __user *ucrt = (void __user *) arg;
struct ica_rsa_modexpo_crt crt;
return put_user(mex.outputdatalength, &umex->outputdatalength);
}
if (copy_from_user(&crt, ucrt, sizeof(crt)))
return -EFAULT;
static int icarsacrt_ioctl(struct ap_perms *perms, unsigned long arg)
{
int rc;
struct ica_rsa_modexpo_crt crt;
struct ica_rsa_modexpo_crt __user *ucrt = (void __user *) arg;
if (copy_from_user(&crt, ucrt, sizeof(crt)))
return -EFAULT;
do {
rc = zcrypt_rsa_crt(perms, &crt);
} while (rc == -EAGAIN);
/* on failure: retry once again after a requested rescan */
if ((rc == -ENODEV) && (zcrypt_process_rescan()))
do {
rc = zcrypt_rsa_crt(perms, &crt);
} while (rc == -EAGAIN);
/* on failure: retry once again after a requested rescan */
if ((rc == -ENODEV) && (zcrypt_process_rescan()))
do {
rc = zcrypt_rsa_crt(perms, &crt);
} while (rc == -EAGAIN);
if (rc) {
ZCRYPT_DBF(DBF_DEBUG, "ioctl ICARSACRT rc=%d\n", rc);
return rc;
}
return put_user(crt.outputdatalength, &ucrt->outputdatalength);
if (rc) {
ZCRYPT_DBF(DBF_DEBUG, "ioctl ICARSACRT rc=%d\n", rc);
return rc;
}
case ZSECSENDCPRB: {
struct ica_xcRB __user *uxcRB = (void __user *) arg;
struct ica_xcRB xcRB;
return put_user(crt.outputdatalength, &ucrt->outputdatalength);
}
if (copy_from_user(&xcRB, uxcRB, sizeof(xcRB)))
return -EFAULT;
static int zsecsendcprb_ioctl(struct ap_perms *perms, unsigned long arg)
{
int rc;
struct ica_xcRB xcRB;
struct ica_xcRB __user *uxcRB = (void __user *) arg;
if (copy_from_user(&xcRB, uxcRB, sizeof(xcRB)))
return -EFAULT;
do {
rc = _zcrypt_send_cprb(perms, &xcRB);
} while (rc == -EAGAIN);
/* on failure: retry once again after a requested rescan */
if ((rc == -ENODEV) && (zcrypt_process_rescan()))
do {
rc = _zcrypt_send_cprb(perms, &xcRB);
} while (rc == -EAGAIN);
/* on failure: retry once again after a requested rescan */
if ((rc == -ENODEV) && (zcrypt_process_rescan()))
do {
rc = _zcrypt_send_cprb(perms, &xcRB);
} while (rc == -EAGAIN);
if (rc)
ZCRYPT_DBF(DBF_DEBUG, "ioctl ZSENDCPRB rc=%d status=0x%x\n",
rc, xcRB.status);
if (copy_to_user(uxcRB, &xcRB, sizeof(xcRB)))
return -EFAULT;
return rc;
}
case ZSENDEP11CPRB: {
struct ep11_urb __user *uxcrb = (void __user *)arg;
struct ep11_urb xcrb;
if (rc)
ZCRYPT_DBF(DBF_DEBUG, "ioctl ZSENDCPRB rc=%d status=0x%x\n",
rc, xcRB.status);
if (copy_to_user(uxcRB, &xcRB, sizeof(xcRB)))
return -EFAULT;
return rc;
}
if (copy_from_user(&xcrb, uxcrb, sizeof(xcrb)))
return -EFAULT;
static int zsendep11cprb_ioctl(struct ap_perms *perms, unsigned long arg)
{
int rc;
struct ep11_urb xcrb;
struct ep11_urb __user *uxcrb = (void __user *)arg;
if (copy_from_user(&xcrb, uxcrb, sizeof(xcrb)))
return -EFAULT;
do {
rc = _zcrypt_send_ep11_cprb(perms, &xcrb);
} while (rc == -EAGAIN);
/* on failure: retry once again after a requested rescan */
if ((rc == -ENODEV) && (zcrypt_process_rescan()))
do {
rc = _zcrypt_send_ep11_cprb(perms, &xcrb);
} while (rc == -EAGAIN);
/* on failure: retry once again after a requested rescan */
if ((rc == -ENODEV) && (zcrypt_process_rescan()))
do {
rc = _zcrypt_send_ep11_cprb(perms, &xcrb);
} while (rc == -EAGAIN);
if (rc)
ZCRYPT_DBF(DBF_DEBUG, "ioctl ZSENDEP11CPRB rc=%d\n", rc);
if (copy_to_user(uxcrb, &xcrb, sizeof(xcrb)))
return -EFAULT;
if (rc)
ZCRYPT_DBF(DBF_DEBUG, "ioctl ZSENDEP11CPRB rc=%d\n", rc);
if (copy_to_user(uxcrb, &xcrb, sizeof(xcrb)))
return -EFAULT;
return rc;
}
static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg)
{
int rc;
struct ap_perms *perms =
(struct ap_perms *) filp->private_data;
rc = zcrypt_check_ioctl(perms, cmd);
if (rc)
return rc;
}
switch (cmd) {
case ICARSAMODEXPO:
return icarsamodexpo_ioctl(perms, arg);
case ICARSACRT:
return icarsacrt_ioctl(perms, arg);
case ZSECSENDCPRB:
return zsecsendcprb_ioctl(perms, arg);
case ZSENDEP11CPRB:
return zsendep11cprb_ioctl(perms, arg);
case ZCRYPT_DEVICE_STATUS: {
struct zcrypt_device_status_ext *device_status;
size_t total_size = MAX_ZDEV_ENTRIES_EXT
......
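The refactoring above only moves each ioctl body into its own helper; the user-space ABI is unchanged. A hypothetical caller (illustrative, error handling trimmed; the header path is the s390 uapi one) still issues the same request against /dev/z90crypt:

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <asm/zcrypt.h>              /* struct ica_rsa_modexpo, ICARSAMODEXPO */

static int rsa_modexpo(struct ica_rsa_modexpo *mex)
{
        int fd, rc;

        fd = open("/dev/z90crypt", O_RDWR);
        if (fd < 0)
                return -1;
        rc = ioctl(fd, ICARSAMODEXPO, mex);  /* lands in icarsamodexpo_ioctl() */
        close(fd);
        return rc;
}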
......@@ -205,9 +205,9 @@ static int alloc_and_prep_cprbmem(size_t paramblen,
preqcblk->rpl_msgbl = cprbplusparamblen;
if (paramblen) {
preqcblk->req_parmb =
((u8 *) preqcblk) + sizeof(struct CPRBX);
((u8 __user *) preqcblk) + sizeof(struct CPRBX);
preqcblk->rpl_parmb =
((u8 *) prepcblk) + sizeof(struct CPRBX);
((u8 __user *) prepcblk) + sizeof(struct CPRBX);
}
*pcprbmem = cprbmem;
......@@ -274,7 +274,7 @@ int cca_genseckey(u16 cardnr, u16 domain,
{
int i, rc, keysize;
int seckeysize;
u8 *mem;
u8 *mem, *ptr;
struct CPRBX *preqcblk, *prepcblk;
struct ica_xcRB xcrb;
struct kgreqparm {
......@@ -320,7 +320,7 @@ int cca_genseckey(u16 cardnr, u16 domain,
preqcblk->domain = domain;
/* fill request cprb param block with KG request */
preqparm = (struct kgreqparm *) preqcblk->req_parmb;
preqparm = (struct kgreqparm __force *) preqcblk->req_parmb;
memcpy(preqparm->subfunc_code, "KG", 2);
preqparm->rule_array_len = sizeof(preqparm->rule_array_len);
preqparm->lv1.len = sizeof(struct lv1);
......@@ -377,8 +377,9 @@ int cca_genseckey(u16 cardnr, u16 domain,
}
/* process response cprb param block */
prepcblk->rpl_parmb = ((u8 *) prepcblk) + sizeof(struct CPRBX);
prepparm = (struct kgrepparm *) prepcblk->rpl_parmb;
ptr = ((u8 *) prepcblk) + sizeof(struct CPRBX);
prepcblk->rpl_parmb = (u8 __user *) ptr;
prepparm = (struct kgrepparm *) ptr;
/* check length of the returned secure key token */
seckeysize = prepparm->lv3.keyblock.toklen
......@@ -415,7 +416,7 @@ int cca_clr2seckey(u16 cardnr, u16 domain, u32 keybitsize,
const u8 *clrkey, u8 seckey[SECKEYBLOBSIZE])
{
int rc, keysize, seckeysize;
u8 *mem;
u8 *mem, *ptr;
struct CPRBX *preqcblk, *prepcblk;
struct ica_xcRB xcrb;
struct cmreqparm {
......@@ -460,7 +461,7 @@ int cca_clr2seckey(u16 cardnr, u16 domain, u32 keybitsize,
preqcblk->domain = domain;
/* fill request cprb param block with CM request */
preqparm = (struct cmreqparm *) preqcblk->req_parmb;
preqparm = (struct cmreqparm __force *) preqcblk->req_parmb;
memcpy(preqparm->subfunc_code, "CM", 2);
memcpy(preqparm->rule_array, "AES ", 8);
preqparm->rule_array_len =
......@@ -514,8 +515,9 @@ int cca_clr2seckey(u16 cardnr, u16 domain, u32 keybitsize,
}
/* process response cprb param block */
prepcblk->rpl_parmb = ((u8 *) prepcblk) + sizeof(struct CPRBX);
prepparm = (struct cmrepparm *) prepcblk->rpl_parmb;
ptr = ((u8 *) prepcblk) + sizeof(struct CPRBX);
prepcblk->rpl_parmb = (u8 __user *) ptr;
prepparm = (struct cmrepparm *) ptr;
/* check length of the returned secure key token */
seckeysize = prepparm->lv3.keyblock.toklen
......@@ -554,7 +556,7 @@ int cca_sec2protkey(u16 cardnr, u16 domain,
u8 *protkey, u32 *protkeylen, u32 *protkeytype)
{
int rc;
u8 *mem;
u8 *mem, *ptr;
struct CPRBX *preqcblk, *prepcblk;
struct ica_xcRB xcrb;
struct uskreqparm {
......@@ -605,7 +607,7 @@ int cca_sec2protkey(u16 cardnr, u16 domain,
preqcblk->domain = domain;
/* fill request cprb param block with USK request */
preqparm = (struct uskreqparm *) preqcblk->req_parmb;
preqparm = (struct uskreqparm __force *) preqcblk->req_parmb;
memcpy(preqparm->subfunc_code, "US", 2);
preqparm->rule_array_len = sizeof(preqparm->rule_array_len);
preqparm->lv1.len = sizeof(struct lv1);
......@@ -646,8 +648,9 @@ int cca_sec2protkey(u16 cardnr, u16 domain,
}
/* process response cprb param block */
prepcblk->rpl_parmb = ((u8 *) prepcblk) + sizeof(struct CPRBX);
prepparm = (struct uskrepparm *) prepcblk->rpl_parmb;
ptr = ((u8 *) prepcblk) + sizeof(struct CPRBX);
prepcblk->rpl_parmb = (u8 __user *) ptr;
prepparm = (struct uskrepparm *) ptr;
/* check the returned keyblock */
if (prepparm->lv3.ckb.version != 0x01 &&
......@@ -714,7 +717,7 @@ int cca_gencipherkey(u16 cardnr, u16 domain, u32 keybitsize, u32 keygenflags,
u8 *keybuf, size_t *keybufsize)
{
int rc;
u8 *mem;
u8 *mem, *ptr;
struct CPRBX *preqcblk, *prepcblk;
struct ica_xcRB xcrb;
struct gkreqparm {
......@@ -796,7 +799,7 @@ int cca_gencipherkey(u16 cardnr, u16 domain, u32 keybitsize, u32 keygenflags,
preqcblk->req_parml = sizeof(struct gkreqparm);
/* prepare request param block with GK request */
preqparm = (struct gkreqparm *) preqcblk->req_parmb;
preqparm = (struct gkreqparm __force *) preqcblk->req_parmb;
memcpy(preqparm->subfunc_code, "GK", 2);
preqparm->rule_array_len = sizeof(uint16_t) + 2 * 8;
memcpy(preqparm->rule_array, "AES OP ", 2*8);
......@@ -867,8 +870,9 @@ int cca_gencipherkey(u16 cardnr, u16 domain, u32 keybitsize, u32 keygenflags,
}
/* process response cprb param block */
prepcblk->rpl_parmb = ((u8 *) prepcblk) + sizeof(struct CPRBX);
prepparm = (struct gkrepparm *) prepcblk->rpl_parmb;
ptr = ((u8 *) prepcblk) + sizeof(struct CPRBX);
prepcblk->rpl_parmb = (u8 __user *) ptr;
prepparm = (struct gkrepparm *) ptr;
/* do some plausibility checks on the key block */
if (prepparm->kb.len < 120 + 5 * sizeof(uint16_t) ||
......@@ -917,7 +921,7 @@ static int _ip_cprb_helper(u16 cardnr, u16 domain,
int *key_token_size)
{
int rc, n;
u8 *mem;
u8 *mem, *ptr;
struct CPRBX *preqcblk, *prepcblk;
struct ica_xcRB xcrb;
struct rule_array_block {
......@@ -974,7 +978,7 @@ static int _ip_cprb_helper(u16 cardnr, u16 domain,
preqcblk->req_parml = 0;
/* prepare request param block with IP request */
preq_ra_block = (struct rule_array_block *) preqcblk->req_parmb;
preq_ra_block = (struct rule_array_block __force *) preqcblk->req_parmb;
memcpy(preq_ra_block->subfunc_code, "IP", 2);
preq_ra_block->rule_array_len = sizeof(uint16_t) + 2 * 8;
memcpy(preq_ra_block->rule_array, rule_array_1, 8);
......@@ -987,7 +991,7 @@ static int _ip_cprb_helper(u16 cardnr, u16 domain,
}
/* prepare vud block */
preq_vud_block = (struct vud_block *)
preq_vud_block = (struct vud_block __force *)
(preqcblk->req_parmb + preqcblk->req_parml);
n = complete ? 0 : (clr_key_bit_size + 7) / 8;
preq_vud_block->len = sizeof(struct vud_block) + n;
......@@ -1001,7 +1005,7 @@ static int _ip_cprb_helper(u16 cardnr, u16 domain,
preqcblk->req_parml += preq_vud_block->len;
/* prepare key block */
preq_key_block = (struct key_block *)
preq_key_block = (struct key_block __force *)
(preqcblk->req_parmb + preqcblk->req_parml);
n = *key_token_size;
preq_key_block->len = sizeof(struct key_block) + n;
......@@ -1034,8 +1038,9 @@ static int _ip_cprb_helper(u16 cardnr, u16 domain,
}
/* process response cprb param block */
prepcblk->rpl_parmb = ((u8 *) prepcblk) + sizeof(struct CPRBX);
prepparm = (struct iprepparm *) prepcblk->rpl_parmb;
ptr = ((u8 *) prepcblk) + sizeof(struct CPRBX);
prepcblk->rpl_parmb = (u8 __user *) ptr;
prepparm = (struct iprepparm *) ptr;
/* do some plausibility checks on the key block */
if (prepparm->kb.len < 120 + 3 * sizeof(uint16_t) ||
......@@ -1151,7 +1156,7 @@ int cca_cipher2protkey(u16 cardnr, u16 domain, const u8 *ckey,
u8 *protkey, u32 *protkeylen, u32 *protkeytype)
{
int rc;
u8 *mem;
u8 *mem, *ptr;
struct CPRBX *preqcblk, *prepcblk;
struct ica_xcRB xcrb;
struct aureqparm {
......@@ -1208,7 +1213,7 @@ int cca_cipher2protkey(u16 cardnr, u16 domain, const u8 *ckey,
preqcblk->domain = domain;
/* fill request cprb param block with AU request */
preqparm = (struct aureqparm *) preqcblk->req_parmb;
preqparm = (struct aureqparm __force *) preqcblk->req_parmb;
memcpy(preqparm->subfunc_code, "AU", 2);
preqparm->rule_array_len =
sizeof(preqparm->rule_array_len)
......@@ -1257,8 +1262,9 @@ int cca_cipher2protkey(u16 cardnr, u16 domain, const u8 *ckey,
}
/* process response cprb param block */
prepcblk->rpl_parmb = ((u8 *) prepcblk) + sizeof(struct CPRBX);
prepparm = (struct aurepparm *) prepcblk->rpl_parmb;
ptr = ((u8 *) prepcblk) + sizeof(struct CPRBX);
prepcblk->rpl_parmb = (u8 __user *) ptr;
prepparm = (struct aurepparm *) ptr;
/* check the returned keyblock */
if (prepparm->vud.ckb.version != 0x01 &&
......@@ -1347,7 +1353,7 @@ int cca_query_crypto_facility(u16 cardnr, u16 domain,
preqcblk->domain = domain;
/* fill request cprb param block with FQ request */
preqparm = (struct fqreqparm *) preqcblk->req_parmb;
preqparm = (struct fqreqparm __force *) preqcblk->req_parmb;
memcpy(preqparm->subfunc_code, "FQ", 2);
memcpy(preqparm->rule_array, keyword, sizeof(preqparm->rule_array));
preqparm->rule_array_len =
......@@ -1378,8 +1384,9 @@ int cca_query_crypto_facility(u16 cardnr, u16 domain,
}
/* process response cprb param block */
prepcblk->rpl_parmb = ((u8 *) prepcblk) + sizeof(struct CPRBX);
prepparm = (struct fqrepparm *) prepcblk->rpl_parmb;
ptr = ((u8 *) prepcblk) + sizeof(struct CPRBX);
prepcblk->rpl_parmb = (u8 __user *) ptr;
prepparm = (struct fqrepparm *) ptr;
ptr = prepparm->lvdata;
/* check and possibly copy reply rule array */
......
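The recurring change in this file: req_parmb/rpl_parmb are now declared with the __user address-space attribute, so kernel-side code that deliberately treats them as kernel pointers has to say so. A minimal illustration of the sparse annotations involved (not from the patch):

static u8 *user_to_kernel_alias(u8 __user *uptr)
{
        /* A plain cast between address spaces makes sparse complain;
         * __force documents that the cast is intentional. */
        return (u8 __force *) uptr;
}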
......@@ -25,6 +25,7 @@
#include "zcrypt_msgtype6.h"
#include "zcrypt_cex2c.h"
#include "zcrypt_cca_key.h"
#include "zcrypt_ccamisc.h"
#define CEX2C_MIN_MOD_SIZE 16 /* 128 bits */
#define CEX2C_MAX_MOD_SIZE 256 /* 2048 bits */
......@@ -58,6 +59,91 @@ static struct ap_device_id zcrypt_cex2c_queue_ids[] = {
MODULE_DEVICE_TABLE(ap, zcrypt_cex2c_queue_ids);
/*
* CCA card additional device attributes
*/
static ssize_t cca_serialnr_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct cca_info ci;
struct ap_card *ac = to_ap_card(dev);
struct zcrypt_card *zc = ac->private;
memset(&ci, 0, sizeof(ci));
if (ap_domain_index >= 0)
cca_get_info(ac->id, ap_domain_index, &ci, zc->online);
return scnprintf(buf, PAGE_SIZE, "%s\n", ci.serial);
}
static struct device_attribute dev_attr_cca_serialnr =
__ATTR(serialnr, 0444, cca_serialnr_show, NULL);
static struct attribute *cca_card_attrs[] = {
&dev_attr_cca_serialnr.attr,
NULL,
};
static const struct attribute_group cca_card_attr_grp = {
.attrs = cca_card_attrs,
};
/*
* CCA queue additional device attributes
*/
static ssize_t cca_mkvps_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
int n = 0;
struct cca_info ci;
struct zcrypt_queue *zq = to_ap_queue(dev)->private;
static const char * const cao_state[] = { "invalid", "valid" };
static const char * const new_state[] = { "empty", "partial", "full" };
memset(&ci, 0, sizeof(ci));
cca_get_info(AP_QID_CARD(zq->queue->qid),
AP_QID_QUEUE(zq->queue->qid),
&ci, zq->online);
if (ci.new_mk_state >= '1' && ci.new_mk_state <= '3')
n = scnprintf(buf, PAGE_SIZE, "AES NEW: %s 0x%016llx\n",
new_state[ci.new_mk_state - '1'], ci.new_mkvp);
else
n = scnprintf(buf, PAGE_SIZE, "AES NEW: - -\n");
if (ci.cur_mk_state >= '1' && ci.cur_mk_state <= '2')
n += scnprintf(buf + n, PAGE_SIZE - n,
"AES CUR: %s 0x%016llx\n",
cao_state[ci.cur_mk_state - '1'], ci.cur_mkvp);
else
n += scnprintf(buf + n, PAGE_SIZE - n, "AES CUR: - -\n");
if (ci.old_mk_state >= '1' && ci.old_mk_state <= '2')
n += scnprintf(buf + n, PAGE_SIZE - n,
"AES OLD: %s 0x%016llx\n",
cao_state[ci.old_mk_state - '1'], ci.old_mkvp);
else
n += scnprintf(buf + n, PAGE_SIZE - n, "AES OLD: - -\n");
return n;
}
static struct device_attribute dev_attr_cca_mkvps =
__ATTR(mkvps, 0444, cca_mkvps_show, NULL);
static struct attribute *cca_queue_attrs[] = {
&dev_attr_cca_mkvps.attr,
NULL,
};
static const struct attribute_group cca_queue_attr_grp = {
.attrs = cca_queue_attrs,
};
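With these groups registered, CEX2C/CEX3C cards gain the same serialnr and mkvps attributes that CEX4 and later already expose. A hypothetical user-space reader (the card/queue ids in the path are made-up examples):

#include <stdio.h>

int main(void)
{
        char line[128];
        FILE *f = fopen("/sys/devices/ap/card04/04.0005/mkvps", "r");

        if (!f)
                return 1;
        while (fgets(line, sizeof(line), f))   /* e.g. "AES NEW: empty 0x0..." */
                fputs(line, stdout);
        fclose(f);
        return 0;
}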
/**
 * Large random number detection function. It sends a message to a CEX2C/CEX3C
 * card to find out if large random numbers are supported.
......@@ -87,24 +173,23 @@ static int zcrypt_cex2c_rng_supported(struct ap_queue *aq)
int rc, i;
ap_init_message(&ap_msg);
ap_msg.message = (void *) get_zeroed_page(GFP_KERNEL);
if (!ap_msg.message)
ap_msg.msg = (void *) get_zeroed_page(GFP_KERNEL);
if (!ap_msg.msg)
return -ENOMEM;
rng_type6CPRB_msgX(&ap_msg, 4, &domain);
msg = ap_msg.message;
msg = ap_msg.msg;
msg->cprbx.domain = AP_QID_QUEUE(aq->qid);
rc = ap_send(aq->qid, 0x0102030405060708ULL, ap_msg.message,
ap_msg.length);
rc = ap_send(aq->qid, 0x0102030405060708ULL, ap_msg.msg, ap_msg.len);
if (rc)
goto out_free;
/* Wait for the test message to complete. */
for (i = 0; i < 2 * HZ; i++) {
msleep(1000 / HZ);
rc = ap_recv(aq->qid, &psmid, ap_msg.message, 4096);
rc = ap_recv(aq->qid, &psmid, ap_msg.msg, 4096);
if (rc == 0 && psmid == 0x0102030405060708ULL)
break;
}
......@@ -115,13 +200,13 @@ static int zcrypt_cex2c_rng_supported(struct ap_queue *aq)
goto out_free;
}
reply = ap_msg.message;
reply = ap_msg.msg;
if (reply->cprbx.ccp_rtcode == 0 && reply->cprbx.ccp_rscode == 0)
rc = 1;
else
rc = 0;
out_free:
free_page((unsigned long) ap_msg.message);
free_page((unsigned long) ap_msg.msg);
return rc;
}
......@@ -179,6 +264,17 @@ static int zcrypt_cex2c_card_probe(struct ap_device *ap_dev)
if (rc) {
ac->private = NULL;
zcrypt_card_free(zc);
return rc;
}
if (ap_test_bit(&ac->functions, AP_FUNC_COPRO)) {
rc = sysfs_create_group(&ap_dev->device.kobj,
&cca_card_attr_grp);
if (rc) {
zcrypt_card_unregister(zc);
ac->private = NULL;
zcrypt_card_free(zc);
}
}
return rc;
......@@ -190,8 +286,11 @@ static int zcrypt_cex2c_card_probe(struct ap_device *ap_dev)
*/
static void zcrypt_cex2c_card_remove(struct ap_device *ap_dev)
{
struct ap_card *ac = to_ap_card(&ap_dev->device);
struct zcrypt_card *zc = to_ap_card(&ap_dev->device)->private;
if (ap_test_bit(&ac->functions, AP_FUNC_COPRO))
sysfs_remove_group(&ap_dev->device.kobj, &cca_card_attr_grp);
if (zc)
zcrypt_card_unregister(zc);
}
......@@ -240,7 +339,19 @@ static int zcrypt_cex2c_queue_probe(struct ap_device *ap_dev)
if (rc) {
aq->private = NULL;
zcrypt_queue_free(zq);
return rc;
}
if (ap_test_bit(&aq->card->functions, AP_FUNC_COPRO)) {
rc = sysfs_create_group(&ap_dev->device.kobj,
&cca_queue_attr_grp);
if (rc) {
zcrypt_queue_unregister(zq);
aq->private = NULL;
zcrypt_queue_free(zq);
}
}
return rc;
}
......@@ -253,6 +364,8 @@ static void zcrypt_cex2c_queue_remove(struct ap_device *ap_dev)
struct ap_queue *aq = to_ap_queue(&ap_dev->device);
struct zcrypt_queue *zq = aq->private;
if (ap_test_bit(&aq->card->functions, AP_FUNC_COPRO))
sysfs_remove_group(&ap_dev->device.kobj, &cca_queue_attr_grp);
if (zq)
zcrypt_queue_unregister(zq);
}
......
......@@ -250,7 +250,7 @@ static ssize_t ep11_card_op_modes_show(struct device *dev,
ep11_get_card_info(ac->id, &ci, zc->online);
for (i = 0; ep11_op_modes[i].mode_txt; i++) {
if (ci.op_mode & (1 << ep11_op_modes[i].mode_bit)) {
if (ci.op_mode & (1ULL << ep11_op_modes[i].mode_bit)) {
if (n > 0)
buf[n++] = ' ';
n += scnprintf(buf + n, PAGE_SIZE - n,
......@@ -345,7 +345,7 @@ static ssize_t ep11_queue_op_modes_show(struct device *dev,
&di);
for (i = 0; ep11_op_modes[i].mode_txt; i++) {
if (di.op_mode & (1 << ep11_op_modes[i].mode_bit)) {
if (di.op_mode & (1ULL << ep11_op_modes[i].mode_bit)) {
if (n > 0)
buf[n++] = ' ';
n += scnprintf(buf + n, PAGE_SIZE - n,
......@@ -529,22 +529,27 @@ static int zcrypt_cex4_card_probe(struct ap_device *ap_dev)
if (rc) {
ac->private = NULL;
zcrypt_card_free(zc);
goto out;
return rc;
}
if (ap_test_bit(&ac->functions, AP_FUNC_COPRO)) {
rc = sysfs_create_group(&ap_dev->device.kobj,
&cca_card_attr_grp);
if (rc)
if (rc) {
zcrypt_card_unregister(zc);
ac->private = NULL;
zcrypt_card_free(zc);
}
} else if (ap_test_bit(&ac->functions, AP_FUNC_EP11)) {
rc = sysfs_create_group(&ap_dev->device.kobj,
&ep11_card_attr_grp);
if (rc)
if (rc) {
zcrypt_card_unregister(zc);
ac->private = NULL;
zcrypt_card_free(zc);
}
}
out:
return rc;
}
......@@ -617,22 +622,27 @@ static int zcrypt_cex4_queue_probe(struct ap_device *ap_dev)
if (rc) {
aq->private = NULL;
zcrypt_queue_free(zq);
goto out;
return rc;
}
if (ap_test_bit(&aq->card->functions, AP_FUNC_COPRO)) {
rc = sysfs_create_group(&ap_dev->device.kobj,
&cca_queue_attr_grp);
if (rc)
if (rc) {
zcrypt_queue_unregister(zq);
aq->private = NULL;
zcrypt_queue_free(zq);
}
} else if (ap_test_bit(&aq->card->functions, AP_FUNC_EP11)) {
rc = sysfs_create_group(&ap_dev->device.kobj,
&ep11_queue_attr_grp);
if (rc)
if (rc) {
zcrypt_queue_unregister(zq);
aq->private = NULL;
zcrypt_queue_free(zq);
}
}
out:
return rc;
}
......
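Besides the probe/remove unwind fixes, note the two 1 << mode_bit changes above: op_mode is a 64-bit mask, and shifting a plain int by 32 or more is undefined behavior, so mode bits above 31 were never testable. Sketch of the corrected test:

static inline bool ep11_op_mode_set(u64 op_mode, unsigned int mode_bit)
{
        return op_mode & (1ULL << mode_bit); /* 1 << mode_bit is UB for bit >= 32 */
}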
......@@ -80,7 +80,7 @@ struct error_hdr {
static inline int convert_error(struct zcrypt_queue *zq,
struct ap_message *reply)
{
struct error_hdr *ehdr = reply->message;
struct error_hdr *ehdr = reply->msg;
int card = AP_QID_CARD(zq->queue->qid);
int queue = AP_QID_QUEUE(zq->queue->qid);
......@@ -127,7 +127,7 @@ static inline int convert_error(struct zcrypt_queue *zq,
struct {
struct type86_hdr hdr;
struct type86_fmt2_ext fmt2;
} __packed * head = reply->message;
} __packed * head = reply->msg;
unsigned int apfs = *((u32 *)head->fmt2.apfs);
ZCRYPT_DBF(DBF_ERR,
......
......@@ -207,10 +207,10 @@ static int ICAMEX_msg_to_type50MEX_msg(struct zcrypt_queue *zq,
mod_len = mex->inputdatalength;
if (mod_len <= 128) {
struct type50_meb1_msg *meb1 = ap_msg->message;
struct type50_meb1_msg *meb1 = ap_msg->msg;
memset(meb1, 0, sizeof(*meb1));
ap_msg->length = sizeof(*meb1);
ap_msg->len = sizeof(*meb1);
meb1->header.msg_type_code = TYPE50_TYPE_CODE;
meb1->header.msg_len = sizeof(*meb1);
meb1->keyblock_type = TYPE50_MEB1_FMT;
......@@ -218,10 +218,10 @@ static int ICAMEX_msg_to_type50MEX_msg(struct zcrypt_queue *zq,
exp = meb1->exponent + sizeof(meb1->exponent) - mod_len;
inp = meb1->message + sizeof(meb1->message) - mod_len;
} else if (mod_len <= 256) {
struct type50_meb2_msg *meb2 = ap_msg->message;
struct type50_meb2_msg *meb2 = ap_msg->msg;
memset(meb2, 0, sizeof(*meb2));
ap_msg->length = sizeof(*meb2);
ap_msg->len = sizeof(*meb2);
meb2->header.msg_type_code = TYPE50_TYPE_CODE;
meb2->header.msg_len = sizeof(*meb2);
meb2->keyblock_type = TYPE50_MEB2_FMT;
......@@ -229,10 +229,10 @@ static int ICAMEX_msg_to_type50MEX_msg(struct zcrypt_queue *zq,
exp = meb2->exponent + sizeof(meb2->exponent) - mod_len;
inp = meb2->message + sizeof(meb2->message) - mod_len;
} else if (mod_len <= 512) {
struct type50_meb3_msg *meb3 = ap_msg->message;
struct type50_meb3_msg *meb3 = ap_msg->msg;
memset(meb3, 0, sizeof(*meb3));
ap_msg->length = sizeof(*meb3);
ap_msg->len = sizeof(*meb3);
meb3->header.msg_type_code = TYPE50_TYPE_CODE;
meb3->header.msg_len = sizeof(*meb3);
meb3->keyblock_type = TYPE50_MEB3_FMT;
......@@ -275,10 +275,10 @@ static int ICACRT_msg_to_type50CRT_msg(struct zcrypt_queue *zq,
* 512 byte modulus (4k keys).
*/
if (mod_len <= 128) { /* up to 1024 bit key size */
struct type50_crb1_msg *crb1 = ap_msg->message;
struct type50_crb1_msg *crb1 = ap_msg->msg;
memset(crb1, 0, sizeof(*crb1));
ap_msg->length = sizeof(*crb1);
ap_msg->len = sizeof(*crb1);
crb1->header.msg_type_code = TYPE50_TYPE_CODE;
crb1->header.msg_len = sizeof(*crb1);
crb1->keyblock_type = TYPE50_CRB1_FMT;
......@@ -289,10 +289,10 @@ static int ICACRT_msg_to_type50CRT_msg(struct zcrypt_queue *zq,
u = crb1->u + sizeof(crb1->u) - short_len;
inp = crb1->message + sizeof(crb1->message) - mod_len;
} else if (mod_len <= 256) { /* up to 2048 bit key size */
struct type50_crb2_msg *crb2 = ap_msg->message;
struct type50_crb2_msg *crb2 = ap_msg->msg;
memset(crb2, 0, sizeof(*crb2));
ap_msg->length = sizeof(*crb2);
ap_msg->len = sizeof(*crb2);
crb2->header.msg_type_code = TYPE50_TYPE_CODE;
crb2->header.msg_len = sizeof(*crb2);
crb2->keyblock_type = TYPE50_CRB2_FMT;
......@@ -304,10 +304,10 @@ static int ICACRT_msg_to_type50CRT_msg(struct zcrypt_queue *zq,
inp = crb2->message + sizeof(crb2->message) - mod_len;
} else if ((mod_len <= 512) && /* up to 4096 bit key size */
(zq->zcard->max_mod_size == CEX3A_MAX_MOD_SIZE)) {
struct type50_crb3_msg *crb3 = ap_msg->message;
struct type50_crb3_msg *crb3 = ap_msg->msg;
memset(crb3, 0, sizeof(*crb3));
ap_msg->length = sizeof(*crb3);
ap_msg->len = sizeof(*crb3);
crb3->header.msg_type_code = TYPE50_TYPE_CODE;
crb3->header.msg_len = sizeof(*crb3);
crb3->keyblock_type = TYPE50_CRB3_FMT;
......@@ -350,7 +350,7 @@ static int convert_type80(struct zcrypt_queue *zq,
char __user *outputdata,
unsigned int outputdatalength)
{
struct type80_hdr *t80h = reply->message;
struct type80_hdr *t80h = reply->msg;
unsigned char *data;
if (t80h->len < sizeof(*t80h) + outputdatalength) {
......@@ -370,7 +370,7 @@ static int convert_type80(struct zcrypt_queue *zq,
BUG_ON(t80h->len > CEX2A_MAX_RESPONSE_SIZE);
else
BUG_ON(t80h->len > CEX3A_MAX_RESPONSE_SIZE);
data = reply->message + t80h->len - outputdatalength;
data = reply->msg + t80h->len - outputdatalength;
if (copy_to_user(outputdata, data, outputdatalength))
return -EFAULT;
return 0;
......@@ -382,7 +382,7 @@ static int convert_response(struct zcrypt_queue *zq,
unsigned int outputdatalength)
{
/* Response type byte is the second byte in the response. */
unsigned char rtype = ((unsigned char *) reply->message)[1];
unsigned char rtype = ((unsigned char *) reply->msg)[1];
switch (rtype) {
case TYPE82_RSP_CODE:
......@@ -422,22 +422,20 @@ static void zcrypt_cex2a_receive(struct ap_queue *aq,
.reply_code = REP82_ERROR_MACHINE_FAILURE,
};
struct type80_hdr *t80h;
int length;
int len;
/* Copy the reply message to the request message buffer. */
if (!reply)
goto out; /* ap_msg->rc indicates the error */
t80h = reply->message;
t80h = reply->msg;
if (t80h->type == TYPE80_RSP_CODE) {
if (aq->ap_dev.device_type == AP_DEVICE_TYPE_CEX2A)
length = min_t(int,
CEX2A_MAX_RESPONSE_SIZE, t80h->len);
len = min_t(int, CEX2A_MAX_RESPONSE_SIZE, t80h->len);
else
length = min_t(int,
CEX3A_MAX_RESPONSE_SIZE, t80h->len);
memcpy(msg->message, reply->message, length);
len = min_t(int, CEX3A_MAX_RESPONSE_SIZE, t80h->len);
memcpy(msg->msg, reply->msg, len);
} else
memcpy(msg->message, reply->message, sizeof(error_reply));
memcpy(msg->msg, reply->msg, sizeof(error_reply));
out:
complete((struct completion *) msg->private);
}
......@@ -460,12 +458,10 @@ static long zcrypt_cex2a_modexpo(struct zcrypt_queue *zq,
ap_init_message(&ap_msg);
if (zq->zcard->user_space_type == ZCRYPT_CEX2A)
ap_msg.message = kmalloc(MSGTYPE50_CRB2_MAX_MSG_SIZE,
GFP_KERNEL);
ap_msg.msg = kmalloc(MSGTYPE50_CRB2_MAX_MSG_SIZE, GFP_KERNEL);
else
ap_msg.message = kmalloc(MSGTYPE50_CRB3_MAX_MSG_SIZE,
GFP_KERNEL);
if (!ap_msg.message)
ap_msg.msg = kmalloc(MSGTYPE50_CRB3_MAX_MSG_SIZE, GFP_KERNEL);
if (!ap_msg.msg)
return -ENOMEM;
ap_msg.receive = zcrypt_cex2a_receive;
ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
......@@ -486,7 +482,7 @@ static long zcrypt_cex2a_modexpo(struct zcrypt_queue *zq,
/* Signal pending. */
ap_cancel_message(zq->queue, &ap_msg);
out_free:
kfree(ap_msg.message);
kfree(ap_msg.msg);
return rc;
}
......@@ -506,12 +502,10 @@ static long zcrypt_cex2a_modexpo_crt(struct zcrypt_queue *zq,
ap_init_message(&ap_msg);
if (zq->zcard->user_space_type == ZCRYPT_CEX2A)
ap_msg.message = kmalloc(MSGTYPE50_CRB2_MAX_MSG_SIZE,
GFP_KERNEL);
ap_msg.msg = kmalloc(MSGTYPE50_CRB2_MAX_MSG_SIZE, GFP_KERNEL);
else
ap_msg.message = kmalloc(MSGTYPE50_CRB3_MAX_MSG_SIZE,
GFP_KERNEL);
if (!ap_msg.message)
ap_msg.msg = kmalloc(MSGTYPE50_CRB3_MAX_MSG_SIZE, GFP_KERNEL);
if (!ap_msg.msg)
return -ENOMEM;
ap_msg.receive = zcrypt_cex2a_receive;
ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
......@@ -532,7 +526,7 @@ static long zcrypt_cex2a_modexpo_crt(struct zcrypt_queue *zq,
/* Signal pending. */
ap_cancel_message(zq->queue, &ap_msg);
out_free:
kfree(ap_msg.message);
kfree(ap_msg.msg);
return rc;
}
......
......@@ -127,7 +127,7 @@ static inline void rng_type6CPRB_msgX(struct ap_message *ap_msg,
char rule[8];
short int verb_length;
short int key_length;
} __packed * msg = ap_msg->message;
} __packed * msg = ap_msg->msg;
static struct type6_hdr static_type6_hdrX = {
.type = 0x06,
.offset1 = 0x00000058,
......@@ -154,7 +154,7 @@ static inline void rng_type6CPRB_msgX(struct ap_message *ap_msg,
memcpy(msg->rule, "RANDOM ", 8);
msg->verb_length = 0x02;
msg->key_length = 0x02;
ap_msg->length = sizeof(*msg);
ap_msg->len = sizeof(*msg);
*domain = (unsigned short)msg->cprbx.domain;
}
......
......@@ -107,10 +107,10 @@ struct zcrypt_queue *zcrypt_queue_alloc(size_t max_response_size)
zq = kzalloc(sizeof(struct zcrypt_queue), GFP_KERNEL);
if (!zq)
return NULL;
zq->reply.message = kmalloc(max_response_size, GFP_KERNEL);
if (!zq->reply.message)
zq->reply.msg = kmalloc(max_response_size, GFP_KERNEL);
if (!zq->reply.msg)
goto out_free;
zq->reply.length = max_response_size;
zq->reply.len = max_response_size;
INIT_LIST_HEAD(&zq->list);
kref_init(&zq->refcount);
return zq;
......@@ -123,7 +123,7 @@ EXPORT_SYMBOL(zcrypt_queue_alloc);
void zcrypt_queue_free(struct zcrypt_queue *zq)
{
kfree(zq->reply.message);
kfree(zq->reply.msg);
kfree(zq);
}
EXPORT_SYMBOL(zcrypt_queue_free);
......
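The same msg/len rename applied to the preallocated per-queue reply buffer. Pairing sketch (illustrative size; in the driver the queue is normally released via its kref):

struct zcrypt_queue *zq = zcrypt_queue_alloc(4096);

if (!zq)
        return -ENOMEM;
/* zq->reply.msg is a 4096-byte buffer, zq->reply.len == 4096 */
zcrypt_queue_free(zq);       /* kfree()s zq->reply.msg, then zq */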
......@@ -77,16 +77,12 @@ struct memblock_type {
* @current_limit: physical address of the current allocation limit
* @memory: usable memory regions
* @reserved: reserved memory regions
* @physmem: all physical memory
*/
struct memblock {
bool bottom_up; /* is bottom up direction? */
phys_addr_t current_limit;
struct memblock_type memory;
struct memblock_type reserved;
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
struct memblock_type physmem;
#endif
};
extern struct memblock memblock;
......@@ -145,6 +141,30 @@ void __next_reserved_mem_region(u64 *idx, phys_addr_t *out_start,
void __memblock_free_late(phys_addr_t base, phys_addr_t size);
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
static inline void __next_physmem_range(u64 *idx, struct memblock_type *type,
phys_addr_t *out_start,
phys_addr_t *out_end)
{
extern struct memblock_type physmem;
__next_mem_range(idx, NUMA_NO_NODE, MEMBLOCK_NONE, &physmem, type,
out_start, out_end, NULL);
}
/**
* for_each_physmem_range - iterate through physmem areas not included in type.
* @i: u64 used as loop variable
* @type: ptr to memblock_type which excludes from the iteration, can be %NULL
* @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
* @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
*/
#define for_each_physmem_range(i, type, p_start, p_end) \
for (i = 0, __next_physmem_range(&i, type, p_start, p_end); \
i != (u64)ULLONG_MAX; \
__next_physmem_range(&i, type, p_start, p_end))
#endif /* CONFIG_HAVE_MEMBLOCK_PHYS_MAP */
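A hypothetical walker using the new iterator (only meaningful on architectures that select CONFIG_HAVE_MEMBLOCK_PHYS_MAP; passing NULL as type iterates all physmem ranges):

static void __init dump_physmem(void)
{
        phys_addr_t start, end;
        u64 i;

        for_each_physmem_range(i, NULL, &start, &end)
                pr_info("physmem range: [%pa-%pa]\n", &start, &end);
}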
/**
* for_each_mem_range - iterate through memblock areas from type_a and not
* included in type_b. Or just type_a if type_b is NULL.
......
......@@ -44,19 +44,20 @@
* in the system, for instance when the memory is restricted with
* ``mem=`` command line parameter
* * ``reserved`` - describes the regions that were allocated
* * ``physmap`` - describes the actual physical memory regardless of
* the possible restrictions; the ``physmap`` type is only available
* on some architectures.
* * ``physmem`` - describes the actual physical memory available during
* boot regardless of the possible restrictions and memory hot(un)plug;
* the ``physmem`` type is only available on some architectures.
*
* Each region is represented by :c:type:`struct memblock_region` that
* defines the region extents, its attributes and NUMA node id on NUMA
* systems. Every memory type is described by the :c:type:`struct
* memblock_type` which contains an array of memory regions along with
* the allocator metadata. The memory types are nicely wrapped with
* :c:type:`struct memblock`. This structure is statically initialzed
* at build time. The region arrays for the "memory" and "reserved"
* types are initially sized to %INIT_MEMBLOCK_REGIONS and for the
* "physmap" type to %INIT_PHYSMEM_REGIONS.
* the allocator metadata. The "memory" and "reserved" types are nicely
* wrapped with :c:type:`struct memblock`. This structure is statically
* initialized at build time. The region arrays are initially sized to
* %INIT_MEMBLOCK_REGIONS for "memory" and %INIT_MEMBLOCK_RESERVED_REGIONS
* for "reserved". The region array for "physmem" is initially sized to
* %INIT_PHYSMEM_REGIONS.
* The memblock_allow_resize() enables automatic resizing of the region
* arrays during addition of new regions. This feature should be used
* with care so that memory allocated for the region array will not
......@@ -87,8 +88,8 @@
* function frees all the memory to the buddy page allocator.
*
* Unless an architecture enables %CONFIG_ARCH_KEEP_MEMBLOCK, the
* memblock data structures will be discarded after the system
* initialization completes.
* memblock data structures (except "physmem") will be discarded after the
* system initialization completes.
*/
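Since physmem now lives outside struct memblock and (further down) its region array loses the __initdata_memblock annotation, it survives past boot even without CONFIG_ARCH_KEEP_MEMBLOCK. Hypothetical architecture boot code (detected_base/detected_size are made-up names):

void __init arch_record_memory(phys_addr_t detected_base,
                               phys_addr_t detected_size)
{
        memblock_add(detected_base, detected_size);          /* usable memory */
        memblock_physmem_add(detected_base, detected_size);  /* permanent record */
}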
#ifndef CONFIG_NEED_MULTIPLE_NODES
......@@ -104,7 +105,7 @@ unsigned long long max_possible_pfn;
static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_RESERVED_REGIONS] __initdata_memblock;
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
static struct memblock_region memblock_physmem_init_regions[INIT_PHYSMEM_REGIONS] __initdata_memblock;
static struct memblock_region memblock_physmem_init_regions[INIT_PHYSMEM_REGIONS];
#endif
struct memblock memblock __initdata_memblock = {
......@@ -118,17 +119,19 @@ struct memblock memblock __initdata_memblock = {
.reserved.max = INIT_MEMBLOCK_RESERVED_REGIONS,
.reserved.name = "reserved",
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
.physmem.regions = memblock_physmem_init_regions,
.physmem.cnt = 1, /* empty dummy entry */
.physmem.max = INIT_PHYSMEM_REGIONS,
.physmem.name = "physmem",
#endif
.bottom_up = false,
.current_limit = MEMBLOCK_ALLOC_ANYWHERE,
};
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
struct memblock_type physmem = {
.regions = memblock_physmem_init_regions,
.cnt = 1, /* empty dummy entry */
.max = INIT_PHYSMEM_REGIONS,
.name = "physmem",
};
#endif
int memblock_debug __initdata_memblock;
static bool system_has_some_mirror __initdata_memblock = false;
static int memblock_can_resize __initdata_memblock;
......@@ -838,7 +841,7 @@ int __init_memblock memblock_physmem_add(phys_addr_t base, phys_addr_t size)
memblock_dbg("%s: [%pa-%pa] %pS\n", __func__,
&base, &end, (void *)_RET_IP_);
return memblock_add_range(&memblock.physmem, base, size, MAX_NUMNODES, 0);
return memblock_add_range(&physmem, base, size, MAX_NUMNODES, 0);
}
#endif
......@@ -1019,12 +1022,10 @@ static bool should_skip_region(struct memblock_region *m, int nid, int flags)
* As both region arrays are sorted, the function advances the two indices
* in lockstep and returns each intersection.
*/
void __init_memblock __next_mem_range(u64 *idx, int nid,
enum memblock_flags flags,
struct memblock_type *type_a,
struct memblock_type *type_b,
phys_addr_t *out_start,
phys_addr_t *out_end, int *out_nid)
void __next_mem_range(u64 *idx, int nid, enum memblock_flags flags,
struct memblock_type *type_a,
struct memblock_type *type_b, phys_addr_t *out_start,
phys_addr_t *out_end, int *out_nid)
{
int idx_a = *idx & 0xffffffff;
int idx_b = *idx >> 32;
......@@ -1924,7 +1925,7 @@ void __init_memblock __memblock_dump_all(void)
memblock_dump(&memblock.memory);
memblock_dump(&memblock.reserved);
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
memblock_dump(&memblock.physmem);
memblock_dump(&physmem);
#endif
}
......@@ -2064,8 +2065,8 @@ static int __init memblock_init_debugfs(void)
debugfs_create_file("reserved", 0444, root,
&memblock.reserved, &memblock_debug_fops);
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
debugfs_create_file("physmem", 0444, root,
&memblock.physmem, &memblock_debug_fops);
debugfs_create_file("physmem", 0444, root, &physmem,
&memblock_debug_fops);
#endif
return 0;
......
......@@ -255,6 +255,45 @@ static void x86_sort_relative_table(char *extab_image, int image_size)
}
}
static void s390_sort_relative_table(char *extab_image, int image_size)
{
int i;
for (i = 0; i < image_size; i += 16) {
char *loc = extab_image + i;
uint64_t handler;
w(r((uint32_t *)loc) + i, (uint32_t *)loc);
w(r((uint32_t *)(loc + 4)) + (i + 4), (uint32_t *)(loc + 4));
/*
* 0 is a special self-relative handler value, which means that
* handler should be ignored. It is safe, because it means that
* handler field points to itself, which should never happen.
* When creating extable-relative values, keep it as 0, since
* this should never occur either: it would mean that handler
* field points to the first extable entry.
*/
handler = r8((uint64_t *)(loc + 8));
if (handler)
handler += i + 8;
w8(handler, (uint64_t *)(loc + 8));
}
qsort(extab_image, image_size / 16, 16, compare_relative_table);
for (i = 0; i < image_size; i += 16) {
char *loc = extab_image + i;
uint64_t handler;
w(r((uint32_t *)loc) - i, (uint32_t *)loc);
w(r((uint32_t *)(loc + 4)) - (i + 4), (uint32_t *)(loc + 4));
handler = r8((uint64_t *)(loc + 8));
if (handler)
handler -= i + 8;
w8(handler, (uint64_t *)(loc + 8));
}
}
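Each s390 extable entry is 16 bytes: two 32-bit self-relative fields (insn and fixup) followed by a 64-bit handler. The loops above convert the self-relative values to image-absolute offsets so the qsort() comparisons stay valid, then convert them back afterwards. An illustrative helper showing the math for one 32-bit field (host-endian read; the real tool goes through the r()/w() endianness wrappers):

static uint64_t field_to_abs(const char *extab_image, int entry, int off)
{
        const char *loc = extab_image + entry + off;

        /* the stored value is relative to its own location */
        return (uint64_t)(entry + off) + *(const int32_t *)loc;
}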
static int do_file(char const *const fname, void *addr)
{
int rc = -1;
......@@ -297,6 +336,8 @@ static int do_file(char const *const fname, void *addr)
custom_sort = x86_sort_relative_table;
break;
case EM_S390:
custom_sort = s390_sort_relative_table;
break;
case EM_AARCH64:
case EM_PARISC:
case EM_PPC:
......