Commit 6a466769 authored by Linus Torvalds

Merge tag 's390-6.5-1' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux

Pull s390 updates from Alexander Gordeev:

 - Fix the style of the protected key API driver source: use reverse
   x-mas tree ordering for all local variable declarations

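   As a quick illustration of the reverse x-mas tree convention (longest
   declaration line first, shortest last); the function and variable
   names here are made up for the example, not taken from this series:

      static int pkey_example_declarations(void)
      {
              unsigned long some_longer_name = 0;
              unsigned int medium_name = 0;
              u32 keylen = 0;
              int rc = 0;

              /* declarations ordered by line length, longest first */
              return rc + (int)(some_longer_name + medium_name + keylen);
      }
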
 - Rework the protected key API driver to no longer use the structs
   pkey_protkey and pkey_clrkey. Both structures have a fixed size
   buffer, but with the support of ECC protected keys these buffers are
   not big enough. Use dynamic buffers internally and transparently for
   userspace

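   A rough sketch of what a caller of the reworked in-kernel interface
   looks like after this change (the buffer size below is an assumption
   for illustration, not a limit defined by the driver); compare the
   pkey_keyblob2pkey() prototype change further down in the diff:

      #include <asm/pkey.h>

      static int example_blob_to_protkey(const u8 *blob, u32 bloblen)
      {
              u8 protkey[256];        /* assumed large enough for any key type */
              u32 protkeylen = sizeof(protkey);
              u32 protkeytype;

              /* protected key, its length and type come back via pointers */
              return pkey_keyblob2pkey(blob, bloblen, protkey,
                                       &protkeylen, &protkeytype);
      }
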
 - Add support for a new 'non CCA clear key token' with ECC clear keys
   supported: ECC P256, ECC P384, ECC P521, ECC ED25519 and ECC ED448.
   This makes it possible to derive a protected key from the ECC clear
   key input via the PKEY_KBLOB2PROTK3 ioctl; currently the only way to
   derive such a key is via the PCKMO instruction

 - The s390 PMUs for the PAI crypto and PAI extension 1 (NNPA) counters
   use atomic_t for reference counting. Replace this with the proper
   refcount_t data type

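   The conversion follows the usual plain counter/atomic_t to refcount_t
   pattern; a simplified sketch (not the literal PAI code, which
   serializes these paths with a mutex):

      #include <linux/refcount.h>

      struct example_map {
              refcount_t refcnt;              /* was: unsigned int refcnt */
      };

      /* callers hold a mutex, as the PAI drivers do */
      static void example_get(struct example_map *map)
      {
              if (!refcount_read(&map->refcnt))       /* first user */
                      refcount_set(&map->refcnt, 1);
              else                                    /* additional users */
                      refcount_inc(&map->refcnt);
      }

      static void example_put(struct example_map *map)
      {
              if (refcount_dec_and_test(&map->refcnt)) {
                      /* last reference gone: release buffers here */
              }
      }
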
 - Select ARCH_SUPPORTS_INT128, but limit this to clang for now, since
   gcc generates inefficient code, which may lead to stack overflows

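   With the option enabled, generic code may emit 128-bit operations;
   variable 128-bit shifts are what the new __ashlti3/__ashrti3/__lshrti3
   library routines (tishift.S, added further down) back. A trivial,
   purely illustrative user:

      static inline unsigned __int128 example_shl128(unsigned __int128 v,
                                                     unsigned int bits)
      {
              /* a non-constant shift count may become an __ashlti3() call */
              return v << bits;
      }
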
 - Replace the one-element array with a flexible-array member in struct
   vfio_ccw_parent and refactor the rest of the code accordingly. Also,
   prefer struct_size() over open-coded sizeof() calculations

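   The pattern, reduced to a sketch (the real change is to struct
   vfio_ccw_parent and its kzalloc() call, both visible in the diff
   below; struct example_parent here is hypothetical):

      #include <linux/mdev.h>
      #include <linux/overflow.h>
      #include <linux/slab.h>

      struct example_parent {
              struct mdev_parent parent;
              struct mdev_type *mdev_types[];   /* was: *mdev_types[1] */
      };

      static struct example_parent *example_alloc(void)
      {
              struct example_parent *parent;

              /* struct_size() = sizeof(*parent) + 1 * sizeof(parent->mdev_types[0]) */
              parent = kzalloc(struct_size(parent, mdev_types, 1), GFP_KERNEL);
              return parent;
      }
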
 - Introduce an OS_INFO_FLAGS_ENTRY pointing to a flags field and an
   OS_INFO_FLAG_REIPL_CLEAR flag that tells a dumper whether system
   memory should be cleared once it has been dumped

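   Producer and consumer of the new entry are both part of this series;
   in essence (fragments taken from the diff below, not a standalone
   function):

      /* panicked system, dump_reipl_run(): record the 'clear' setting */
      os_info_flags |= OS_INFO_FLAG_REIPL_CLEAR;
      os_info_entry_add(OS_INFO_FLAGS_ENTRY, &os_info_flags, sizeof(os_info_flags));

      /* dumper (zcore): honour the flag when re-IPLing */
      if (os_info_flags & OS_INFO_FLAG_REIPL_CLEAR)
              diag308(DIAG308_LOAD_CLEAR, NULL);
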
 - Fix a hang when a user attempts to remove a VFIO-AP mediated device
   attached to a guest: add VFIO_DEVICE_GET_IRQ_INFO and
   VFIO_DEVICE_SET_IRQS IOCTLs and wire up the VFIO bus driver callback
   to request a release of the device

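   From userspace the new IRQ index is driven like any other VFIO IRQ; a
   hedged sketch of registering an eventfd as the request trigger on an
   already opened vfio-ap device fd (error handling trimmed, helper name
   and layout are this example's own):

      #include <linux/vfio.h>
      #include <stdint.h>
      #include <stdlib.h>
      #include <string.h>
      #include <sys/eventfd.h>
      #include <sys/ioctl.h>

      static int register_req_trigger(int device_fd)
      {
              struct vfio_irq_info info = {
                      .argsz = sizeof(info),
                      .index = VFIO_AP_REQ_IRQ_INDEX,
              };
              struct vfio_irq_set *set;
              size_t argsz = sizeof(*set) + sizeof(int32_t);
              int32_t efd;
              int ret;

              if (ioctl(device_fd, VFIO_DEVICE_GET_IRQ_INFO, &info) < 0 ||
                  !(info.flags & VFIO_IRQ_INFO_EVENTFD))
                      return -1;

              efd = eventfd(0, EFD_CLOEXEC);
              if (efd < 0)
                      return -1;

              set = calloc(1, argsz);
              if (!set)
                      return -1;

              set->argsz = argsz;
              set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
              set->index = VFIO_AP_REQ_IRQ_INDEX;
              set->start = 0;
              set->count = 1;
              memcpy(set->data, &efd, sizeof(efd));   /* the eventfd to signal */

              ret = ioctl(device_fd, VFIO_DEVICE_SET_IRQS, set);
              free(set);
              return ret;
      }
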
 - Fix calculation for R_390_GOTENT relocations for modules

 - Allow any user space process with the CAP_PERFMON capability to read
   and display the CPU Measurement facility counter sets

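   In the kernel such checks typically come down to perfmon_capable(); a
   minimal sketch of a device-open permission check of this kind (not
   the literal cpum_cf code):

      #include <linux/capability.h>
      #include <linux/errno.h>
      #include <linux/fs.h>

      static int example_hwctr_open(struct inode *inode, struct file *file)
      {
              /* perfmon_capable(): CAP_PERFMON or, for compatibility, CAP_SYS_ADMIN */
              if (!perfmon_capable())
                      return -EPERM;
              return 0;
      }
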
 - Rework the large statically-defined per-CPU cpu_cf_events data
   structure and replace it with dynamically allocated structures
   created when a perf_event_open() system call is invoked or the
   /dev/hwctr device is accessed

* tag 's390-6.5-1' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux:
  s390/cpum_cf: rework PER_CPU_DEFINE of struct cpu_cf_events
  s390/cpum_cf: open access to hwctr device for CAP_PERFMON privileged process
  s390/module: fix rela calculation for R_390_GOTENT
  s390/vfio-ap: wire in the vfio_device_ops request callback
  s390/vfio-ap: realize the VFIO_DEVICE_SET_IRQS ioctl
  s390/vfio-ap: realize the VFIO_DEVICE_GET_IRQ_INFO ioctl
  s390/pkey: add support for ecc clear key
  s390/pkey: do not use struct pkey_protkey
  s390/pkey: introduce reverse x-mas trees
  s390/zcore: conditionally clear memory on reipl
  s390/ipl: add REIPL_CLEAR flag to os_info
  vfio/ccw: use struct_size() helper
  vfio/ccw: replace one-element array with flexible-array member
  s390: select ARCH_SUPPORTS_INT128
  s390/pai_ext: replace atomic_t with refcount_t
  s390/pai_crypto: replace atomic_t with refcount_t
parents 8d8026f3 9b9cf3c7
@@ -117,6 +117,7 @@ config S390
 	select ARCH_SUPPORTS_ATOMIC_RMW
 	select ARCH_SUPPORTS_DEBUG_PAGEALLOC
 	select ARCH_SUPPORTS_HUGETLBFS
+	select ARCH_SUPPORTS_INT128 if CC_HAS_INT128 && CC_IS_CLANG
 	select ARCH_SUPPORTS_NUMA_BALANCING
 	select ARCH_SUPPORTS_PER_VMA_LOCK
 	select ARCH_USE_BUILTIN_BSWAP
@@ -5,7 +5,7 @@
  * s390 implementation of the AES Cipher Algorithm with protected keys.
  *
  * s390 Version:
- *   Copyright IBM Corp. 2017,2020
+ *   Copyright IBM Corp. 2017, 2023
  *   Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
  *		Harald Freudenberger <freude@de.ibm.com>
  */
@@ -132,7 +132,8 @@ static inline int __paes_keyblob2pkey(struct key_blob *kb,
 		if (i > 0 && ret == -EAGAIN && in_task())
 			if (msleep_interruptible(1000))
 				return -EINTR;
-		ret = pkey_keyblob2pkey(kb->key, kb->keylen, pk);
+		ret = pkey_keyblob2pkey(kb->key, kb->keylen,
+					pk->protkey, &pk->len, &pk->type);
 		if (ret == 0)
 			break;
 	}
@@ -145,6 +146,7 @@ static inline int __paes_convert_key(struct s390_paes_ctx *ctx)
 	int ret;
 	struct pkey_protkey pkey;
 
+	pkey.len = sizeof(pkey.protkey);
 	ret = __paes_keyblob2pkey(&ctx->kb, &pkey);
 	if (ret)
 		return ret;
@@ -414,6 +416,9 @@ static inline int __xts_paes_convert_key(struct s390_pxts_ctx *ctx)
 {
 	struct pkey_protkey pkey0, pkey1;
 
+	pkey0.len = sizeof(pkey0.protkey);
+	pkey1.len = sizeof(pkey1.protkey);
+
 	if (__paes_keyblob2pkey(&ctx->kb[0], &pkey0) ||
 	    __paes_keyblob2pkey(&ctx->kb[1], &pkey1))
 		return -EINVAL;
@@ -6,4 +6,8 @@
 #include <asm/fpu/api.h>
 #include <asm-generic/asm-prototypes.h>
 
+__int128_t __ashlti3(__int128_t a, int b);
+__int128_t __ashrti3(__int128_t a, int b);
+__int128_t __lshrti3(__int128_t a, int b);
+
 #endif /* _ASM_S390_PROTOTYPES_H */
@@ -2,7 +2,7 @@
 /*
  * CP Assist for Cryptographic Functions (CPACF)
  *
- * Copyright IBM Corp. 2003, 2017
+ * Copyright IBM Corp. 2003, 2023
  * Author(s): Thomas Spatzier
  *	      Jan Glauber
  *	      Harald Freudenberger (freude@de.ibm.com)
@@ -132,6 +132,11 @@
 #define CPACF_PCKMO_ENC_AES_128_KEY	0x12
 #define CPACF_PCKMO_ENC_AES_192_KEY	0x13
 #define CPACF_PCKMO_ENC_AES_256_KEY	0x14
+#define CPACF_PCKMO_ENC_ECC_P256_KEY	0x20
+#define CPACF_PCKMO_ENC_ECC_P384_KEY	0x21
+#define CPACF_PCKMO_ENC_ECC_P521_KEY	0x22
+#define CPACF_PCKMO_ENC_ECC_ED25519_KEY	0x28
+#define CPACF_PCKMO_ENC_ECC_ED448_KEY	0x29
 
 /*
  * Function codes for the PRNO (PERFORM RANDOM NUMBER OPERATION)
@@ -16,6 +16,9 @@
 #define OS_INFO_VMCOREINFO	0
 #define OS_INFO_REIPL_BLOCK	1
+#define OS_INFO_FLAGS_ENTRY	2
+
+#define OS_INFO_FLAG_REIPL_CLEAR	(1UL << 0)
 
 struct os_info_entry {
 	u64	addr;
@@ -30,8 +33,8 @@ struct os_info {
 	u16	version_minor;
 	u64	crashkernel_addr;
 	u64	crashkernel_size;
-	struct os_info_entry entry[2];
-	u8	reserved[4024];
+	struct os_info_entry entry[3];
+	u8	reserved[4004];
 } __packed;
 
 void os_info_init(void);
@@ -2,7 +2,7 @@
 /*
  * Kernelspace interface to the pkey device driver
  *
- * Copyright IBM Corp. 2016,2019
+ * Copyright IBM Corp. 2016, 2023
  *
  * Author: Harald Freudenberger <freude@de.ibm.com>
  *
@@ -23,6 +23,6 @@
  * @return 0 on success, negative errno value on failure
  */
 int pkey_keyblob2pkey(const u8 *key, u32 keylen,
-		      struct pkey_protkey *protkey);
+		      u8 *protkey, u32 *protkeylen, u32 *protkeytype);
 
 #endif /* _KAPI_PKEY_H */
@@ -2,7 +2,7 @@
 /*
  * Userspace interface to the pkey device driver
  *
- * Copyright IBM Corp. 2017, 2019
+ * Copyright IBM Corp. 2017, 2023
  *
  * Author: Harald Freudenberger <freude@de.ibm.com>
  *
@@ -36,6 +36,11 @@
 #define PKEY_KEYTYPE_AES_192		2
 #define PKEY_KEYTYPE_AES_256		3
 #define PKEY_KEYTYPE_ECC		4
+#define PKEY_KEYTYPE_ECC_P256		5
+#define PKEY_KEYTYPE_ECC_P384		6
+#define PKEY_KEYTYPE_ECC_P521		7
+#define PKEY_KEYTYPE_ECC_ED25519	8
+#define PKEY_KEYTYPE_ECC_ED448		9
 
 /* the newer ioctls use a pkey_key_type enum for type information */
 enum pkey_key_type {
@@ -176,6 +176,8 @@ static bool reipl_fcp_clear;
 static bool reipl_ccw_clear;
 static bool reipl_eckd_clear;
 
+static unsigned long os_info_flags;
+
 static inline int __diag308(unsigned long subcode, unsigned long addr)
 {
 	union register_pair r1;
@@ -1938,6 +1940,20 @@ static void dump_reipl_run(struct shutdown_trigger *trigger)
 	struct lowcore *abs_lc;
 	unsigned int csum;
 
+	/*
+	 * Set REIPL_CLEAR flag in os_info flags entry indicating
+	 * 'clear' sysfs attribute has been set on the panicked system
+	 * for specified reipl type.
+	 * Always set for IPL_TYPE_NSS and IPL_TYPE_UNKNOWN.
+	 */
+	if ((reipl_type == IPL_TYPE_CCW && reipl_ccw_clear) ||
+	    (reipl_type == IPL_TYPE_ECKD && reipl_eckd_clear) ||
+	    (reipl_type == IPL_TYPE_FCP && reipl_fcp_clear) ||
+	    (reipl_type == IPL_TYPE_NVME && reipl_nvme_clear) ||
+	    reipl_type == IPL_TYPE_NSS ||
+	    reipl_type == IPL_TYPE_UNKNOWN)
+		os_info_flags |= OS_INFO_FLAG_REIPL_CLEAR;
+	os_info_entry_add(OS_INFO_FLAGS_ENTRY, &os_info_flags, sizeof(os_info_flags));
 	csum = (__force unsigned int)
 	       csum_partial(reipl_block_actual, reipl_block_actual->hdr.len, 0);
 	abs_lc = get_abs_lowcore();
@@ -352,7 +352,8 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
 			rc = apply_rela_bits(loc, val, 0, 64, 0, write);
 		else if (r_type == R_390_GOTENT ||
 			 r_type == R_390_GOTPLTENT) {
-			val += (Elf_Addr) me->mem[MOD_TEXT].base - loc;
+			val += (Elf_Addr)me->mem[MOD_TEXT].base +
+				me->arch.got_offset - loc;
 			rc = apply_rela_bits(loc, val, 1, 32, 1, write);
 		}
 		break;
@@ -36,7 +36,7 @@ struct paicrypt_map {
 	unsigned long *page;		/* Page for CPU to store counters */
 	struct pai_userdata *save;	/* Page to store no-zero counters */
 	unsigned int active_events;	/* # of PAI crypto users */
-	unsigned int refcnt;		/* Reference count mapped buffers */
+	refcount_t refcnt;		/* Reference count mapped buffers */
 	enum paievt_mode mode;		/* Type of event */
 	struct perf_event *event;	/* Perf event for sampling */
 };
@@ -57,10 +57,11 @@ static void paicrypt_event_destroy(struct perf_event *event)
 	static_branch_dec(&pai_key);
 	mutex_lock(&pai_reserve_mutex);
 	debug_sprintf_event(cfm_dbg, 5, "%s event %#llx cpu %d users %d"
-			    " mode %d refcnt %d\n", __func__,
+			    " mode %d refcnt %u\n", __func__,
 			    event->attr.config, event->cpu,
-			    cpump->active_events, cpump->mode, cpump->refcnt);
-	if (!--cpump->refcnt) {
+			    cpump->active_events, cpump->mode,
+			    refcount_read(&cpump->refcnt));
+	if (refcount_dec_and_test(&cpump->refcnt)) {
 		debug_sprintf_event(cfm_dbg, 4, "%s page %#lx save %p\n",
 				    __func__, (unsigned long)cpump->page,
 				    cpump->save);
@@ -149,8 +150,10 @@ static int paicrypt_busy(struct perf_event_attr *a, struct paicrypt_map *cpump)
 	/* Allocate memory for counter page and counter extraction.
 	 * Only the first counting event has to allocate a page.
 	 */
-	if (cpump->page)
+	if (cpump->page) {
+		refcount_inc(&cpump->refcnt);
 		goto unlock;
+	}
 
 	rc = -ENOMEM;
 	cpump->page = (unsigned long *)get_zeroed_page(GFP_KERNEL);
@@ -164,18 +167,18 @@ static int paicrypt_busy(struct perf_event_attr *a, struct paicrypt_map *cpump)
 		goto unlock;
 	}
 	rc = 0;
+	refcount_set(&cpump->refcnt, 1);
 
 unlock:
 	/* If rc is non-zero, do not set mode and reference count */
 	if (!rc) {
-		cpump->refcnt++;
 		cpump->mode = a->sample_period ? PAI_MODE_SAMPLING
 					       : PAI_MODE_COUNTING;
 	}
 	debug_sprintf_event(cfm_dbg, 5, "%s sample_period %#llx users %d"
-			    " mode %d refcnt %d page %#lx save %p rc %d\n",
+			    " mode %d refcnt %u page %#lx save %p rc %d\n",
 			    __func__, a->sample_period, cpump->active_events,
-			    cpump->mode, cpump->refcnt,
+			    cpump->mode, refcount_read(&cpump->refcnt),
 			    (unsigned long)cpump->page, cpump->save, rc);
 	mutex_unlock(&pai_reserve_mutex);
 	return rc;
@@ -50,7 +50,7 @@ struct paiext_map {
 	struct pai_userdata *save;	/* Area to store non-zero counters */
 	enum paievt_mode mode;		/* Type of event */
 	unsigned int active_events;	/* # of PAI Extension users */
-	unsigned int refcnt;
+	refcount_t refcnt;
 	struct perf_event *event;	/* Perf event for sampling */
 	struct paiext_cb *paiext_cb;	/* PAI extension control block area */
 };
@@ -60,14 +60,14 @@ struct paiext_mapptr {
 };
 
 static struct paiext_root {		/* Anchor to per CPU data */
-	int refcnt;			/* Overall active events */
+	refcount_t refcnt;		/* Overall active events */
 	struct paiext_mapptr __percpu *mapptr;
 } paiext_root;
 
 /* Free per CPU data when the last event is removed. */
 static void paiext_root_free(void)
 {
-	if (!--paiext_root.refcnt) {
+	if (refcount_dec_and_test(&paiext_root.refcnt)) {
 		free_percpu(paiext_root.mapptr);
 		paiext_root.mapptr = NULL;
 	}
@@ -80,7 +80,7 @@ static void paiext_root_free(void)
  */
 static int paiext_root_alloc(void)
 {
-	if (++paiext_root.refcnt == 1) {
+	if (!refcount_inc_not_zero(&paiext_root.refcnt)) {
 		/* The memory is already zeroed. */
 		paiext_root.mapptr = alloc_percpu(struct paiext_mapptr);
 		if (!paiext_root.mapptr) {
@@ -91,6 +91,7 @@ static int paiext_root_alloc(void)
 			 */
 			return -ENOMEM;
 		}
+		refcount_set(&paiext_root.refcnt, 1);
 	}
 	return 0;
 }
@@ -122,7 +123,7 @@ static void paiext_event_destroy(struct perf_event *event)
 
 	mutex_lock(&paiext_reserve_mutex);
 	cpump->event = NULL;
-	if (!--cpump->refcnt)		/* Last reference gone */
+	if (refcount_dec_and_test(&cpump->refcnt))	/* Last reference gone */
 		paiext_free(mp);
 	paiext_root_free();
 	mutex_unlock(&paiext_reserve_mutex);
@@ -163,7 +164,7 @@ static int paiext_alloc(struct perf_event_attr *a, struct perf_event *event)
 		rc = -ENOMEM;
 		cpump = kzalloc(sizeof(*cpump), GFP_KERNEL);
 		if (!cpump)
-			goto unlock;
+			goto undo;
 
 		/* Allocate memory for counter area and counter extraction.
 		 * These are
@@ -183,8 +184,9 @@ static int paiext_alloc(struct perf_event_attr *a, struct perf_event *event)
 				       GFP_KERNEL);
 		if (!cpump->save || !cpump->area || !cpump->paiext_cb) {
 			paiext_free(mp);
-			goto unlock;
+			goto undo;
 		}
+		refcount_set(&cpump->refcnt, 1);
 		cpump->mode = a->sample_period ? PAI_MODE_SAMPLING
 					       : PAI_MODE_COUNTING;
 	} else {
@@ -195,15 +197,15 @@ static int paiext_alloc(struct perf_event_attr *a, struct perf_event *event)
 		if (cpump->mode == PAI_MODE_SAMPLING ||
 		    (cpump->mode == PAI_MODE_COUNTING && a->sample_period)) {
 			rc = -EBUSY;
-			goto unlock;
+			goto undo;
 		}
+		refcount_inc(&cpump->refcnt);
 	}
 
 	rc = 0;
 	cpump->event = event;
-	++cpump->refcnt;
 
-unlock:
+undo:
 	if (rc) {
 		/* Error in allocation of event, decrement anchor. Since
 		 * the event in not created, its destroy() function is never
@@ -211,6 +213,7 @@ static int paiext_alloc(struct perf_event_attr *a, struct perf_event *event)
 		 */
 		paiext_root_free();
 	}
+unlock:
 	mutex_unlock(&paiext_reserve_mutex);
 	/* If rc is non-zero, no increment of counter/sampler was done. */
 	return rc;
@@ -3,7 +3,7 @@
 # Makefile for s390-specific library files..
 #
 
-lib-y += delay.o string.o uaccess.o find.o spinlock.o
+lib-y += delay.o string.o uaccess.o find.o spinlock.o tishift.o
 obj-y += mem.o xor.o
 lib-$(CONFIG_KPROBES) += probes.o
 lib-$(CONFIG_UPROBES) += probes.o
/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/linkage.h>
#include <asm/nospec-insn.h>
#include <asm/export.h>

	.section .noinstr.text, "ax"

	GEN_BR_THUNK %r14

SYM_FUNC_START(__ashlti3)
	lmg	%r0,%r1,0(%r3)
	cije	%r4,0,1f
	lhi	%r3,64
	sr	%r3,%r4
	jnh	0f
	srlg	%r3,%r1,0(%r3)
	sllg	%r0,%r0,0(%r4)
	sllg	%r1,%r1,0(%r4)
	ogr	%r0,%r3
	j	1f
0:	sllg	%r0,%r1,-64(%r4)
	lghi	%r1,0
1:	stmg	%r0,%r1,0(%r2)
	BR_EX	%r14
SYM_FUNC_END(__ashlti3)
EXPORT_SYMBOL(__ashlti3)

SYM_FUNC_START(__ashrti3)
	lmg	%r0,%r1,0(%r3)
	cije	%r4,0,1f
	lhi	%r3,64
	sr	%r3,%r4
	jnh	0f
	sllg	%r3,%r0,0(%r3)
	srlg	%r1,%r1,0(%r4)
	srag	%r0,%r0,0(%r4)
	ogr	%r1,%r3
	j	1f
0:	srag	%r1,%r0,-64(%r4)
	srag	%r0,%r0,63
1:	stmg	%r0,%r1,0(%r2)
	BR_EX	%r14
SYM_FUNC_END(__ashrti3)
EXPORT_SYMBOL(__ashrti3)

SYM_FUNC_START(__lshrti3)
	lmg	%r0,%r1,0(%r3)
	cije	%r4,0,1f
	lhi	%r3,64
	sr	%r3,%r4
	jnh	0f
	sllg	%r3,%r0,0(%r3)
	srlg	%r1,%r1,0(%r4)
	srlg	%r0,%r0,0(%r4)
	ogr	%r1,%r3
	j	1f
0:	srlg	%r1,%r0,-64(%r4)
	lghi	%r0,0
1:	stmg	%r0,%r1,0(%r2)
	BR_EX	%r14
SYM_FUNC_END(__lshrti3)
EXPORT_SYMBOL(__lshrti3)
@@ -51,6 +51,7 @@ static struct dentry *zcore_dir;
 static struct dentry *zcore_reipl_file;
 static struct dentry *zcore_hsa_file;
 static struct ipl_parameter_block *zcore_ipl_block;
+static unsigned long os_info_flags;
 
 static DEFINE_MUTEX(hsa_buf_mutex);
 static char hsa_buf[PAGE_SIZE] __aligned(PAGE_SIZE);
@@ -139,7 +140,13 @@ static ssize_t zcore_reipl_write(struct file *filp, const char __user *buf,
 {
 	if (zcore_ipl_block) {
 		diag308(DIAG308_SET, zcore_ipl_block);
+		if (os_info_flags & OS_INFO_FLAG_REIPL_CLEAR)
 			diag308(DIAG308_LOAD_CLEAR, NULL);
+		/* Use special diag308 subcode for CCW normal ipl */
+		if (zcore_ipl_block->pb0_hdr.pbt == IPL_PBT_CCW)
+			diag308(DIAG308_LOAD_NORMAL_DUMP, NULL);
+		else
+			diag308(DIAG308_LOAD_NORMAL, NULL);
 	}
 	return count;
 }
@@ -212,7 +219,10 @@
  */
 static int __init zcore_reipl_init(void)
 {
+	struct os_info_entry *entry;
 	struct ipib_info ipib_info;
+	unsigned long os_info_addr;
+	struct os_info *os_info;
 	int rc;
 
 	rc = memcpy_hsa_kernel(&ipib_info, __LC_DUMP_REIPL, sizeof(ipib_info));
@@ -234,6 +244,35 @@ static int __init zcore_reipl_init(void)
 		free_page((unsigned long) zcore_ipl_block);
 		zcore_ipl_block = NULL;
 	}
+	/*
+	 * Read the bit-flags field from os_info flags entry.
+	 * Return zero even for os_info read or entry checksum errors in order
+	 * to continue dump processing, considering that os_info could be
+	 * corrupted on the panicked system.
+	 */
+	os_info = (void *)__get_free_page(GFP_KERNEL);
+	if (!os_info)
+		return -ENOMEM;
+	rc = memcpy_hsa_kernel(&os_info_addr, __LC_OS_INFO, sizeof(os_info_addr));
+	if (rc)
+		goto out;
+	if (os_info_addr < sclp.hsa_size)
+		rc = memcpy_hsa_kernel(os_info, os_info_addr, PAGE_SIZE);
+	else
+		rc = memcpy_real(os_info, os_info_addr, PAGE_SIZE);
+	if (rc || os_info_csum(os_info) != os_info->csum)
+		goto out;
+	entry = &os_info->entry[OS_INFO_FLAGS_ENTRY];
+	if (entry->addr && entry->size) {
+		if (entry->addr < sclp.hsa_size)
+			rc = memcpy_hsa_kernel(&os_info_flags, entry->addr, sizeof(os_info_flags));
+		else
+			rc = memcpy_real(&os_info_flags, entry->addr, sizeof(os_info_flags));
+		if (rc || (__force u32)csum_partial(&os_info_flags, entry->size, 0) != entry->csum)
+			os_info_flags = 0;
+	}
+out:
+	free_page((unsigned long)os_info);
 	return 0;
 }
@@ -171,7 +171,7 @@ static int vfio_ccw_sch_probe(struct subchannel *sch)
 		return -ENODEV;
 	}
 
-	parent = kzalloc(sizeof(*parent), GFP_KERNEL);
+	parent = kzalloc(struct_size(parent, mdev_types, 1), GFP_KERNEL);
 	if (!parent)
 		return -ENOMEM;
@@ -79,7 +79,7 @@ struct vfio_ccw_parent {
 
 	struct mdev_parent	parent;
 	struct mdev_type	mdev_type;
-	struct mdev_type	*mdev_types[1];
+	struct mdev_type	*mdev_types[];
 };
 
 /**
@@ -716,6 +716,7 @@ static int vfio_ap_mdev_probe(struct mdev_device *mdev)
 	ret = vfio_register_emulated_iommu_dev(&matrix_mdev->vdev);
 	if (ret)
 		goto err_put_vdev;
+	matrix_mdev->req_trigger = NULL;
 	dev_set_drvdata(&mdev->dev, matrix_mdev);
 	mutex_lock(&matrix_dev->mdevs_lock);
 	list_add(&matrix_mdev->node, &matrix_dev->mdev_list);
@@ -1735,6 +1736,26 @@ static void vfio_ap_mdev_close_device(struct vfio_device *vdev)
 	vfio_ap_mdev_unset_kvm(matrix_mdev);
 }
 
+static void vfio_ap_mdev_request(struct vfio_device *vdev, unsigned int count)
+{
+	struct device *dev = vdev->dev;
+	struct ap_matrix_mdev *matrix_mdev;
+
+	matrix_mdev = container_of(vdev, struct ap_matrix_mdev, vdev);
+
+	if (matrix_mdev->req_trigger) {
+		if (!(count % 10))
+			dev_notice_ratelimited(dev,
+					       "Relaying device request to user (#%u)\n",
+					       count);
+
+		eventfd_signal(matrix_mdev->req_trigger, 1);
+	} else if (count == 0) {
+		dev_notice(dev,
+			   "No device request registered, blocked until released by user\n");
+	}
+}
+
 static int vfio_ap_mdev_get_device_info(unsigned long arg)
 {
 	unsigned long minsz;
@@ -1750,11 +1771,115 @@ static int vfio_ap_mdev_get_device_info(unsigned long arg)
 	info.flags = VFIO_DEVICE_FLAGS_AP | VFIO_DEVICE_FLAGS_RESET;
 	info.num_regions = 0;
-	info.num_irqs = 0;
+	info.num_irqs = VFIO_AP_NUM_IRQS;
+
+	return copy_to_user((void __user *)arg, &info, minsz) ? -EFAULT : 0;
+}
+
+static ssize_t vfio_ap_get_irq_info(unsigned long arg)
+{
+	unsigned long minsz;
+	struct vfio_irq_info info;
+
+	minsz = offsetofend(struct vfio_irq_info, count);
+
+	if (copy_from_user(&info, (void __user *)arg, minsz))
+		return -EFAULT;
+
+	if (info.argsz < minsz || info.index >= VFIO_AP_NUM_IRQS)
+		return -EINVAL;
+
+	switch (info.index) {
+	case VFIO_AP_REQ_IRQ_INDEX:
+		info.count = 1;
+		info.flags = VFIO_IRQ_INFO_EVENTFD;
+		break;
+	default:
+		return -EINVAL;
+	}
 
 	return copy_to_user((void __user *)arg, &info, minsz) ? -EFAULT : 0;
 }
 
+static int vfio_ap_irq_set_init(struct vfio_irq_set *irq_set, unsigned long arg)
+{
+	int ret;
+	size_t data_size;
+	unsigned long minsz;
+
+	minsz = offsetofend(struct vfio_irq_set, count);
+
+	if (copy_from_user(irq_set, (void __user *)arg, minsz))
+		return -EFAULT;
+
+	ret = vfio_set_irqs_validate_and_prepare(irq_set, 1, VFIO_AP_NUM_IRQS,
+						 &data_size);
+	if (ret)
+		return ret;
+
+	if (!(irq_set->flags & VFIO_IRQ_SET_ACTION_TRIGGER))
+		return -EINVAL;
+
+	return 0;
+}
+
+static int vfio_ap_set_request_irq(struct ap_matrix_mdev *matrix_mdev,
+				   unsigned long arg)
+{
+	s32 fd;
+	void __user *data;
+	unsigned long minsz;
+	struct eventfd_ctx *req_trigger;
+
+	minsz = offsetofend(struct vfio_irq_set, count);
+	data = (void __user *)(arg + minsz);
+
+	if (get_user(fd, (s32 __user *)data))
+		return -EFAULT;
+
+	if (fd == -1) {
+		if (matrix_mdev->req_trigger)
+			eventfd_ctx_put(matrix_mdev->req_trigger);
+		matrix_mdev->req_trigger = NULL;
+	} else if (fd >= 0) {
+		req_trigger = eventfd_ctx_fdget(fd);
+		if (IS_ERR(req_trigger))
+			return PTR_ERR(req_trigger);
+
+		if (matrix_mdev->req_trigger)
+			eventfd_ctx_put(matrix_mdev->req_trigger);
+
+		matrix_mdev->req_trigger = req_trigger;
+	} else {
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int vfio_ap_set_irqs(struct ap_matrix_mdev *matrix_mdev,
+			    unsigned long arg)
+{
+	int ret;
+	struct vfio_irq_set irq_set;
+
+	ret = vfio_ap_irq_set_init(&irq_set, arg);
+	if (ret)
+		return ret;
+
+	switch (irq_set.flags & VFIO_IRQ_SET_DATA_TYPE_MASK) {
+	case VFIO_IRQ_SET_DATA_EVENTFD:
+		switch (irq_set.index) {
+		case VFIO_AP_REQ_IRQ_INDEX:
+			return vfio_ap_set_request_irq(matrix_mdev, arg);
+		default:
+			return -EINVAL;
+		}
+	default:
+		return -EINVAL;
+	}
+}
+
 static ssize_t vfio_ap_mdev_ioctl(struct vfio_device *vdev,
 				  unsigned int cmd, unsigned long arg)
 {
@@ -1770,6 +1895,12 @@ static ssize_t vfio_ap_mdev_ioctl(struct vfio_device *vdev,
 	case VFIO_DEVICE_RESET:
 		ret = vfio_ap_mdev_reset_queues(&matrix_mdev->qtable);
 		break;
+	case VFIO_DEVICE_GET_IRQ_INFO:
+		ret = vfio_ap_get_irq_info(arg);
+		break;
+	case VFIO_DEVICE_SET_IRQS:
+		ret = vfio_ap_set_irqs(matrix_mdev, arg);
+		break;
 	default:
 		ret = -EOPNOTSUPP;
 		break;
@@ -1844,6 +1975,7 @@ static const struct vfio_device_ops vfio_ap_matrix_dev_ops = {
 	.bind_iommufd = vfio_iommufd_emulated_bind,
 	.unbind_iommufd = vfio_iommufd_emulated_unbind,
 	.attach_ioas = vfio_iommufd_emulated_attach_ioas,
+	.request = vfio_ap_mdev_request
 };
 
 static struct mdev_driver vfio_ap_matrix_driver = {
@@ -15,6 +15,7 @@
 #include <linux/types.h>
 #include <linux/mdev.h>
 #include <linux/delay.h>
+#include <linux/eventfd.h>
 #include <linux/mutex.h>
 #include <linux/kvm_host.h>
 #include <linux/vfio.h>
@@ -103,6 +104,7 @@ struct ap_queue_table {
  *		PQAP(AQIC) instruction.
  * @mdev: the mediated device
  * @qtable: table of queues (struct vfio_ap_queue) assigned to the mdev
+ * @req_trigger eventfd ctx for signaling userspace to return a device
 * @apm_add: bitmap of APIDs added to the host's AP configuration
 * @aqm_add: bitmap of APQIs added to the host's AP configuration
 * @adm_add: bitmap of control domain numbers added to the host's AP
@@ -117,6 +119,7 @@ struct ap_matrix_mdev {
 	crypto_hook pqap_hook;
 	struct mdev_device *mdev;
 	struct ap_queue_table qtable;
+	struct eventfd_ctx *req_trigger;
 	DECLARE_BITMAP(apm_add, AP_DEVICES);
 	DECLARE_BITMAP(aqm_add, AP_DOMAINS);
 	DECLARE_BITMAP(adm_add, AP_DOMAINS);
@@ -646,6 +646,15 @@ enum {
 	VFIO_CCW_NUM_IRQS
 };
 
+/*
+ * The vfio-ap bus driver makes use of the following IRQ index mapping.
+ * Unimplemented IRQ types return a count of zero.
+ */
+enum {
+	VFIO_AP_REQ_IRQ_INDEX,
+	VFIO_AP_NUM_IRQS
+};
+
 /**
  * VFIO_DEVICE_GET_PCI_HOT_RESET_INFO - _IOWR(VFIO_TYPE, VFIO_BASE + 12,
  *					struct vfio_pci_hot_reset_info)