Commit 6a466769 authored by Linus Torvalds

Merge tag 's390-6.5-1' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux

Pull s390 updates from Alexander Gordeev:

 - Fix the style of the protected key API driver source: use reverse
   x-mas tree ordering for all local variable declarations

 - Rework the protected key API driver to no longer use the structs
   pkey_protkey and pkey_clrkey. Both structures have a fixed size
   buffer, but with the support of ECC protected keys these buffers are
   not big enough. Use dynamic buffers internally and transparently for
   userspace

 - Add support for a new 'non CCA clear key token' with ECC clear keys
   supported: ECC P256, ECC P384, ECC P521, ECC ED25519 and ECC ED448.
   This makes it possible to derive a protected key from an ECC clear
   key input via the PKEY_KBLOB2PROTK3 ioctl; currently the only way to
   derive these is via the PCKMO instruction

 - The s390 PMUs for the PAI crypto and PAI extension 1 NNPA counters
   use atomic_t for reference counting; replace this with the proper
   data type refcount_t

 - Select ARCH_SUPPORTS_INT128, but limit this to clang for now, since
   gcc generates inefficient code, which may lead to stack overflows

 - Replace the one-element array with a flexible-array member in struct
   vfio_ccw_parent and refactor the rest of the code accordingly. Also,
   prefer struct_size() over open-coded sizeof() versions

 - Introduce OS_INFO_FLAGS_ENTRY pointing to a flags field and
   OS_INFO_FLAG_REIPL_CLEAR flag that informs a dumper whether the
   system memory should be cleared or not once dumped

 - Fix a hang when a user attempts to remove a VFIO-AP mediated device
   attached to a guest: add the VFIO_DEVICE_GET_IRQ_INFO and
   VFIO_DEVICE_SET_IRQS ioctls and wire up the VFIO bus driver callback
   to request a release of the device (a userspace sketch of these
   ioctls follows this list)

 - Fix calculation for R_390_GOTENT relocations for modules

 - Allow any user space process with the CAP_PERFMON capability to read
   and display the CPU Measurement facility counter sets

 - Rework large statically-defined per-CPU cpu_cf_events data structure
   and replace it with dynamically allocated structures created when a
   perf_event_open() system call is invoked or /dev/hwctr device is
   accessed
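
For the vfio-ap item above, the two new ioctls follow the generic VFIO
IRQ model from <linux/vfio.h>. Below is a minimal userspace sketch of
querying the request IRQ and arming it with an eventfd; the device fd
and eventfd setup are assumed, VFIO_AP_REQ_IRQ_INDEX is the index name
introduced by this series, and error handling is elided:

    #include <linux/vfio.h>
    #include <string.h>
    #include <sys/ioctl.h>

    /* device_fd: open mdev device fd; req_efd: eventfd signalled on request */
    static void arm_req_irq(int device_fd, int req_efd)
    {
            struct vfio_irq_info info = {
                    .argsz = sizeof(info),
                    .index = VFIO_AP_REQ_IRQ_INDEX,
            };
            char buf[sizeof(struct vfio_irq_set) + sizeof(int)];
            struct vfio_irq_set *set = (struct vfio_irq_set *)buf;

            /* How many request IRQs does the device expose? */
            ioctl(device_fd, VFIO_DEVICE_GET_IRQ_INFO, &info);

            /* Register the eventfd as trigger for that IRQ index */
            set->argsz = sizeof(buf);
            set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
            set->index = info.index;
            set->start = 0;
            set->count = 1;
            memcpy(set->data, &req_efd, sizeof(int));
            ioctl(device_fd, VFIO_DEVICE_SET_IRQS, set);
    }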

* tag 's390-6.5-1' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux:
  s390/cpum_cf: rework PER_CPU_DEFINE of struct cpu_cf_events
  s390/cpum_cf: open access to hwctr device for CAP_PERFMON privileged process
  s390/module: fix rela calculation for R_390_GOTENT
  s390/vfio-ap: wire in the vfio_device_ops request callback
  s390/vfio-ap: realize the VFIO_DEVICE_SET_IRQS ioctl
  s390/vfio-ap: realize the VFIO_DEVICE_GET_IRQ_INFO ioctl
  s390/pkey: add support for ecc clear key
  s390/pkey: do not use struct pkey_protkey
  s390/pkey: introduce reverse x-mas trees
  s390/zcore: conditionally clear memory on reipl
  s390/ipl: add REIPL_CLEAR flag to os_info
  vfio/ccw: use struct_size() helper
  vfio/ccw: replace one-element array with flexible-array member
  s390: select ARCH_SUPPORTS_INT128
  s390/pai_ext: replace atomic_t with refcount_t
  s390/pai_crypto: replace atomic_t with refcount_t
parents 8d8026f3 9b9cf3c7
@@ -117,6 +117,7 @@ config S390
 	select ARCH_SUPPORTS_ATOMIC_RMW
 	select ARCH_SUPPORTS_DEBUG_PAGEALLOC
 	select ARCH_SUPPORTS_HUGETLBFS
+	select ARCH_SUPPORTS_INT128 if CC_HAS_INT128 && CC_IS_CLANG
 	select ARCH_SUPPORTS_NUMA_BALANCING
 	select ARCH_SUPPORTS_PER_VMA_LOCK
 	select ARCH_USE_BUILTIN_BSWAP
......
@@ -5,7 +5,7 @@
  * s390 implementation of the AES Cipher Algorithm with protected keys.
  *
  * s390 Version:
- *   Copyright IBM Corp. 2017,2020
+ *   Copyright IBM Corp. 2017, 2023
  *   Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
  *		Harald Freudenberger <freude@de.ibm.com>
  */
@@ -132,7 +132,8 @@ static inline int __paes_keyblob2pkey(struct key_blob *kb,
 		if (i > 0 && ret == -EAGAIN && in_task())
 			if (msleep_interruptible(1000))
 				return -EINTR;
-		ret = pkey_keyblob2pkey(kb->key, kb->keylen, pk);
+		ret = pkey_keyblob2pkey(kb->key, kb->keylen,
+					pk->protkey, &pk->len, &pk->type);
 		if (ret == 0)
 			break;
 	}
@@ -145,6 +146,7 @@ static inline int __paes_convert_key(struct s390_paes_ctx *ctx)
 	int ret;
 	struct pkey_protkey pkey;
 
+	pkey.len = sizeof(pkey.protkey);
 	ret = __paes_keyblob2pkey(&ctx->kb, &pkey);
 	if (ret)
 		return ret;
@@ -414,6 +416,9 @@ static inline int __xts_paes_convert_key(struct s390_pxts_ctx *ctx)
 {
 	struct pkey_protkey pkey0, pkey1;
 
+	pkey0.len = sizeof(pkey0.protkey);
+	pkey1.len = sizeof(pkey1.protkey);
+
 	if (__paes_keyblob2pkey(&ctx->kb[0], &pkey0) ||
 	    __paes_keyblob2pkey(&ctx->kb[1], &pkey1))
 		return -EINVAL;
......
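
The paes hunks above show the caller-side contract of the reworked pkey
interface: pkey_protkey::len is now an in/out parameter, so the caller
preloads it with the destination buffer size and gets the actual
protected-key length back. A condensed sketch of that contract, using
only names that appear in the diff:

    struct pkey_protkey pkey;
    int ret;

    pkey.len = sizeof(pkey.protkey);    /* in: buffer size, out: actual length */
    ret = pkey_keyblob2pkey(kb->key, kb->keylen,
                            pkey.protkey, &pkey.len, &pkey.type);
    if (ret)
            return ret;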
@@ -6,4 +6,8 @@
 #include <asm/fpu/api.h>
 #include <asm-generic/asm-prototypes.h>
 
+__int128_t __ashlti3(__int128_t a, int b);
+__int128_t __ashrti3(__int128_t a, int b);
+__int128_t __lshrti3(__int128_t a, int b);
+
 #endif /* _ASM_S390_PROTOTYPES_H */
@@ -2,7 +2,7 @@
 /*
  * CP Assist for Cryptographic Functions (CPACF)
  *
- * Copyright IBM Corp. 2003, 2017
+ * Copyright IBM Corp. 2003, 2023
  * Author(s): Thomas Spatzier
  *	      Jan Glauber
  *	      Harald Freudenberger (freude@de.ibm.com)
@@ -132,6 +132,11 @@
 #define CPACF_PCKMO_ENC_AES_128_KEY		0x12
 #define CPACF_PCKMO_ENC_AES_192_KEY		0x13
 #define CPACF_PCKMO_ENC_AES_256_KEY		0x14
+#define CPACF_PCKMO_ENC_ECC_P256_KEY		0x20
+#define CPACF_PCKMO_ENC_ECC_P384_KEY		0x21
+#define CPACF_PCKMO_ENC_ECC_P521_KEY		0x22
+#define CPACF_PCKMO_ENC_ECC_ED25519_KEY		0x28
+#define CPACF_PCKMO_ENC_ECC_ED448_KEY		0x29
 
 /*
  * Function codes for the PRNO (PERFORM RANDOM NUMBER OPERATION)
......
@@ -16,6 +16,9 @@
 #define OS_INFO_VMCOREINFO	0
 #define OS_INFO_REIPL_BLOCK	1
+#define OS_INFO_FLAGS_ENTRY	2
+
+#define OS_INFO_FLAG_REIPL_CLEAR	(1UL << 0)
 
 struct os_info_entry {
 	u64	addr;
@@ -30,8 +33,8 @@ struct os_info {
 	u16	version_minor;
 	u64	crashkernel_addr;
 	u64	crashkernel_size;
-	struct os_info_entry entry[2];
-	u8	reserved[4024];
+	struct os_info_entry entry[3];
+	u8	reserved[4004];
 } __packed;
 
 void os_info_init(void);
......
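
A size cross-check on the hunk above, assuming the os_info_entry fields
not visible in the truncated context are u64 size and u32 csum:

    /* One packed os_info_entry (addr shown above; size/csum inferred): */
    struct os_info_entry {
            u64     addr;
            u64     size;
            u32     csum;
    } __packed;                         /* 8 + 8 + 4 = 20 bytes */

    /* entry[2] -> entry[3] grows the array by 20 bytes, and
     * reserved[4024] -> reserved[4004] shrinks the padding by the same
     * amount, so sizeof(struct os_info) and the offsets of the preceding
     * fields stay unchanged. */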
@@ -2,7 +2,7 @@
 /*
  * Kernelspace interface to the pkey device driver
  *
- * Copyright IBM Corp. 2016,2019
+ * Copyright IBM Corp. 2016, 2023
  *
  * Author: Harald Freudenberger <freude@de.ibm.com>
  *
@@ -23,6 +23,6 @@
  * @return 0 on success, negative errno value on failure
  */
 int pkey_keyblob2pkey(const u8 *key, u32 keylen,
-		      struct pkey_protkey *protkey);
+		      u8 *protkey, u32 *protkeylen, u32 *protkeytype);
 
 #endif /* _KAPI_PKEY_H */
#endif /* _KAPI_PKEY_H */ #endif /* _KAPI_PKEY_H */
@@ -2,7 +2,7 @@
 /*
  * Userspace interface to the pkey device driver
  *
- * Copyright IBM Corp. 2017, 2019
+ * Copyright IBM Corp. 2017, 2023
  *
  * Author: Harald Freudenberger <freude@de.ibm.com>
  *
@@ -32,10 +32,15 @@
 #define MINKEYBLOBSIZE	SECKEYBLOBSIZE
 
 /* defines for the type field within the pkey_protkey struct */
 #define PKEY_KEYTYPE_AES_128		1
 #define PKEY_KEYTYPE_AES_192		2
 #define PKEY_KEYTYPE_AES_256		3
 #define PKEY_KEYTYPE_ECC		4
+#define PKEY_KEYTYPE_ECC_P256		5
+#define PKEY_KEYTYPE_ECC_P384		6
+#define PKEY_KEYTYPE_ECC_P521		7
+#define PKEY_KEYTYPE_ECC_ED25519	8
+#define PKEY_KEYTYPE_ECC_ED448		9
 
 /* the newer ioctls use a pkey_key_type enum for type information */
 enum pkey_key_type {
......
@@ -176,6 +176,8 @@ static bool reipl_fcp_clear;
 static bool reipl_ccw_clear;
 static bool reipl_eckd_clear;
 
+static unsigned long os_info_flags;
+
 static inline int __diag308(unsigned long subcode, unsigned long addr)
 {
 	union register_pair r1;
@@ -1938,6 +1940,20 @@ static void dump_reipl_run(struct shutdown_trigger *trigger)
 	struct lowcore *abs_lc;
 	unsigned int csum;
 
+	/*
+	 * Set REIPL_CLEAR flag in os_info flags entry indicating
+	 * 'clear' sysfs attribute has been set on the panicked system
+	 * for specified reipl type.
+	 * Always set for IPL_TYPE_NSS and IPL_TYPE_UNKNOWN.
+	 */
+	if ((reipl_type == IPL_TYPE_CCW && reipl_ccw_clear) ||
+	    (reipl_type == IPL_TYPE_ECKD && reipl_eckd_clear) ||
+	    (reipl_type == IPL_TYPE_FCP && reipl_fcp_clear) ||
+	    (reipl_type == IPL_TYPE_NVME && reipl_nvme_clear) ||
+	    reipl_type == IPL_TYPE_NSS ||
+	    reipl_type == IPL_TYPE_UNKNOWN)
+		os_info_flags |= OS_INFO_FLAG_REIPL_CLEAR;
+	os_info_entry_add(OS_INFO_FLAGS_ENTRY, &os_info_flags, sizeof(os_info_flags));
 	csum = (__force unsigned int)
 	       csum_partial(reipl_block_actual, reipl_block_actual->hdr.len, 0);
 	abs_lc = get_abs_lowcore();
......
@@ -352,7 +352,8 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
 			rc = apply_rela_bits(loc, val, 0, 64, 0, write);
 		else if (r_type == R_390_GOTENT ||
 			 r_type == R_390_GOTPLTENT) {
-			val += (Elf_Addr) me->mem[MOD_TEXT].base - loc;
+			val += (Elf_Addr)me->mem[MOD_TEXT].base +
+				me->arch.got_offset - loc;
 			rc = apply_rela_bits(loc, val, 1, 32, 1, write);
 		}
 		break;
......
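
For reference, the s390 ELF ABI computes R_390_GOTENT as
(G + O + A - P) >> 1: the PC-relative, halfword-scaled distance to the
symbol's GOT entry. A sketch of the corrected arithmetic, under the
assumption (consistent with the hunk) that val already holds the
GOT-entry offset plus addend at this point:

    /* The module's GOT lives got_offset bytes into the MOD_TEXT mapping,
     * so the GOT origin G is base + got_offset, not base alone (which is
     * what the old code effectively used). */
    Elf_Addr got_base = (Elf_Addr)me->mem[MOD_TEXT].base + me->arch.got_offset;

    val += got_base - loc;                           /* G + O + A - P */
    rc = apply_rela_bits(loc, val, 1, 32, 1, write); /* signed, 32 bits, >> 1 */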
@@ -76,6 +76,7 @@ static inline int ctr_stcctm(enum cpumf_ctr_set set, u64 range, u64 *dest)
 }
 
 struct cpu_cf_events {
+	refcount_t refcnt;	/* Reference count */
 	atomic_t ctr_set[CPUMF_CTR_SET_MAX];
 	u64 state;		/* For perf_event_open SVC */
 	u64 dev_state;		/* For /dev/hwctr */
@@ -88,9 +89,6 @@ struct cpu_cf_events {
 	unsigned int sets;	/* # Counter set saved in memory */
 };
 
-/* Per-CPU event structure for the counter facility */
-static DEFINE_PER_CPU(struct cpu_cf_events, cpu_cf_events);
-
 static unsigned int cfdiag_cpu_speed;	/* CPU speed for CF_DIAG trailer */
 static debug_info_t *cf_dbg;
@@ -103,6 +101,221 @@ static debug_info_t *cf_dbg;
  */
 static struct cpumf_ctr_info	cpumf_ctr_info;
 
+struct cpu_cf_ptr {
+	struct cpu_cf_events *cpucf;
+};
+
+static struct cpu_cf_root {		/* Anchor to per CPU data */
+	refcount_t refcnt;		/* Overall active events */
+	struct cpu_cf_ptr __percpu *cfptr;
+} cpu_cf_root;
+
+/*
+ * Serialize event initialization and event removal. Both are called from
+ * user space in task context with perf_event_open() and close()
+ * system calls.
+ *
+ * This mutex serializes functions cpum_cf_alloc_cpu() called at event
+ * initialization via cpumf_pmu_event_init() and function cpum_cf_free_cpu()
+ * called at event removal via call back function hw_perf_event_destroy()
+ * when the event is deleted. They are serialized to enforce correct
+ * bookkeeping of pointer and reference counts anchored by
+ * struct cpu_cf_root and the access to cpu_cf_root::refcnt and the
+ * per CPU pointers stored in cpu_cf_root::cfptr.
+ */
+static DEFINE_MUTEX(pmc_reserve_mutex);
+
+/*
+ * Get pointer to per-cpu structure.
+ *
+ * Function get_cpu_cfhw() is called from
+ * - cfset_copy_all(): This function is protected by cpus_read_lock(), so
+ *   CPU hot plug remove can not happen. Event removal requires a close()
+ *   first.
+ *
+ * Function this_cpu_cfhw() is called from perf common code functions:
+ * - pmu_{en|dis}able(), pmu_{add|del}() and pmu_{start|stop}():
+ *   All functions execute with interrupts disabled on that particular CPU.
+ * - cfset_ioctl_{on|off}, cfset_cpu_read(): see comment cfset_copy_all().
+ *
+ * Therefore it is safe to access the CPU specific pointer to the event.
+ */
+static struct cpu_cf_events *get_cpu_cfhw(int cpu)
+{
+	struct cpu_cf_ptr __percpu *p = cpu_cf_root.cfptr;
+
+	if (p) {
+		struct cpu_cf_ptr *q = per_cpu_ptr(p, cpu);
+
+		return q->cpucf;
+	}
+	return NULL;
+}
+
+static struct cpu_cf_events *this_cpu_cfhw(void)
+{
+	return get_cpu_cfhw(smp_processor_id());
+}
+
+/* Disable counter sets on dedicated CPU */
+static void cpum_cf_reset_cpu(void *flags)
+{
+	lcctl(0);
+}
+
+/* Free per CPU data when the last event is removed. */
+static void cpum_cf_free_root(void)
+{
+	if (!refcount_dec_and_test(&cpu_cf_root.refcnt))
+		return;
+	free_percpu(cpu_cf_root.cfptr);
+	cpu_cf_root.cfptr = NULL;
+	irq_subclass_unregister(IRQ_SUBCLASS_MEASUREMENT_ALERT);
+	on_each_cpu(cpum_cf_reset_cpu, NULL, 1);
+	debug_sprintf_event(cf_dbg, 4, "%s2 root.refcnt %u cfptr %px\n",
+			    __func__, refcount_read(&cpu_cf_root.refcnt),
+			    cpu_cf_root.cfptr);
+}
+
+/*
+ * On initialization of first event also allocate per CPU data dynamically.
+ * Start with an array of pointers, the array size is the maximum number of
+ * CPUs possible, which might be larger than the number of CPUs currently
+ * online.
+ */
+static int cpum_cf_alloc_root(void)
+{
+	int rc = 0;
+
+	if (refcount_inc_not_zero(&cpu_cf_root.refcnt))
+		return rc;
+
+	/* The memory is already zeroed. */
+	cpu_cf_root.cfptr = alloc_percpu(struct cpu_cf_ptr);
+	if (cpu_cf_root.cfptr) {
+		refcount_set(&cpu_cf_root.refcnt, 1);
+		on_each_cpu(cpum_cf_reset_cpu, NULL, 1);
+		irq_subclass_register(IRQ_SUBCLASS_MEASUREMENT_ALERT);
+	} else {
+		rc = -ENOMEM;
+	}
+
+	return rc;
+}
+
+/* Free CPU counter data structure for a PMU */
+static void cpum_cf_free_cpu(int cpu)
+{
+	struct cpu_cf_events *cpuhw;
+	struct cpu_cf_ptr *p;
+
+	mutex_lock(&pmc_reserve_mutex);
+	/*
+	 * When invoked via CPU hotplug handler, there might be no events
+	 * installed or that particular CPU might not have an
+	 * event installed. This anchor pointer can be NULL!
+	 */
+	if (!cpu_cf_root.cfptr)
+		goto out;
+	p = per_cpu_ptr(cpu_cf_root.cfptr, cpu);
+	cpuhw = p->cpucf;
+	/*
+	 * Might be zero when called from CPU hotplug handler and no event
+	 * installed on that CPU, but on different CPUs.
+	 */
+	if (!cpuhw)
+		goto out;
+
+	if (refcount_dec_and_test(&cpuhw->refcnt)) {
+		kfree(cpuhw);
+		p->cpucf = NULL;
+	}
+	cpum_cf_free_root();
+out:
+	mutex_unlock(&pmc_reserve_mutex);
+}
+
+/* Allocate CPU counter data structure for a PMU. Called under mutex lock. */
+static int cpum_cf_alloc_cpu(int cpu)
+{
+	struct cpu_cf_events *cpuhw;
+	struct cpu_cf_ptr *p;
+	int rc;
+
+	mutex_lock(&pmc_reserve_mutex);
+	rc = cpum_cf_alloc_root();
+	if (rc)
+		goto unlock;
+	p = per_cpu_ptr(cpu_cf_root.cfptr, cpu);
+	cpuhw = p->cpucf;
+
+	if (!cpuhw) {
+		cpuhw = kzalloc(sizeof(*cpuhw), GFP_KERNEL);
+		if (cpuhw) {
+			p->cpucf = cpuhw;
+			refcount_set(&cpuhw->refcnt, 1);
+		} else {
+			rc = -ENOMEM;
+		}
+	} else {
+		refcount_inc(&cpuhw->refcnt);
+	}
+	if (rc) {
+		/*
+		 * Error in allocation of event, decrement anchor. Since
+		 * cpu_cf_event in not created, its destroy() function is not
+		 * invoked. Adjust the reference counter for the anchor.
+		 */
+		cpum_cf_free_root();
+	}
+unlock:
+	mutex_unlock(&pmc_reserve_mutex);
+	return rc;
+}
+
+/*
+ * Create/delete per CPU data structures for /dev/hwctr interface and events
+ * created by perf_event_open().
+ * If cpu is -1, track task on all available CPUs. This requires
+ * allocation of hardware data structures for all CPUs. This setup handles
+ * perf_event_open() with task context and /dev/hwctr interface.
+ * If cpu is non-zero install event on this CPU only. This setup handles
+ * perf_event_open() with CPU context.
+ */
+static int cpum_cf_alloc(int cpu)
+{
+	cpumask_var_t mask;
+	int rc;
+
+	if (cpu == -1) {
+		if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
+			return -ENOMEM;
+		for_each_online_cpu(cpu) {
+			rc = cpum_cf_alloc_cpu(cpu);
+			if (rc) {
+				for_each_cpu(cpu, mask)
+					cpum_cf_free_cpu(cpu);
+				break;
+			}
+			cpumask_set_cpu(cpu, mask);
+		}
+		free_cpumask_var(mask);
+	} else {
+		rc = cpum_cf_alloc_cpu(cpu);
+	}
+	return rc;
+}
+
+static void cpum_cf_free(int cpu)
+{
+	if (cpu == -1) {
+		for_each_online_cpu(cpu)
+			cpum_cf_free_cpu(cpu);
+	} else {
+		cpum_cf_free_cpu(cpu);
+	}
+}
+
 #define CF_DIAG_CTRSET_DEF		0xfeef	/* Counter set header mark */
 						/* interval in seconds */
@@ -451,10 +664,10 @@ static int validate_ctr_version(const u64 config, enum cpumf_ctr_set set)
  */
 static void cpumf_pmu_enable(struct pmu *pmu)
 {
-	struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
+	struct cpu_cf_events *cpuhw = this_cpu_cfhw();
 	int err;
 
-	if (cpuhw->flags & PMU_F_ENABLED)
+	if (!cpuhw || (cpuhw->flags & PMU_F_ENABLED))
 		return;
 
 	err = lcctl(cpuhw->state | cpuhw->dev_state);
@@ -471,11 +684,11 @@ static void cpumf_pmu_enable(struct pmu *pmu)
  */
 static void cpumf_pmu_disable(struct pmu *pmu)
 {
-	struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
-	int err;
+	struct cpu_cf_events *cpuhw = this_cpu_cfhw();
 	u64 inactive;
+	int err;
 
-	if (!(cpuhw->flags & PMU_F_ENABLED))
+	if (!cpuhw || !(cpuhw->flags & PMU_F_ENABLED))
 		return;
 
 	inactive = cpuhw->state & ~((1 << CPUMF_LCCTL_ENABLE_SHIFT) - 1);
@@ -487,58 +700,10 @@ static void cpumf_pmu_disable(struct pmu *pmu)
 	cpuhw->flags &= ~PMU_F_ENABLED;
 }
 
-#define PMC_INIT	0UL
-#define PMC_RELEASE	1UL
-
-static void cpum_cf_setup_cpu(void *flags)
-{
-	struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
-
-	switch ((unsigned long)flags) {
-	case PMC_INIT:
-		cpuhw->flags |= PMU_F_RESERVED;
-		break;
-	case PMC_RELEASE:
-		cpuhw->flags &= ~PMU_F_RESERVED;
-		break;
-	}
-
-	/* Disable CPU counter sets */
-	lcctl(0);
-	debug_sprintf_event(cf_dbg, 5, "%s flags %#x flags %#x state %#llx\n",
-			    __func__, *(int *)flags, cpuhw->flags,
-			    cpuhw->state);
-}
-
-/* Initialize the CPU-measurement counter facility */
-static int __kernel_cpumcf_begin(void)
-{
-	on_each_cpu(cpum_cf_setup_cpu, (void *)PMC_INIT, 1);
-	irq_subclass_register(IRQ_SUBCLASS_MEASUREMENT_ALERT);
-
-	return 0;
-}
-
-/* Release the CPU-measurement counter facility */
-static void __kernel_cpumcf_end(void)
-{
-	on_each_cpu(cpum_cf_setup_cpu, (void *)PMC_RELEASE, 1);
-	irq_subclass_unregister(IRQ_SUBCLASS_MEASUREMENT_ALERT);
-}
-
-/* Number of perf events counting hardware events */
-static atomic_t num_events = ATOMIC_INIT(0);
-/* Used to avoid races in calling reserve/release_cpumf_hardware */
-static DEFINE_MUTEX(pmc_reserve_mutex);
-
 /* Release the PMU if event is the last perf event */
 static void hw_perf_event_destroy(struct perf_event *event)
 {
-	mutex_lock(&pmc_reserve_mutex);
-	if (atomic_dec_return(&num_events) == 0)
-		__kernel_cpumcf_end();
-	mutex_unlock(&pmc_reserve_mutex);
+	cpum_cf_free(event->cpu);
 }
 
 /* CPUMF <-> perf event mappings for kernel+userspace (basic set) */
@@ -562,14 +727,6 @@ static const int cpumf_generic_events_user[] = {
 	[PERF_COUNT_HW_BUS_CYCLES]	    = -1,
 };
 
-static void cpumf_hw_inuse(void)
-{
-	mutex_lock(&pmc_reserve_mutex);
-	if (atomic_inc_return(&num_events) == 1)
-		__kernel_cpumcf_begin();
-	mutex_unlock(&pmc_reserve_mutex);
-}
-
 static int is_userspace_event(u64 ev)
 {
 	return cpumf_generic_events_user[PERF_COUNT_HW_CPU_CYCLES] == ev ||
@@ -653,7 +810,8 @@ static int __hw_perf_event_init(struct perf_event *event, unsigned int type)
 	}
 
 	/* Initialize for using the CPU-measurement counter facility */
-	cpumf_hw_inuse();
+	if (cpum_cf_alloc(event->cpu))
+		return -ENOMEM;
 	event->destroy = hw_perf_event_destroy;
 
 	/*
@@ -756,7 +914,7 @@ static void cpumf_pmu_read(struct perf_event *event)
 
 static void cpumf_pmu_start(struct perf_event *event, int flags)
 {
-	struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
+	struct cpu_cf_events *cpuhw = this_cpu_cfhw();
 	struct hw_perf_event *hwc = &event->hw;
 	int i;
 
@@ -830,7 +988,7 @@ static int cfdiag_push_sample(struct perf_event *event,
 
 static void cpumf_pmu_stop(struct perf_event *event, int flags)
 {
-	struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
+	struct cpu_cf_events *cpuhw = this_cpu_cfhw();
 	struct hw_perf_event *hwc = &event->hw;
 	int i;
 
@@ -857,8 +1015,7 @@ static void cpumf_pmu_stop(struct perf_event *event, int flags)
 					  false);
 			if (cfdiag_diffctr(cpuhw, event->hw.config_base))
 				cfdiag_push_sample(event, cpuhw);
-		} else if (cpuhw->flags & PMU_F_RESERVED) {
-			/* Only update when PMU not hotplugged off */
+		} else {
 			hw_perf_event_update(event);
 		}
 		hwc->state |= PERF_HES_UPTODATE;
@@ -867,7 +1024,7 @@ static void cpumf_pmu_stop(struct perf_event *event, int flags)
 
 static int cpumf_pmu_add(struct perf_event *event, int flags)
 {
-	struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
+	struct cpu_cf_events *cpuhw = this_cpu_cfhw();
 
 	ctr_set_enable(&cpuhw->state, event->hw.config_base);
 	event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
@@ -880,7 +1037,7 @@ static int cpumf_pmu_add(struct perf_event *event, int flags)
 
 static void cpumf_pmu_del(struct perf_event *event, int flags)
 {
-	struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
+	struct cpu_cf_events *cpuhw = this_cpu_cfhw();
 	int i;
 
 	cpumf_pmu_stop(event, PERF_EF_UPDATE);
@@ -912,29 +1069,83 @@ static struct pmu cpumf_pmu = {
 	.read	      = cpumf_pmu_read,
 };
 
-static int cpum_cf_setup(unsigned int cpu, unsigned long flags)
-{
-	local_irq_disable();
-	cpum_cf_setup_cpu((void *)flags);
-	local_irq_enable();
-	return 0;
-}
+static struct cfset_session {		/* CPUs and counter set bit mask */
+	struct list_head head;		/* Head of list of active processes */
+} cfset_session = {
+	.head = LIST_HEAD_INIT(cfset_session.head)
+};
+
+static refcount_t cfset_opencnt = REFCOUNT_INIT(0);	/* Access count */
+/*
+ * Synchronize access to device /dev/hwc. This mutex protects against
+ * concurrent access to functions cfset_open() and cfset_release().
+ * Same for CPU hotplug add and remove events triggering
+ * cpum_cf_online_cpu() and cpum_cf_offline_cpu().
+ * It also serializes concurrent device ioctl access from multiple
+ * processes accessing /dev/hwc.
+ *
+ * The mutex protects concurrent access to the /dev/hwctr session management
+ * struct cfset_session and reference counting variable cfset_opencnt.
+ */
+static DEFINE_MUTEX(cfset_ctrset_mutex);
+
+/*
+ * CPU hotplug handles only /dev/hwctr device.
+ * For perf_event_open() the CPU hotplug handling is done on kernel common
+ * code:
+ * - CPU add: Nothing is done since a file descriptor can not be created
+ *   and returned to the user.
+ * - CPU delete: Handled by common code via pmu_disable(), pmu_stop() and
+ *   pmu_delete(). The event itself is removed when the file descriptor is
+ *   closed.
+ */
 static int cfset_online_cpu(unsigned int cpu);
+
 static int cpum_cf_online_cpu(unsigned int cpu)
 {
-	debug_sprintf_event(cf_dbg, 4, "%s cpu %d in_irq %ld\n", __func__,
-			    cpu, in_interrupt());
-	cpum_cf_setup(cpu, PMC_INIT);
-	return cfset_online_cpu(cpu);
+	int rc = 0;
+
+	debug_sprintf_event(cf_dbg, 4, "%s cpu %d root.refcnt %d "
+			    "opencnt %d\n", __func__, cpu,
+			    refcount_read(&cpu_cf_root.refcnt),
+			    refcount_read(&cfset_opencnt));
+	/*
+	 * Ignore notification for perf_event_open().
+	 * Handle only /dev/hwctr device sessions.
+	 */
+	mutex_lock(&cfset_ctrset_mutex);
+	if (refcount_read(&cfset_opencnt)) {
+		rc = cpum_cf_alloc_cpu(cpu);
+		if (!rc)
+			cfset_online_cpu(cpu);
+	}
+	mutex_unlock(&cfset_ctrset_mutex);
+	return rc;
 }
 
 static int cfset_offline_cpu(unsigned int cpu);
+
 static int cpum_cf_offline_cpu(unsigned int cpu)
 {
-	debug_sprintf_event(cf_dbg, 4, "%s cpu %d\n", __func__, cpu);
-	cfset_offline_cpu(cpu);
-	return cpum_cf_setup(cpu, PMC_RELEASE);
+	debug_sprintf_event(cf_dbg, 4, "%s cpu %d root.refcnt %d opencnt %d\n",
+			    __func__, cpu, refcount_read(&cpu_cf_root.refcnt),
+			    refcount_read(&cfset_opencnt));
+	/*
+	 * During task exit processing of grouped perf events triggered by CPU
+	 * hotplug processing, pmu_disable() is called as part of perf context
+	 * removal process. Therefore do not trigger event removal now for
+	 * perf_event_open() created events. Perf common code triggers event
+	 * destruction when the event file descriptor is closed.
+	 *
+	 * Handle only /dev/hwctr device sessions.
+	 */
+	mutex_lock(&cfset_ctrset_mutex);
+	if (refcount_read(&cfset_opencnt)) {
+		cfset_offline_cpu(cpu);
+		cpum_cf_free_cpu(cpu);
+	}
+	mutex_unlock(&cfset_ctrset_mutex);
+	return 0;
 }
 
 /* Return true if store counter set multiple instruction is available */
@@ -953,13 +1164,13 @@ static void cpumf_measurement_alert(struct ext_code ext_code,
 		return;
 
 	inc_irq_stat(IRQEXT_CMC);
-	cpuhw = this_cpu_ptr(&cpu_cf_events);
 
 	/*
 	 * Measurement alerts are shared and might happen when the PMU
 	 * is not reserved. Ignore these alerts in this case.
 	 */
-	if (!(cpuhw->flags & PMU_F_RESERVED))
+	cpuhw = this_cpu_cfhw();
+	if (!cpuhw)
 		return;
 
 	/* counter authorization change alert */
@@ -1039,19 +1250,11 @@ static int __init cpumf_pmu_init(void)
  * counter set via normal file operations.
  */
 
-static atomic_t cfset_opencnt = ATOMIC_INIT(0);		/* Access count */
-static DEFINE_MUTEX(cfset_ctrset_mutex);/* Synchronize access to hardware */
 struct cfset_call_on_cpu_parm {		/* Parm struct for smp_call_on_cpu */
 	unsigned int sets;		/* Counter set bit mask */
 	atomic_t cpus_ack;		/* # CPUs successfully executed func */
 };
 
-static struct cfset_session {		/* CPUs and counter set bit mask */
-	struct list_head head;		/* Head of list of active processes */
-} cfset_session = {
-	.head = LIST_HEAD_INIT(cfset_session.head)
-};
-
 struct cfset_request {			/* CPUs and counter set bit mask */
 	unsigned long ctrset;		/* Bit mask of counter set to read */
 	cpumask_t mask;			/* CPU mask to read from */
@@ -1113,11 +1316,11 @@ static void cfset_session_add(struct cfset_request *p)
 /* Stop all counter sets via ioctl interface */
 static void cfset_ioctl_off(void *parm)
 {
-	struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
+	struct cpu_cf_events *cpuhw = this_cpu_cfhw();
 	struct cfset_call_on_cpu_parm *p = parm;
 	int rc;
 
-	/* Check if any counter set used by /dev/hwc */
+	/* Check if any counter set used by /dev/hwctr */
 	for (rc = CPUMF_CTR_SET_BASIC; rc < CPUMF_CTR_SET_MAX; ++rc)
 		if ((p->sets & cpumf_ctr_ctl[rc])) {
 			if (!atomic_dec_return(&cpuhw->ctr_set[rc])) {
/* Start counter sets on particular CPU */ /* Start counter sets on particular CPU */
static void cfset_ioctl_on(void *parm) static void cfset_ioctl_on(void *parm)
{ {
struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events); struct cpu_cf_events *cpuhw = this_cpu_cfhw();
struct cfset_call_on_cpu_parm *p = parm; struct cfset_call_on_cpu_parm *p = parm;
int rc; int rc;
...@@ -1163,7 +1366,7 @@ static void cfset_ioctl_on(void *parm) ...@@ -1163,7 +1366,7 @@ static void cfset_ioctl_on(void *parm)
static void cfset_release_cpu(void *p) static void cfset_release_cpu(void *p)
{ {
struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events); struct cpu_cf_events *cpuhw = this_cpu_cfhw();
int rc; int rc;
debug_sprintf_event(cf_dbg, 4, "%s state %#llx dev_state %#llx\n", debug_sprintf_event(cf_dbg, 4, "%s state %#llx dev_state %#llx\n",
@@ -1203,27 +1406,41 @@ static int cfset_release(struct inode *inode, struct file *file)
 		kfree(file->private_data);
 		file->private_data = NULL;
 	}
-	if (!atomic_dec_return(&cfset_opencnt))
+	if (refcount_dec_and_test(&cfset_opencnt)) {	/* Last close */
 		on_each_cpu(cfset_release_cpu, NULL, 1);
+		cpum_cf_free(-1);
+	}
 	mutex_unlock(&cfset_ctrset_mutex);
-
-	hw_perf_event_destroy(NULL);
 	return 0;
 }
 
+/*
+ * Open via /dev/hwctr device. Allocate all per CPU resources on the first
+ * open of the device. The last close releases all per CPU resources.
+ * Parallel perf_event_open system calls also use per CPU resources.
+ * These invocations are handled via reference counting on the per CPU data
+ * structures.
+ */
 static int cfset_open(struct inode *inode, struct file *file)
 {
-	if (!capable(CAP_SYS_ADMIN))
+	int rc = 0;
+
+	if (!perfmon_capable())
 		return -EPERM;
+	file->private_data = NULL;
+
 	mutex_lock(&cfset_ctrset_mutex);
-	if (atomic_inc_return(&cfset_opencnt) == 1)
-		cfset_session_init();
+	if (!refcount_inc_not_zero(&cfset_opencnt)) {	/* First open */
+		rc = cpum_cf_alloc(-1);
+		if (!rc) {
+			cfset_session_init();
+			refcount_set(&cfset_opencnt, 1);
+		}
+	}
 	mutex_unlock(&cfset_ctrset_mutex);
-	cpumf_hw_inuse();
-	file->private_data = NULL;
+
 	/* nonseekable_open() never fails */
-	return nonseekable_open(inode, file);
+	return rc ?: nonseekable_open(inode, file);
 }
 
 static int cfset_all_start(struct cfset_request *req)
static int cfset_all_start(struct cfset_request *req) static int cfset_all_start(struct cfset_request *req)
...@@ -1280,7 +1497,7 @@ static int cfset_all_copy(unsigned long arg, cpumask_t *mask) ...@@ -1280,7 +1497,7 @@ static int cfset_all_copy(unsigned long arg, cpumask_t *mask)
ctrset_read = (struct s390_ctrset_read __user *)arg; ctrset_read = (struct s390_ctrset_read __user *)arg;
uptr = ctrset_read->data; uptr = ctrset_read->data;
for_each_cpu(cpu, mask) { for_each_cpu(cpu, mask) {
struct cpu_cf_events *cpuhw = per_cpu_ptr(&cpu_cf_events, cpu); struct cpu_cf_events *cpuhw = get_cpu_cfhw(cpu);
struct s390_ctrset_cpudata __user *ctrset_cpudata; struct s390_ctrset_cpudata __user *ctrset_cpudata;
ctrset_cpudata = uptr; ctrset_cpudata = uptr;
...@@ -1324,7 +1541,7 @@ static size_t cfset_cpuset_read(struct s390_ctrset_setdata *p, int ctrset, ...@@ -1324,7 +1541,7 @@ static size_t cfset_cpuset_read(struct s390_ctrset_setdata *p, int ctrset,
/* Read all counter sets. */ /* Read all counter sets. */
static void cfset_cpu_read(void *parm) static void cfset_cpu_read(void *parm)
{ {
struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events); struct cpu_cf_events *cpuhw = this_cpu_cfhw();
struct cfset_call_on_cpu_parm *p = parm; struct cfset_call_on_cpu_parm *p = parm;
int set, set_size; int set, set_size;
size_t space; size_t space;
...@@ -1348,9 +1565,9 @@ static void cfset_cpu_read(void *parm) ...@@ -1348,9 +1565,9 @@ static void cfset_cpu_read(void *parm)
cpuhw->used += space; cpuhw->used += space;
cpuhw->sets += 1; cpuhw->sets += 1;
} }
debug_sprintf_event(cf_dbg, 4, "%s sets %d used %zd\n", __func__,
cpuhw->sets, cpuhw->used);
} }
debug_sprintf_event(cf_dbg, 4, "%s sets %d used %zd\n", __func__,
cpuhw->sets, cpuhw->used);
} }
static int cfset_all_read(unsigned long arg, struct cfset_request *req) static int cfset_all_read(unsigned long arg, struct cfset_request *req)
@@ -1502,6 +1719,7 @@ static struct miscdevice cfset_dev = {
 	.name	= S390_HWCTR_DEVICE,
 	.minor	= MISC_DYNAMIC_MINOR,
 	.fops	= &cfset_fops,
+	.mode	= 0666,
 };
 /* Hotplug add of a CPU. Scan through all active processes and add
@@ -1512,7 +1730,6 @@ static int cfset_online_cpu(unsigned int cpu)
 	struct cfset_call_on_cpu_parm p;
 	struct cfset_request *rp;
 
-	mutex_lock(&cfset_ctrset_mutex);
 	if (!list_empty(&cfset_session.head)) {
 		list_for_each_entry(rp, &cfset_session.head, node) {
 			p.sets = rp->ctrset;
@@ -1520,19 +1737,18 @@ static int cfset_online_cpu(unsigned int cpu)
 			cpumask_set_cpu(cpu, &rp->mask);
 		}
 	}
-	mutex_unlock(&cfset_ctrset_mutex);
 	return 0;
 }
 
 /* Hotplug remove of a CPU. Scan through all active processes and clear
  * that CPU from the list of CPUs supplied with ioctl(..., START, ...).
- * Adjust reference counts.
  */
 static int cfset_offline_cpu(unsigned int cpu)
 {
 	struct cfset_call_on_cpu_parm p;
 	struct cfset_request *rp;
 
-	mutex_lock(&cfset_ctrset_mutex);
 	if (!list_empty(&cfset_session.head)) {
 		list_for_each_entry(rp, &cfset_session.head, node) {
 			p.sets = rp->ctrset;
@@ -1540,7 +1756,6 @@ static int cfset_offline_cpu(unsigned int cpu)
 			cpumask_clear_cpu(cpu, &rp->mask);
 		}
 	}
-	mutex_unlock(&cfset_ctrset_mutex);
 	return 0;
 }
@@ -1618,7 +1833,8 @@ static int cfdiag_event_init(struct perf_event *event)
 	}
 
 	/* Initialize for using the CPU-measurement counter facility */
-	cpumf_hw_inuse();
+	if (cpum_cf_alloc(event->cpu))
+		return -ENOMEM;
 	event->destroy = hw_perf_event_destroy;
 
 	err = cfdiag_event_init2(event);
......
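
One note on the cfset_open() change above: perfmon_capable() (from
include/linux/capability.h) accepts CAP_PERFMON but keeps CAP_SYS_ADMIN
as a fallback, so previously privileged users keep working:

    /* effective check performed by perfmon_capable() */
    static inline bool perfmon_capable(void)
    {
            return capable(CAP_PERFMON) || capable(CAP_SYS_ADMIN);
    }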
@@ -36,7 +36,7 @@ struct paicrypt_map {
 	unsigned long *page;		/* Page for CPU to store counters */
 	struct pai_userdata *save;	/* Page to store no-zero counters */
 	unsigned int active_events;	/* # of PAI crypto users */
-	unsigned int refcnt;		/* Reference count mapped buffers */
+	refcount_t refcnt;		/* Reference count mapped buffers */
 	enum paievt_mode mode;		/* Type of event */
 	struct perf_event *event;	/* Perf event for sampling */
 };
@@ -57,10 +57,11 @@ static void paicrypt_event_destroy(struct perf_event *event)
 	static_branch_dec(&pai_key);
 	mutex_lock(&pai_reserve_mutex);
 	debug_sprintf_event(cfm_dbg, 5, "%s event %#llx cpu %d users %d"
-			    " mode %d refcnt %d\n", __func__,
+			    " mode %d refcnt %u\n", __func__,
 			    event->attr.config, event->cpu,
-			    cpump->active_events, cpump->mode, cpump->refcnt);
-	if (!--cpump->refcnt) {
+			    cpump->active_events, cpump->mode,
+			    refcount_read(&cpump->refcnt));
+	if (refcount_dec_and_test(&cpump->refcnt)) {
 		debug_sprintf_event(cfm_dbg, 4, "%s page %#lx save %p\n",
 				    __func__, (unsigned long)cpump->page,
 				    cpump->save);
@@ -149,8 +150,10 @@ static int paicrypt_busy(struct perf_event_attr *a, struct paicrypt_map *cpump)
 	/* Allocate memory for counter page and counter extraction.
 	 * Only the first counting event has to allocate a page.
 	 */
-	if (cpump->page)
+	if (cpump->page) {
+		refcount_inc(&cpump->refcnt);
 		goto unlock;
+	}
 
 	rc = -ENOMEM;
 	cpump->page = (unsigned long *)get_zeroed_page(GFP_KERNEL);
@@ -164,18 +167,18 @@ static int paicrypt_busy(struct perf_event_attr *a, struct paicrypt_map *cpump)
 		goto unlock;
 	}
 	rc = 0;
+	refcount_set(&cpump->refcnt, 1);
 
 unlock:
 	/* If rc is non-zero, do not set mode and reference count */
 	if (!rc) {
-		cpump->refcnt++;
 		cpump->mode = a->sample_period ? PAI_MODE_SAMPLING
 					       : PAI_MODE_COUNTING;
 	}
 	debug_sprintf_event(cfm_dbg, 5, "%s sample_period %#llx users %d"
-			    " mode %d refcnt %d page %#lx save %p rc %d\n",
+			    " mode %d refcnt %u page %#lx save %p rc %d\n",
 			    __func__, a->sample_period, cpump->active_events,
-			    cpump->mode, cpump->refcnt,
+			    cpump->mode, refcount_read(&cpump->refcnt),
 			    (unsigned long)cpump->page, cpump->save, rc);
 	mutex_unlock(&pai_reserve_mutex);
 	return rc;
......
@@ -50,7 +50,7 @@ struct paiext_map {
 	struct pai_userdata *save;	/* Area to store non-zero counters */
 	enum paievt_mode mode;		/* Type of event */
 	unsigned int active_events;	/* # of PAI Extension users */
-	unsigned int refcnt;
+	refcount_t refcnt;
 	struct perf_event *event;	/* Perf event for sampling */
 	struct paiext_cb *paiext_cb;	/* PAI extension control block area */
 };
@@ -60,14 +60,14 @@ struct paiext_mapptr {
 };
 
 static struct paiext_root {		/* Anchor to per CPU data */
-	int refcnt;			/* Overall active events */
+	refcount_t refcnt;		/* Overall active events */
 	struct paiext_mapptr __percpu *mapptr;
 } paiext_root;
 
 /* Free per CPU data when the last event is removed. */
 static void paiext_root_free(void)
 {
-	if (!--paiext_root.refcnt) {
+	if (refcount_dec_and_test(&paiext_root.refcnt)) {
 		free_percpu(paiext_root.mapptr);
 		paiext_root.mapptr = NULL;
 	}
@@ -80,7 +80,7 @@ static void paiext_root_free(void)
  */
 static int paiext_root_alloc(void)
 {
-	if (++paiext_root.refcnt == 1) {
+	if (!refcount_inc_not_zero(&paiext_root.refcnt)) {
 		/* The memory is already zeroed. */
 		paiext_root.mapptr = alloc_percpu(struct paiext_mapptr);
 		if (!paiext_root.mapptr) {
@@ -91,6 +91,7 @@ static int paiext_root_alloc(void)
 			 */
 			return -ENOMEM;
 		}
+		refcount_set(&paiext_root.refcnt, 1);
 	}
 	return 0;
 }
@@ -122,7 +123,7 @@ static void paiext_event_destroy(struct perf_event *event)
 	mutex_lock(&paiext_reserve_mutex);
 	cpump->event = NULL;
-	if (!--cpump->refcnt)		/* Last reference gone */
+	if (refcount_dec_and_test(&cpump->refcnt))	/* Last reference gone */
 		paiext_free(mp);
 	paiext_root_free();
 	mutex_unlock(&paiext_reserve_mutex);
@@ -163,7 +164,7 @@ static int paiext_alloc(struct perf_event_attr *a, struct perf_event *event)
 		rc = -ENOMEM;
 		cpump = kzalloc(sizeof(*cpump), GFP_KERNEL);
 		if (!cpump)
-			goto unlock;
+			goto undo;
 
 		/* Allocate memory for counter area and counter extraction.
 		 * These are
@@ -183,8 +184,9 @@ static int paiext_alloc(struct perf_event_attr *a, struct perf_event *event)
 					     GFP_KERNEL);
 		if (!cpump->save || !cpump->area || !cpump->paiext_cb) {
 			paiext_free(mp);
-			goto unlock;
+			goto undo;
 		}
+		refcount_set(&cpump->refcnt, 1);
 		cpump->mode = a->sample_period ? PAI_MODE_SAMPLING
 					       : PAI_MODE_COUNTING;
 	} else {
@@ -195,15 +197,15 @@ static int paiext_alloc(struct perf_event_attr *a, struct perf_event *event)
 		if (cpump->mode == PAI_MODE_SAMPLING ||
 		    (cpump->mode == PAI_MODE_COUNTING && a->sample_period)) {
 			rc = -EBUSY;
-			goto unlock;
+			goto undo;
 		}
+		refcount_inc(&cpump->refcnt);
 	}
 
 	rc = 0;
 	cpump->event = event;
-	++cpump->refcnt;
 
-unlock:
+undo:
 	if (rc) {
 		/* Error in allocation of event, decrement anchor. Since
 		 * the event in not created, its destroy() function is never
@@ -211,6 +213,7 @@ static int paiext_alloc(struct perf_event_attr *a, struct perf_event *event)
 		 */
 		paiext_root_free();
 	}
+unlock:
 	mutex_unlock(&paiext_reserve_mutex);
 	/* If rc is non-zero, no increment of counter/sampler was done. */
 	return rc;
......
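
The paiext_root conversion above follows the standard kernel recipe for
moving a mutex-guarded counter to refcount_t: refcount_inc_not_zero()
distinguishes the first user from later ones, refcount_set(..., 1)
publishes the first reference only after allocation succeeds, and
refcount_dec_and_test() gates teardown. A generic distillation of that
pattern, with names invented for illustration:

    #include <linux/refcount.h>
    #include <linux/percpu.h>

    static struct root {
            refcount_t refcnt;              /* Overall active users */
            long __percpu *data;
    } root;

    static int root_get(void)               /* called under the owning mutex */
    {
            if (refcount_inc_not_zero(&root.refcnt))
                    return 0;                /* already alive, took a reference */
            root.data = alloc_percpu(long);
            if (!root.data)
                    return -ENOMEM;
            refcount_set(&root.refcnt, 1);   /* publish the first reference */
            return 0;
    }

    static void root_put(void)               /* called under the owning mutex */
    {
            if (refcount_dec_and_test(&root.refcnt)) {
                    free_percpu(root.data);
                    root.data = NULL;
            }
    }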
@@ -3,7 +3,7 @@
 #
 # Makefile for s390-specific library files..
 #
 
-lib-y += delay.o string.o uaccess.o find.o spinlock.o
+lib-y += delay.o string.o uaccess.o find.o spinlock.o tishift.o
 obj-y += mem.o xor.o
 lib-$(CONFIG_KPROBES) += probes.o
 lib-$(CONFIG_UPROBES) += probes.o
......
/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/linkage.h>
#include <asm/nospec-insn.h>
#include <asm/export.h>

	.section .noinstr.text, "ax"

	GEN_BR_THUNK %r14

SYM_FUNC_START(__ashlti3)
	lmg	%r0,%r1,0(%r3)
	cije	%r4,0,1f
	lhi	%r3,64
	sr	%r3,%r4
	jnh	0f
	srlg	%r3,%r1,0(%r3)
	sllg	%r0,%r0,0(%r4)
	sllg	%r1,%r1,0(%r4)
	ogr	%r0,%r3
	j	1f
0:	sllg	%r0,%r1,-64(%r4)
	lghi	%r1,0
1:	stmg	%r0,%r1,0(%r2)
	BR_EX	%r14
SYM_FUNC_END(__ashlti3)
EXPORT_SYMBOL(__ashlti3)

SYM_FUNC_START(__ashrti3)
	lmg	%r0,%r1,0(%r3)
	cije	%r4,0,1f
	lhi	%r3,64
	sr	%r3,%r4
	jnh	0f
	sllg	%r3,%r0,0(%r3)
	srlg	%r1,%r1,0(%r4)
	srag	%r0,%r0,0(%r4)
	ogr	%r1,%r3
	j	1f
0:	srag	%r1,%r0,-64(%r4)
	srag	%r0,%r0,63
1:	stmg	%r0,%r1,0(%r2)
	BR_EX	%r14
SYM_FUNC_END(__ashrti3)
EXPORT_SYMBOL(__ashrti3)

SYM_FUNC_START(__lshrti3)
	lmg	%r0,%r1,0(%r3)
	cije	%r4,0,1f
	lhi	%r3,64
	sr	%r3,%r4
	jnh	0f
	sllg	%r3,%r0,0(%r3)
	srlg	%r1,%r1,0(%r4)
	srlg	%r0,%r0,0(%r4)
	ogr	%r1,%r3
	j	1f
0:	srlg	%r1,%r0,-64(%r4)
	lghi	%r0,0
1:	stmg	%r0,%r1,0(%r2)
	BR_EX	%r14
SYM_FUNC_END(__lshrti3)
EXPORT_SYMBOL(__lshrti3)
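
For readers not fluent in s390 assembly: the three helpers implement the
standard two-word shift algorithm that compilers expect from
libgcc/compiler-rt when they emit 128-bit operations. An equivalent C
reference model of __ashlti3 (illustrative only, not the kernel source):

    #include <stdint.h>

    /* Shift a 128-bit value left by b bits, 0 <= b < 128. */
    unsigned __int128 ashlti3_ref(unsigned __int128 a, int b)
    {
            uint64_t hi = (uint64_t)(a >> 64), lo = (uint64_t)a;

            if (b == 0)
                    return a;
            if (b >= 64) {          /* low word shifts entirely into high word */
                    hi = lo << (b - 64);
                    lo = 0;
            } else {                /* bits spill from low word into high word */
                    hi = (hi << b) | (lo >> (64 - b));
                    lo <<= b;
            }
            return ((unsigned __int128)hi << 64) | lo;
    }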
@@ -51,6 +51,7 @@ static struct dentry *zcore_dir;
 static struct dentry *zcore_reipl_file;
 static struct dentry *zcore_hsa_file;
 static struct ipl_parameter_block *zcore_ipl_block;
+static unsigned long os_info_flags;
 
 static DEFINE_MUTEX(hsa_buf_mutex);
 static char hsa_buf[PAGE_SIZE] __aligned(PAGE_SIZE);
@@ -139,7 +140,13 @@ static ssize_t zcore_reipl_write(struct file *filp, const char __user *buf,
 {
 	if (zcore_ipl_block) {
 		diag308(DIAG308_SET, zcore_ipl_block);
-		diag308(DIAG308_LOAD_CLEAR, NULL);
+		if (os_info_flags & OS_INFO_FLAG_REIPL_CLEAR)
+			diag308(DIAG308_LOAD_CLEAR, NULL);
+		/* Use special diag308 subcode for CCW normal ipl */
+		if (zcore_ipl_block->pb0_hdr.pbt == IPL_PBT_CCW)
+			diag308(DIAG308_LOAD_NORMAL_DUMP, NULL);
+		else
+			diag308(DIAG308_LOAD_NORMAL, NULL);
 	}
 	return count;
 }
@@ -212,7 +219,10 @@ static int __init check_sdias(void)
  */
 static int __init zcore_reipl_init(void)
 {
+	struct os_info_entry *entry;
 	struct ipib_info ipib_info;
+	unsigned long os_info_addr;
+	struct os_info *os_info;
 	int rc;
 
 	rc = memcpy_hsa_kernel(&ipib_info, __LC_DUMP_REIPL, sizeof(ipib_info));
@@ -234,6 +244,35 @@ static int __init zcore_reipl_init(void)
 		free_page((unsigned long) zcore_ipl_block);
 		zcore_ipl_block = NULL;
 	}
+
+	/*
+	 * Read the bit-flags field from os_info flags entry.
+	 * Return zero even for os_info read or entry checksum errors in order
+	 * to continue dump processing, considering that os_info could be
+	 * corrupted on the panicked system.
+	 */
+	os_info = (void *)__get_free_page(GFP_KERNEL);
+	if (!os_info)
+		return -ENOMEM;
+	rc = memcpy_hsa_kernel(&os_info_addr, __LC_OS_INFO, sizeof(os_info_addr));
+	if (rc)
+		goto out;
+	if (os_info_addr < sclp.hsa_size)
+		rc = memcpy_hsa_kernel(os_info, os_info_addr, PAGE_SIZE);
+	else
+		rc = memcpy_real(os_info, os_info_addr, PAGE_SIZE);
+	if (rc || os_info_csum(os_info) != os_info->csum)
+		goto out;
+	entry = &os_info->entry[OS_INFO_FLAGS_ENTRY];
+	if (entry->addr && entry->size) {
+		if (entry->addr < sclp.hsa_size)
+			rc = memcpy_hsa_kernel(&os_info_flags, entry->addr, sizeof(os_info_flags));
+		else
+			rc = memcpy_real(&os_info_flags, entry->addr, sizeof(os_info_flags));
+		if (rc || (__force u32)csum_partial(&os_info_flags, entry->size, 0) != entry->csum)
+			os_info_flags = 0;
+	}
+out:
+	free_page((unsigned long)os_info);
 	return 0;
 }
......
@@ -171,7 +171,7 @@ static int vfio_ccw_sch_probe(struct subchannel *sch)
 		return -ENODEV;
 	}
 
-	parent = kzalloc(sizeof(*parent), GFP_KERNEL);
+	parent = kzalloc(struct_size(parent, mdev_types, 1), GFP_KERNEL);
 	if (!parent)
 		return -ENOMEM;
......
@@ -79,7 +79,7 @@ struct vfio_ccw_parent {
 
 	struct mdev_parent	parent;
 	struct mdev_type	mdev_type;
-	struct mdev_type	*mdev_types[1];
+	struct mdev_type	*mdev_types[];
 };
 
 /**
......
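
The vfio_ccw change pairs the flexible-array member with the
struct_size() helper from <linux/overflow.h>, which computes the size of
the struct plus n trailing elements while saturating instead of wrapping
on overflow. A minimal sketch of the allocation pattern, with a
stripped-down struct for illustration:

    #include <linux/overflow.h>
    #include <linux/slab.h>

    struct mdev_type;

    struct parent_stub {
            int nr_types;
            struct mdev_type *mdev_types[];  /* flexible-array member, was [1] */
    };

    static struct parent_stub *alloc_parent(void)
    {
            struct parent_stub *p;

            /* struct_size(p, mdev_types, 1) ==
             *     sizeof(*p) + 1 * sizeof(p->mdev_types[0]),
             * evaluated from p's type only, so p may be uninitialized here. */
            p = kzalloc(struct_size(p, mdev_types, 1), GFP_KERNEL);
            return p;
    }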
@@ -2,7 +2,8 @@
 /*
  * pkey device driver
  *
- * Copyright IBM Corp. 2017,2019
+ * Copyright IBM Corp. 2017, 2023
+ *
  * Author(s): Harald Freudenberger
  */
@@ -32,8 +33,10 @@ MODULE_AUTHOR("IBM Corporation");
 MODULE_DESCRIPTION("s390 protected key interface");
 
 #define KEYBLOBBUFSIZE 8192	/* key buffer size used for internal processing */
+#define MINKEYBLOBBUFSIZE (sizeof(struct keytoken_header))
 #define PROTKEYBLOBBUFSIZE 256	/* protected key buffer size used internal */
 #define MAXAPQNSINLIST 64	/* max 64 apqns within a apqn list */
+#define AES_WK_VP_SIZE 32	/* Size of WK VP block appended to a prot key */
 
 /*
  * debug feature data and functions
...@@ -71,49 +74,106 @@ struct protaeskeytoken { ...@@ -71,49 +74,106 @@ struct protaeskeytoken {
} __packed; } __packed;
/* inside view of a clear key token (type 0x00 version 0x02) */ /* inside view of a clear key token (type 0x00 version 0x02) */
struct clearaeskeytoken { struct clearkeytoken {
u8 type; /* 0x00 for PAES specific key tokens */ u8 type; /* 0x00 for PAES specific key tokens */
u8 res0[3]; u8 res0[3];
u8 version; /* 0x02 for clear AES key token */ u8 version; /* 0x02 for clear key token */
u8 res1[3]; u8 res1[3];
u32 keytype; /* key type, one of the PKEY_KEYTYPE values */ u32 keytype; /* key type, one of the PKEY_KEYTYPE_* values */
u32 len; /* bytes actually stored in clearkey[] */ u32 len; /* bytes actually stored in clearkey[] */
u8 clearkey[]; /* clear key value */ u8 clearkey[]; /* clear key value */
} __packed; } __packed;
/* helper function which translates the PKEY_KEYTYPE_AES_* to their keysize */
static inline u32 pkey_keytype_aes_to_size(u32 keytype)
{
switch (keytype) {
case PKEY_KEYTYPE_AES_128:
return 16;
case PKEY_KEYTYPE_AES_192:
return 24;
case PKEY_KEYTYPE_AES_256:
return 32;
default:
return 0;
}
}
/* /*
* Create a protected key from a clear key value. * Create a protected key from a clear key value via PCKMO instruction.
*/ */
static int pkey_clr2protkey(u32 keytype, static int pkey_clr2protkey(u32 keytype, const u8 *clrkey,
const struct pkey_clrkey *clrkey, u8 *protkey, u32 *protkeylen, u32 *protkeytype)
struct pkey_protkey *protkey)
{ {
/* mask of available pckmo subfunctions */ /* mask of available pckmo subfunctions */
static cpacf_mask_t pckmo_functions; static cpacf_mask_t pckmo_functions;
long fc; u8 paramblock[112];
u32 pkeytype;
int keysize; int keysize;
u8 paramblock[64]; long fc;
switch (keytype) { switch (keytype) {
case PKEY_KEYTYPE_AES_128: case PKEY_KEYTYPE_AES_128:
/* 16 byte key, 32 byte aes wkvp, total 48 bytes */
keysize = 16; keysize = 16;
pkeytype = keytype;
fc = CPACF_PCKMO_ENC_AES_128_KEY; fc = CPACF_PCKMO_ENC_AES_128_KEY;
break; break;
case PKEY_KEYTYPE_AES_192: case PKEY_KEYTYPE_AES_192:
/* 24 byte key, 32 byte aes wkvp, total 56 bytes */
keysize = 24; keysize = 24;
pkeytype = keytype;
fc = CPACF_PCKMO_ENC_AES_192_KEY; fc = CPACF_PCKMO_ENC_AES_192_KEY;
break; break;
case PKEY_KEYTYPE_AES_256: case PKEY_KEYTYPE_AES_256:
/* 32 byte key, 32 byte aes wkvp, total 64 bytes */
keysize = 32; keysize = 32;
pkeytype = keytype;
fc = CPACF_PCKMO_ENC_AES_256_KEY; fc = CPACF_PCKMO_ENC_AES_256_KEY;
break; break;
case PKEY_KEYTYPE_ECC_P256:
/* 32 byte key, 32 byte aes wkvp, total 64 bytes */
keysize = 32;
pkeytype = PKEY_KEYTYPE_ECC;
fc = CPACF_PCKMO_ENC_ECC_P256_KEY;
break;
case PKEY_KEYTYPE_ECC_P384:
/* 48 byte key, 32 byte aes wkvp, total 80 bytes */
keysize = 48;
pkeytype = PKEY_KEYTYPE_ECC;
fc = CPACF_PCKMO_ENC_ECC_P384_KEY;
break;
case PKEY_KEYTYPE_ECC_P521:
/* 80 byte key, 32 byte aes wkvp, total 112 bytes */
keysize = 80;
pkeytype = PKEY_KEYTYPE_ECC;
fc = CPACF_PCKMO_ENC_ECC_P521_KEY;
break;
case PKEY_KEYTYPE_ECC_ED25519:
/* 32 byte key, 32 byte aes wkvp, total 64 bytes */
keysize = 32;
pkeytype = PKEY_KEYTYPE_ECC;
fc = CPACF_PCKMO_ENC_ECC_ED25519_KEY;
break;
case PKEY_KEYTYPE_ECC_ED448:
/* 64 byte key, 32 byte aes wkvp, total 96 bytes */
keysize = 64;
pkeytype = PKEY_KEYTYPE_ECC;
fc = CPACF_PCKMO_ENC_ECC_ED448_KEY;
break;
default: default:
DEBUG_ERR("%s unknown/unsupported keytype %d\n", DEBUG_ERR("%s unknown/unsupported keytype %u\n",
__func__, keytype); __func__, keytype);
return -EINVAL; return -EINVAL;
} }
if (*protkeylen < keysize + AES_WK_VP_SIZE) {
DEBUG_ERR("%s prot key buffer size too small: %u < %d\n",
__func__, *protkeylen, keysize + AES_WK_VP_SIZE);
return -EINVAL;
}
/* Did we already check for PCKMO ? */ /* Did we already check for PCKMO ? */
if (!pckmo_functions.bytes[0]) { if (!pckmo_functions.bytes[0]) {
/* no, so check now */ /* no, so check now */
...@@ -128,15 +188,15 @@ static int pkey_clr2protkey(u32 keytype, ...@@ -128,15 +188,15 @@ static int pkey_clr2protkey(u32 keytype,
/* prepare param block */ /* prepare param block */
memset(paramblock, 0, sizeof(paramblock)); memset(paramblock, 0, sizeof(paramblock));
memcpy(paramblock, clrkey->clrkey, keysize); memcpy(paramblock, clrkey, keysize);
/* call the pckmo instruction */ /* call the pckmo instruction */
cpacf_pckmo(fc, paramblock); cpacf_pckmo(fc, paramblock);
/* copy created protected key */ /* copy created protected key to key buffer including the wkvp block */
protkey->type = keytype; *protkeylen = keysize + AES_WK_VP_SIZE;
protkey->len = keysize + 32; memcpy(protkey, paramblock, *protkeylen);
memcpy(protkey->protkey, paramblock, keysize + 32); *protkeytype = pkeytype;
return 0; return 0;
} }
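
The reworked calling convention is worth spelling out: *protkeylen is an in/out parameter that carries the buffer capacity on entry and the actual blob length on return, which is always the clear key size plus the 32-byte WKVP appended by PCKMO. A minimal caller sketch (illustrative only; the buffer size and key type here are assumptions, not taken from the patch):

        u8 clrkey[32];                  /* raw P256 private key material */
        u8 pkbuf[112];                  /* fits the largest case: ECC P521, 80 + 32 */
        u32 pklen = sizeof(pkbuf);      /* in: capacity, out: actual length */
        u32 pktype;
        int rc;

        rc = pkey_clr2protkey(PKEY_KEYTYPE_ECC_P256, clrkey,
                              pkbuf, &pklen, &pktype);
        /* on success: pklen == 64 (32 + AES_WK_VP_SIZE),
         * pktype == PKEY_KEYTYPE_ECC */

Note that all ECC variants report PKEY_KEYTYPE_ECC as the resulting protected key type, while the AES variants keep their specific PKEY_KEYTYPE_AES_* value.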
...@@ -144,11 +204,12 @@ static int pkey_clr2protkey(u32 keytype, ...@@ -144,11 +204,12 @@ static int pkey_clr2protkey(u32 keytype,
/* /*
* Find card and transform secure key into protected key. * Find card and transform secure key into protected key.
*/ */
static int pkey_skey2pkey(const u8 *key, struct pkey_protkey *pkey) static int pkey_skey2pkey(const u8 *key, u8 *protkey,
u32 *protkeylen, u32 *protkeytype)
{ {
int rc, verify;
u16 cardnr, domain;
struct keytoken_header *hdr = (struct keytoken_header *)key; struct keytoken_header *hdr = (struct keytoken_header *)key;
u16 cardnr, domain;
int rc, verify;
zcrypt_wait_api_operational(); zcrypt_wait_api_operational();
...@@ -167,14 +228,13 @@ static int pkey_skey2pkey(const u8 *key, struct pkey_protkey *pkey) ...@@ -167,14 +228,13 @@ static int pkey_skey2pkey(const u8 *key, struct pkey_protkey *pkey)
continue; continue;
switch (hdr->version) { switch (hdr->version) {
case TOKVER_CCA_AES: case TOKVER_CCA_AES:
rc = cca_sec2protkey(cardnr, domain, rc = cca_sec2protkey(cardnr, domain, key,
key, pkey->protkey, protkey, protkeylen, protkeytype);
&pkey->len, &pkey->type);
break; break;
case TOKVER_CCA_VLSC: case TOKVER_CCA_VLSC:
rc = cca_cipher2protkey(cardnr, domain, rc = cca_cipher2protkey(cardnr, domain, key,
key, pkey->protkey, protkey, protkeylen,
&pkey->len, &pkey->type); protkeytype);
break; break;
default: default:
return -EINVAL; return -EINVAL;
...@@ -195,9 +255,9 @@ static int pkey_skey2pkey(const u8 *key, struct pkey_protkey *pkey) ...@@ -195,9 +255,9 @@ static int pkey_skey2pkey(const u8 *key, struct pkey_protkey *pkey)
static int pkey_clr2ep11key(const u8 *clrkey, size_t clrkeylen, static int pkey_clr2ep11key(const u8 *clrkey, size_t clrkeylen,
u8 *keybuf, size_t *keybuflen) u8 *keybuf, size_t *keybuflen)
{ {
int i, rc;
u16 card, dom;
u32 nr_apqns, *apqns = NULL; u32 nr_apqns, *apqns = NULL;
u16 card, dom;
int i, rc;
zcrypt_wait_api_operational(); zcrypt_wait_api_operational();
...@@ -227,12 +287,13 @@ static int pkey_clr2ep11key(const u8 *clrkey, size_t clrkeylen, ...@@ -227,12 +287,13 @@ static int pkey_clr2ep11key(const u8 *clrkey, size_t clrkeylen,
/* /*
* Find card and transform EP11 secure key into protected key. * Find card and transform EP11 secure key into protected key.
*/ */
static int pkey_ep11key2pkey(const u8 *key, struct pkey_protkey *pkey) static int pkey_ep11key2pkey(const u8 *key, u8 *protkey,
u32 *protkeylen, u32 *protkeytype)
{ {
int i, rc;
u16 card, dom;
u32 nr_apqns, *apqns = NULL;
struct ep11keyblob *kb = (struct ep11keyblob *)key; struct ep11keyblob *kb = (struct ep11keyblob *)key;
u32 nr_apqns, *apqns = NULL;
u16 card, dom;
int i, rc;
zcrypt_wait_api_operational(); zcrypt_wait_api_operational();
...@@ -246,9 +307,8 @@ static int pkey_ep11key2pkey(const u8 *key, struct pkey_protkey *pkey) ...@@ -246,9 +307,8 @@ static int pkey_ep11key2pkey(const u8 *key, struct pkey_protkey *pkey)
for (rc = -ENODEV, i = 0; i < nr_apqns; i++) { for (rc = -ENODEV, i = 0; i < nr_apqns; i++) {
card = apqns[i] >> 16; card = apqns[i] >> 16;
dom = apqns[i] & 0xFFFF; dom = apqns[i] & 0xFFFF;
pkey->len = sizeof(pkey->protkey);
rc = ep11_kblob2protkey(card, dom, key, kb->head.len, rc = ep11_kblob2protkey(card, dom, key, kb->head.len,
pkey->protkey, &pkey->len, &pkey->type); protkey, protkeylen, protkeytype);
if (rc == 0) if (rc == 0)
break; break;
} }
...@@ -306,38 +366,31 @@ static int pkey_verifykey(const struct pkey_seckey *seckey, ...@@ -306,38 +366,31 @@ static int pkey_verifykey(const struct pkey_seckey *seckey,
/* /*
* Generate a random protected key * Generate a random protected key
*/ */
static int pkey_genprotkey(u32 keytype, struct pkey_protkey *protkey) static int pkey_genprotkey(u32 keytype, u8 *protkey,
u32 *protkeylen, u32 *protkeytype)
{ {
struct pkey_clrkey clrkey; u8 clrkey[32];
int keysize; int keysize;
int rc; int rc;
switch (keytype) { keysize = pkey_keytype_aes_to_size(keytype);
case PKEY_KEYTYPE_AES_128: if (!keysize) {
keysize = 16;
break;
case PKEY_KEYTYPE_AES_192:
keysize = 24;
break;
case PKEY_KEYTYPE_AES_256:
keysize = 32;
break;
default:
DEBUG_ERR("%s unknown/unsupported keytype %d\n", __func__, DEBUG_ERR("%s unknown/unsupported keytype %d\n", __func__,
keytype); keytype);
return -EINVAL; return -EINVAL;
} }
/* generate a dummy random clear key */ /* generate a dummy random clear key */
get_random_bytes(clrkey.clrkey, keysize); get_random_bytes(clrkey, keysize);
/* convert it to a dummy protected key */ /* convert it to a dummy protected key */
rc = pkey_clr2protkey(keytype, &clrkey, protkey); rc = pkey_clr2protkey(keytype, clrkey,
protkey, protkeylen, protkeytype);
if (rc) if (rc)
return rc; return rc;
/* replace the key part of the protected key with random bytes */ /* replace the key part of the protected key with random bytes */
get_random_bytes(protkey->protkey, keysize); get_random_bytes(protkey, keysize);
return 0; return 0;
} }
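
One subtlety here: overwriting the key part with random bytes still leaves a usable blob, because the WKVP that PCKMO appends authenticates only the CPACF wrapping key, not the wrapped key material. The result is a structurally valid protected key whose effective clear value nobody knows, which is exactly what a random protected key should be. Schematically, for AES-256 (sizes as an illustration):

        /*
         * blob after pkey_genprotkey(PKEY_KEYTYPE_AES_256, ...):
         *
         *   bytes  0..31  wrapped key material  (re-randomized above)
         *   bytes 32..63  WKVP                  (left intact from PCKMO)
         *
         * *protkeylen == 64, *protkeytype == PKEY_KEYTYPE_AES_256
         */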
...@@ -345,37 +398,46 @@ static int pkey_genprotkey(u32 keytype, struct pkey_protkey *protkey) ...@@ -345,37 +398,46 @@ static int pkey_genprotkey(u32 keytype, struct pkey_protkey *protkey)
/* /*
* Verify if a protected key is still valid * Verify if a protected key is still valid
*/ */
static int pkey_verifyprotkey(const struct pkey_protkey *protkey) static int pkey_verifyprotkey(const u8 *protkey, u32 protkeylen,
u32 protkeytype)
{ {
unsigned long fc;
struct { struct {
u8 iv[AES_BLOCK_SIZE]; u8 iv[AES_BLOCK_SIZE];
u8 key[MAXPROTKEYSIZE]; u8 key[MAXPROTKEYSIZE];
} param; } param;
u8 null_msg[AES_BLOCK_SIZE]; u8 null_msg[AES_BLOCK_SIZE];
u8 dest_buf[AES_BLOCK_SIZE]; u8 dest_buf[AES_BLOCK_SIZE];
unsigned int k; unsigned int k, pkeylen;
unsigned long fc;
switch (protkey->type) { switch (protkeytype) {
case PKEY_KEYTYPE_AES_128: case PKEY_KEYTYPE_AES_128:
pkeylen = 16 + AES_WK_VP_SIZE;
fc = CPACF_KMC_PAES_128; fc = CPACF_KMC_PAES_128;
break; break;
case PKEY_KEYTYPE_AES_192: case PKEY_KEYTYPE_AES_192:
pkeylen = 24 + AES_WK_VP_SIZE;
fc = CPACF_KMC_PAES_192; fc = CPACF_KMC_PAES_192;
break; break;
case PKEY_KEYTYPE_AES_256: case PKEY_KEYTYPE_AES_256:
pkeylen = 32 + AES_WK_VP_SIZE;
fc = CPACF_KMC_PAES_256; fc = CPACF_KMC_PAES_256;
break; break;
default: default:
DEBUG_ERR("%s unknown/unsupported keytype %d\n", __func__, DEBUG_ERR("%s unknown/unsupported keytype %u\n", __func__,
protkey->type); protkeytype);
return -EINVAL;
}
if (protkeylen != pkeylen) {
DEBUG_ERR("%s invalid protected key size %u for keytype %u\n",
__func__, protkeylen, protkeytype);
return -EINVAL; return -EINVAL;
} }
memset(null_msg, 0, sizeof(null_msg)); memset(null_msg, 0, sizeof(null_msg));
memset(param.iv, 0, sizeof(param.iv)); memset(param.iv, 0, sizeof(param.iv));
memcpy(param.key, protkey->protkey, sizeof(param.key)); memcpy(param.key, protkey, protkeylen);
k = cpacf_kmc(fc | CPACF_ENCRYPT, &param, null_msg, dest_buf, k = cpacf_kmc(fc | CPACF_ENCRYPT, &param, null_msg, dest_buf,
sizeof(null_msg)); sizeof(null_msg));
...@@ -387,15 +449,119 @@ static int pkey_verifyprotkey(const struct pkey_protkey *protkey) ...@@ -387,15 +449,119 @@ static int pkey_verifyprotkey(const struct pkey_protkey *protkey)
return 0; return 0;
} }
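
The verification trick itself: encrypt a single zero block with the matching KMC-PAES function and look at how much data the instruction actually processed. cpacf_kmc() returns the number of bytes handled, and CPACF refuses a protected key whose embedded WKVP no longer matches the machine's current wrapping key (for instance after a re-IPL), so a stale key shows up as a short count. A hedged sketch of the check performed in the elided tail of this function, not shown in the hunk above:

        if (k != sizeof(null_msg))
                return -EKEYREJECTED;   /* wrapping key changed, key unusable */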
/* Helper for pkey_nonccatok2pkey, handles aes clear key token */
static int nonccatokaes2pkey(const struct clearkeytoken *t,
u8 *protkey, u32 *protkeylen, u32 *protkeytype)
{
size_t tmpbuflen = max_t(size_t, SECKEYBLOBSIZE, MAXEP11AESKEYBLOBSIZE);
u8 *tmpbuf = NULL;
u32 keysize;
int rc;
keysize = pkey_keytype_aes_to_size(t->keytype);
if (!keysize) {
DEBUG_ERR("%s unknown/unsupported keytype %u\n",
__func__, t->keytype);
return -EINVAL;
}
if (t->len != keysize) {
DEBUG_ERR("%s non clear key aes token: invalid key len %u\n",
__func__, t->len);
return -EINVAL;
}
/* try direct way with the PCKMO instruction */
rc = pkey_clr2protkey(t->keytype, t->clearkey,
protkey, protkeylen, protkeytype);
if (!rc)
goto out;
/* PCKMO failed, so try the CCA secure key way */
tmpbuf = kmalloc(tmpbuflen, GFP_ATOMIC);
if (!tmpbuf)
return -ENOMEM;
zcrypt_wait_api_operational();
rc = cca_clr2seckey(0xFFFF, 0xFFFF, t->keytype, t->clearkey, tmpbuf);
if (rc)
goto try_via_ep11;
rc = pkey_skey2pkey(tmpbuf,
protkey, protkeylen, protkeytype);
if (!rc)
goto out;
try_via_ep11:
/* if the CCA way also failed, let's try via EP11 */
rc = pkey_clr2ep11key(t->clearkey, t->len,
tmpbuf, &tmpbuflen);
if (rc)
goto failure;
rc = pkey_ep11key2pkey(tmpbuf,
protkey, protkeylen, protkeytype);
if (!rc)
goto out;
failure:
DEBUG_ERR("%s unable to build protected key from clear", __func__);
out:
kfree(tmpbuf);
return rc;
}
/* Helper for pkey_nonccatok2pkey, handles ecc clear key token */
static int nonccatokecc2pkey(const struct clearkeytoken *t,
u8 *protkey, u32 *protkeylen, u32 *protkeytype)
{
u32 keylen;
int rc;
switch (t->keytype) {
case PKEY_KEYTYPE_ECC_P256:
keylen = 32;
break;
case PKEY_KEYTYPE_ECC_P384:
keylen = 48;
break;
case PKEY_KEYTYPE_ECC_P521:
keylen = 80;
break;
case PKEY_KEYTYPE_ECC_ED25519:
keylen = 32;
break;
case PKEY_KEYTYPE_ECC_ED448:
keylen = 64;
break;
default:
DEBUG_ERR("%s unknown/unsupported keytype %u\n",
__func__, t->keytype);
return -EINVAL;
}
if (t->len != keylen) {
DEBUG_ERR("%s non clear key ecc token: invalid key len %u\n",
__func__, t->len);
return -EINVAL;
}
/* only one path possible: via PCKMO instruction */
rc = pkey_clr2protkey(t->keytype, t->clearkey,
protkey, protkeylen, protkeytype);
if (rc) {
DEBUG_ERR("%s unable to build protected key from clear",
__func__);
}
return rc;
}
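
From userspace, the new ECC path is driven by wrapping a raw clear key in a type 0x00 / version 0x02 token and handing it to the PKEY_KBLOB2PROTK3 ioctl. A minimal sketch for P256; the token layout is mirrored by hand here because struct clearkeytoken is driver-internal, and the single zeroed APQN entry is an assumption (the handler only insists on a non-empty target list before dispatching non-CCA tokens):

        #include <stdint.h>
        #include <string.h>
        #include <sys/ioctl.h>
        #include <asm/pkey.h>

        struct clrtok_p256 {            /* mirrors struct clearkeytoken */
                uint8_t  type;          /* 0x00: TOKTYPE_NON_CCA */
                uint8_t  res0[3];
                uint8_t  version;       /* 0x02: TOKVER_CLEAR_KEY */
                uint8_t  res1[3];
                uint32_t keytype;       /* PKEY_KEYTYPE_ECC_P256 */
                uint32_t len;           /* bytes in clearkey[] */
                uint8_t  clearkey[32];
        } __attribute__((packed));

        int p256_clr2protk(int pkeyfd, const uint8_t rawkey[32],
                           uint8_t *protkey, uint32_t *protkeylen)
        {
                struct clrtok_p256 tok = {
                        .version = 0x02,
                        .keytype = PKEY_KEYTYPE_ECC_P256,
                        .len     = 32,
                };
                struct pkey_apqn apqn = { 0 };  /* placeholder, unused for clear keys */
                struct pkey_kblob2pkey3 ktp = {
                        .apqns        = &apqn,
                        .apqn_entries = 1,
                        .key          = (uint8_t *)&tok,
                        .keylen       = sizeof(tok),
                        .pkey         = protkey,
                        .pkeylen      = *protkeylen,    /* in: capacity */
                };

                memcpy(tok.clearkey, rawkey, sizeof(tok.clearkey));
                if (ioctl(pkeyfd, PKEY_KBLOB2PROTK3, &ktp) < 0)
                        return -1;
                *protkeylen = ktp.pkeylen;      /* out: 64 = 32-byte key + WKVP */
                return 0;                       /* ktp.pkeytype == PKEY_KEYTYPE_ECC */
        }

The descriptor is expected to come from open("/dev/pkey", O_RDWR).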
/* /*
* Transform a non-CCA key token into a protected key * Transform a non-CCA key token into a protected key
*/ */
static int pkey_nonccatok2pkey(const u8 *key, u32 keylen, static int pkey_nonccatok2pkey(const u8 *key, u32 keylen,
struct pkey_protkey *protkey) u8 *protkey, u32 *protkeylen, u32 *protkeytype)
{ {
int rc = -EINVAL;
u8 *tmpbuf = NULL;
struct keytoken_header *hdr = (struct keytoken_header *)key; struct keytoken_header *hdr = (struct keytoken_header *)key;
int rc = -EINVAL;
switch (hdr->version) { switch (hdr->version) {
case TOKVER_PROTECTED_KEY: { case TOKVER_PROTECTED_KEY: {
...@@ -404,59 +570,40 @@ static int pkey_nonccatok2pkey(const u8 *key, u32 keylen, ...@@ -404,59 +570,40 @@ static int pkey_nonccatok2pkey(const u8 *key, u32 keylen,
if (keylen != sizeof(struct protaeskeytoken)) if (keylen != sizeof(struct protaeskeytoken))
goto out; goto out;
t = (struct protaeskeytoken *)key; t = (struct protaeskeytoken *)key;
protkey->len = t->len; rc = pkey_verifyprotkey(t->protkey, t->len, t->keytype);
protkey->type = t->keytype; if (rc)
memcpy(protkey->protkey, t->protkey, goto out;
sizeof(protkey->protkey)); memcpy(protkey, t->protkey, t->len);
rc = pkey_verifyprotkey(protkey); *protkeylen = t->len;
*protkeytype = t->keytype;
break; break;
} }
case TOKVER_CLEAR_KEY: { case TOKVER_CLEAR_KEY: {
struct clearaeskeytoken *t; struct clearkeytoken *t = (struct clearkeytoken *)key;
struct pkey_clrkey ckey;
union u_tmpbuf { if (keylen < sizeof(struct clearkeytoken) ||
u8 skey[SECKEYBLOBSIZE]; keylen != sizeof(*t) + t->len)
u8 ep11key[MAXEP11AESKEYBLOBSIZE];
};
size_t tmpbuflen = sizeof(union u_tmpbuf);
if (keylen < sizeof(struct clearaeskeytoken))
goto out;
t = (struct clearaeskeytoken *)key;
if (keylen != sizeof(*t) + t->len)
goto out;
if ((t->keytype == PKEY_KEYTYPE_AES_128 && t->len == 16) ||
(t->keytype == PKEY_KEYTYPE_AES_192 && t->len == 24) ||
(t->keytype == PKEY_KEYTYPE_AES_256 && t->len == 32))
memcpy(ckey.clrkey, t->clearkey, t->len);
else
goto out;
/* alloc temp key buffer space */
tmpbuf = kmalloc(tmpbuflen, GFP_ATOMIC);
if (!tmpbuf) {
rc = -ENOMEM;
goto out; goto out;
} switch (t->keytype) {
/* try direct way with the PCKMO instruction */ case PKEY_KEYTYPE_AES_128:
rc = pkey_clr2protkey(t->keytype, &ckey, protkey); case PKEY_KEYTYPE_AES_192:
if (rc == 0) case PKEY_KEYTYPE_AES_256:
rc = nonccatokaes2pkey(t, protkey,
protkeylen, protkeytype);
break; break;
/* PCKMO failed, so try the CCA secure key way */ case PKEY_KEYTYPE_ECC_P256:
zcrypt_wait_api_operational(); case PKEY_KEYTYPE_ECC_P384:
rc = cca_clr2seckey(0xFFFF, 0xFFFF, t->keytype, case PKEY_KEYTYPE_ECC_P521:
ckey.clrkey, tmpbuf); case PKEY_KEYTYPE_ECC_ED25519:
if (rc == 0) case PKEY_KEYTYPE_ECC_ED448:
rc = pkey_skey2pkey(tmpbuf, protkey); rc = nonccatokecc2pkey(t, protkey,
if (rc == 0) protkeylen, protkeytype);
break; break;
/* if the CCA way also failed, let's try via EP11 */ default:
rc = pkey_clr2ep11key(ckey.clrkey, t->len, DEBUG_ERR("%s unknown/unsupported non cca clear key type %u\n",
tmpbuf, &tmpbuflen); __func__, t->keytype);
if (rc == 0) return -EINVAL;
rc = pkey_ep11key2pkey(tmpbuf, protkey); }
/* now we should really have an protected key */
DEBUG_ERR("%s unable to build protected key from clear",
__func__);
break; break;
} }
case TOKVER_EP11_AES: { case TOKVER_EP11_AES: {
...@@ -464,7 +611,8 @@ static int pkey_nonccatok2pkey(const u8 *key, u32 keylen, ...@@ -464,7 +611,8 @@ static int pkey_nonccatok2pkey(const u8 *key, u32 keylen,
rc = ep11_check_aes_key(debug_info, 3, key, keylen, 1); rc = ep11_check_aes_key(debug_info, 3, key, keylen, 1);
if (rc) if (rc)
goto out; goto out;
rc = pkey_ep11key2pkey(key, protkey); rc = pkey_ep11key2pkey(key,
protkey, protkeylen, protkeytype);
break; break;
} }
case TOKVER_EP11_AES_WITH_HEADER: case TOKVER_EP11_AES_WITH_HEADER:
...@@ -473,16 +621,14 @@ static int pkey_nonccatok2pkey(const u8 *key, u32 keylen, ...@@ -473,16 +621,14 @@ static int pkey_nonccatok2pkey(const u8 *key, u32 keylen,
if (rc) if (rc)
goto out; goto out;
rc = pkey_ep11key2pkey(key + sizeof(struct ep11kblob_header), rc = pkey_ep11key2pkey(key + sizeof(struct ep11kblob_header),
protkey); protkey, protkeylen, protkeytype);
break; break;
default: default:
DEBUG_ERR("%s unknown/unsupported non-CCA token version %d\n", DEBUG_ERR("%s unknown/unsupported non-CCA token version %d\n",
__func__, hdr->version); __func__, hdr->version);
rc = -EINVAL;
} }
out: out:
kfree(tmpbuf);
return rc; return rc;
} }
...@@ -490,7 +636,7 @@ static int pkey_nonccatok2pkey(const u8 *key, u32 keylen, ...@@ -490,7 +636,7 @@ static int pkey_nonccatok2pkey(const u8 *key, u32 keylen,
* Transform a CCA internal key token into a protected key * Transform a CCA internal key token into a protected key
*/ */
static int pkey_ccainttok2pkey(const u8 *key, u32 keylen, static int pkey_ccainttok2pkey(const u8 *key, u32 keylen,
struct pkey_protkey *protkey) u8 *protkey, u32 *protkeylen, u32 *protkeytype)
{ {
struct keytoken_header *hdr = (struct keytoken_header *)key; struct keytoken_header *hdr = (struct keytoken_header *)key;
...@@ -509,17 +655,17 @@ static int pkey_ccainttok2pkey(const u8 *key, u32 keylen, ...@@ -509,17 +655,17 @@ static int pkey_ccainttok2pkey(const u8 *key, u32 keylen,
return -EINVAL; return -EINVAL;
} }
return pkey_skey2pkey(key, protkey); return pkey_skey2pkey(key, protkey, protkeylen, protkeytype);
} }
/* /*
* Transform a key blob (of any type) into a protected key * Transform a key blob (of any type) into a protected key
*/ */
int pkey_keyblob2pkey(const u8 *key, u32 keylen, int pkey_keyblob2pkey(const u8 *key, u32 keylen,
struct pkey_protkey *protkey) u8 *protkey, u32 *protkeylen, u32 *protkeytype)
{ {
int rc;
struct keytoken_header *hdr = (struct keytoken_header *)key; struct keytoken_header *hdr = (struct keytoken_header *)key;
int rc;
if (keylen < sizeof(struct keytoken_header)) { if (keylen < sizeof(struct keytoken_header)) {
DEBUG_ERR("%s invalid keylen %d\n", __func__, keylen); DEBUG_ERR("%s invalid keylen %d\n", __func__, keylen);
...@@ -528,10 +674,12 @@ int pkey_keyblob2pkey(const u8 *key, u32 keylen, ...@@ -528,10 +674,12 @@ int pkey_keyblob2pkey(const u8 *key, u32 keylen,
switch (hdr->type) { switch (hdr->type) {
case TOKTYPE_NON_CCA: case TOKTYPE_NON_CCA:
rc = pkey_nonccatok2pkey(key, keylen, protkey); rc = pkey_nonccatok2pkey(key, keylen,
protkey, protkeylen, protkeytype);
break; break;
case TOKTYPE_CCA_INTERNAL: case TOKTYPE_CCA_INTERNAL:
rc = pkey_ccainttok2pkey(key, keylen, protkey); rc = pkey_ccainttok2pkey(key, keylen,
protkey, protkeylen, protkeytype);
break; break;
default: default:
DEBUG_ERR("%s unknown/unsupported blob type %d\n", DEBUG_ERR("%s unknown/unsupported blob type %d\n",
...@@ -663,9 +811,9 @@ static int pkey_verifykey2(const u8 *key, size_t keylen, ...@@ -663,9 +811,9 @@ static int pkey_verifykey2(const u8 *key, size_t keylen,
enum pkey_key_type *ktype, enum pkey_key_type *ktype,
enum pkey_key_size *ksize, u32 *flags) enum pkey_key_size *ksize, u32 *flags)
{ {
int rc;
u32 _nr_apqns, *_apqns = NULL;
struct keytoken_header *hdr = (struct keytoken_header *)key; struct keytoken_header *hdr = (struct keytoken_header *)key;
u32 _nr_apqns, *_apqns = NULL;
int rc;
if (keylen < sizeof(struct keytoken_header)) if (keylen < sizeof(struct keytoken_header))
return -EINVAL; return -EINVAL;
...@@ -771,10 +919,10 @@ static int pkey_verifykey2(const u8 *key, size_t keylen, ...@@ -771,10 +919,10 @@ static int pkey_verifykey2(const u8 *key, size_t keylen,
static int pkey_keyblob2pkey2(const struct pkey_apqn *apqns, size_t nr_apqns, static int pkey_keyblob2pkey2(const struct pkey_apqn *apqns, size_t nr_apqns,
const u8 *key, size_t keylen, const u8 *key, size_t keylen,
struct pkey_protkey *pkey) u8 *protkey, u32 *protkeylen, u32 *protkeytype)
{ {
int i, card, dom, rc;
struct keytoken_header *hdr = (struct keytoken_header *)key; struct keytoken_header *hdr = (struct keytoken_header *)key;
int i, card, dom, rc;
/* check for at least one apqn given */ /* check for at least one apqn given */
if (!apqns || !nr_apqns) if (!apqns || !nr_apqns)
...@@ -806,7 +954,9 @@ static int pkey_keyblob2pkey2(const struct pkey_apqn *apqns, size_t nr_apqns, ...@@ -806,7 +954,9 @@ static int pkey_keyblob2pkey2(const struct pkey_apqn *apqns, size_t nr_apqns,
if (ep11_check_aes_key(debug_info, 3, key, keylen, 1)) if (ep11_check_aes_key(debug_info, 3, key, keylen, 1))
return -EINVAL; return -EINVAL;
} else { } else {
return pkey_nonccatok2pkey(key, keylen, pkey); return pkey_nonccatok2pkey(key, keylen,
protkey, protkeylen,
protkeytype);
} }
} else { } else {
DEBUG_ERR("%s unknown/unsupported blob type %d\n", DEBUG_ERR("%s unknown/unsupported blob type %d\n",
...@@ -822,20 +972,20 @@ static int pkey_keyblob2pkey2(const struct pkey_apqn *apqns, size_t nr_apqns, ...@@ -822,20 +972,20 @@ static int pkey_keyblob2pkey2(const struct pkey_apqn *apqns, size_t nr_apqns,
dom = apqns[i].domain; dom = apqns[i].domain;
if (hdr->type == TOKTYPE_CCA_INTERNAL && if (hdr->type == TOKTYPE_CCA_INTERNAL &&
hdr->version == TOKVER_CCA_AES) { hdr->version == TOKVER_CCA_AES) {
rc = cca_sec2protkey(card, dom, key, pkey->protkey, rc = cca_sec2protkey(card, dom, key,
&pkey->len, &pkey->type); protkey, protkeylen, protkeytype);
} else if (hdr->type == TOKTYPE_CCA_INTERNAL && } else if (hdr->type == TOKTYPE_CCA_INTERNAL &&
hdr->version == TOKVER_CCA_VLSC) { hdr->version == TOKVER_CCA_VLSC) {
rc = cca_cipher2protkey(card, dom, key, pkey->protkey, rc = cca_cipher2protkey(card, dom, key,
&pkey->len, &pkey->type); protkey, protkeylen,
protkeytype);
} else { } else {
/* EP11 AES secure key blob */ /* EP11 AES secure key blob */
struct ep11keyblob *kb = (struct ep11keyblob *)key; struct ep11keyblob *kb = (struct ep11keyblob *)key;
pkey->len = sizeof(pkey->protkey);
rc = ep11_kblob2protkey(card, dom, key, kb->head.len, rc = ep11_kblob2protkey(card, dom, key, kb->head.len,
pkey->protkey, &pkey->len, protkey, protkeylen,
&pkey->type); protkeytype);
} }
if (rc == 0) if (rc == 0)
break; break;
...@@ -847,9 +997,9 @@ static int pkey_keyblob2pkey2(const struct pkey_apqn *apqns, size_t nr_apqns, ...@@ -847,9 +997,9 @@ static int pkey_keyblob2pkey2(const struct pkey_apqn *apqns, size_t nr_apqns,
static int pkey_apqns4key(const u8 *key, size_t keylen, u32 flags, static int pkey_apqns4key(const u8 *key, size_t keylen, u32 flags,
struct pkey_apqn *apqns, size_t *nr_apqns) struct pkey_apqn *apqns, size_t *nr_apqns)
{ {
int rc;
u32 _nr_apqns, *_apqns = NULL;
struct keytoken_header *hdr = (struct keytoken_header *)key; struct keytoken_header *hdr = (struct keytoken_header *)key;
u32 _nr_apqns, *_apqns = NULL;
int rc;
if (keylen < sizeof(struct keytoken_header) || flags == 0) if (keylen < sizeof(struct keytoken_header) || flags == 0)
return -EINVAL; return -EINVAL;
...@@ -860,9 +1010,9 @@ static int pkey_apqns4key(const u8 *key, size_t keylen, u32 flags, ...@@ -860,9 +1010,9 @@ static int pkey_apqns4key(const u8 *key, size_t keylen, u32 flags,
(hdr->version == TOKVER_EP11_AES_WITH_HEADER || (hdr->version == TOKVER_EP11_AES_WITH_HEADER ||
hdr->version == TOKVER_EP11_ECC_WITH_HEADER) && hdr->version == TOKVER_EP11_ECC_WITH_HEADER) &&
is_ep11_keyblob(key + sizeof(struct ep11kblob_header))) { is_ep11_keyblob(key + sizeof(struct ep11kblob_header))) {
int minhwtype = 0, api = 0;
struct ep11keyblob *kb = (struct ep11keyblob *) struct ep11keyblob *kb = (struct ep11keyblob *)
(key + sizeof(struct ep11kblob_header)); (key + sizeof(struct ep11kblob_header));
int minhwtype = 0, api = 0;
if (flags != PKEY_FLAGS_MATCH_CUR_MKVP) if (flags != PKEY_FLAGS_MATCH_CUR_MKVP)
return -EINVAL; return -EINVAL;
...@@ -877,8 +1027,8 @@ static int pkey_apqns4key(const u8 *key, size_t keylen, u32 flags, ...@@ -877,8 +1027,8 @@ static int pkey_apqns4key(const u8 *key, size_t keylen, u32 flags,
} else if (hdr->type == TOKTYPE_NON_CCA && } else if (hdr->type == TOKTYPE_NON_CCA &&
hdr->version == TOKVER_EP11_AES && hdr->version == TOKVER_EP11_AES &&
is_ep11_keyblob(key)) { is_ep11_keyblob(key)) {
int minhwtype = 0, api = 0;
struct ep11keyblob *kb = (struct ep11keyblob *)key; struct ep11keyblob *kb = (struct ep11keyblob *)key;
int minhwtype = 0, api = 0;
if (flags != PKEY_FLAGS_MATCH_CUR_MKVP) if (flags != PKEY_FLAGS_MATCH_CUR_MKVP)
return -EINVAL; return -EINVAL;
...@@ -891,8 +1041,8 @@ static int pkey_apqns4key(const u8 *key, size_t keylen, u32 flags, ...@@ -891,8 +1041,8 @@ static int pkey_apqns4key(const u8 *key, size_t keylen, u32 flags,
if (rc) if (rc)
goto out; goto out;
} else if (hdr->type == TOKTYPE_CCA_INTERNAL) { } else if (hdr->type == TOKTYPE_CCA_INTERNAL) {
int minhwtype = ZCRYPT_CEX3C;
u64 cur_mkvp = 0, old_mkvp = 0; u64 cur_mkvp = 0, old_mkvp = 0;
int minhwtype = ZCRYPT_CEX3C;
if (hdr->version == TOKVER_CCA_AES) { if (hdr->version == TOKVER_CCA_AES) {
struct secaeskeytoken *t = (struct secaeskeytoken *)key; struct secaeskeytoken *t = (struct secaeskeytoken *)key;
...@@ -919,8 +1069,8 @@ static int pkey_apqns4key(const u8 *key, size_t keylen, u32 flags, ...@@ -919,8 +1069,8 @@ static int pkey_apqns4key(const u8 *key, size_t keylen, u32 flags,
if (rc) if (rc)
goto out; goto out;
} else if (hdr->type == TOKTYPE_CCA_INTERNAL_PKA) { } else if (hdr->type == TOKTYPE_CCA_INTERNAL_PKA) {
u64 cur_mkvp = 0, old_mkvp = 0;
struct eccprivkeytoken *t = (struct eccprivkeytoken *)key; struct eccprivkeytoken *t = (struct eccprivkeytoken *)key;
u64 cur_mkvp = 0, old_mkvp = 0;
if (t->secid == 0x20) { if (t->secid == 0x20) {
if (flags & PKEY_FLAGS_MATCH_CUR_MKVP) if (flags & PKEY_FLAGS_MATCH_CUR_MKVP)
...@@ -957,8 +1107,8 @@ static int pkey_apqns4keytype(enum pkey_key_type ktype, ...@@ -957,8 +1107,8 @@ static int pkey_apqns4keytype(enum pkey_key_type ktype,
u8 cur_mkvp[32], u8 alt_mkvp[32], u32 flags, u8 cur_mkvp[32], u8 alt_mkvp[32], u32 flags,
struct pkey_apqn *apqns, size_t *nr_apqns) struct pkey_apqn *apqns, size_t *nr_apqns)
{ {
int rc;
u32 _nr_apqns, *_apqns = NULL; u32 _nr_apqns, *_apqns = NULL;
int rc;
zcrypt_wait_api_operational(); zcrypt_wait_api_operational();
...@@ -1020,11 +1170,11 @@ static int pkey_apqns4keytype(enum pkey_key_type ktype, ...@@ -1020,11 +1170,11 @@ static int pkey_apqns4keytype(enum pkey_key_type ktype,
} }
static int pkey_keyblob2pkey3(const struct pkey_apqn *apqns, size_t nr_apqns, static int pkey_keyblob2pkey3(const struct pkey_apqn *apqns, size_t nr_apqns,
const u8 *key, size_t keylen, u32 *protkeytype, const u8 *key, size_t keylen,
u8 *protkey, u32 *protkeylen) u8 *protkey, u32 *protkeylen, u32 *protkeytype)
{ {
int i, card, dom, rc;
struct keytoken_header *hdr = (struct keytoken_header *)key; struct keytoken_header *hdr = (struct keytoken_header *)key;
int i, card, dom, rc;
/* check for at least one apqn given */ /* check for at least one apqn given */
if (!apqns || !nr_apqns) if (!apqns || !nr_apqns)
...@@ -1076,15 +1226,8 @@ static int pkey_keyblob2pkey3(const struct pkey_apqn *apqns, size_t nr_apqns, ...@@ -1076,15 +1226,8 @@ static int pkey_keyblob2pkey3(const struct pkey_apqn *apqns, size_t nr_apqns,
if (cca_check_sececckeytoken(debug_info, 3, key, keylen, 1)) if (cca_check_sececckeytoken(debug_info, 3, key, keylen, 1))
return -EINVAL; return -EINVAL;
} else if (hdr->type == TOKTYPE_NON_CCA) { } else if (hdr->type == TOKTYPE_NON_CCA) {
struct pkey_protkey pkey; return pkey_nonccatok2pkey(key, keylen,
protkey, protkeylen, protkeytype);
rc = pkey_nonccatok2pkey(key, keylen, &pkey);
if (rc)
return rc;
memcpy(protkey, pkey.protkey, pkey.len);
*protkeylen = pkey.len;
*protkeytype = pkey.type;
return 0;
} else { } else {
DEBUG_ERR("%s unknown/unsupported blob type %d\n", DEBUG_ERR("%s unknown/unsupported blob type %d\n",
__func__, hdr->type); __func__, hdr->type);
...@@ -1130,7 +1273,7 @@ static int pkey_keyblob2pkey3(const struct pkey_apqn *apqns, size_t nr_apqns, ...@@ -1130,7 +1273,7 @@ static int pkey_keyblob2pkey3(const struct pkey_apqn *apqns, size_t nr_apqns,
static void *_copy_key_from_user(void __user *ukey, size_t keylen) static void *_copy_key_from_user(void __user *ukey, size_t keylen)
{ {
if (!ukey || keylen < MINKEYBLOBSIZE || keylen > KEYBLOBBUFSIZE) if (!ukey || keylen < MINKEYBLOBBUFSIZE || keylen > KEYBLOBBUFSIZE)
return ERR_PTR(-EINVAL); return ERR_PTR(-EINVAL);
return memdup_user(ukey, keylen); return memdup_user(ukey, keylen);
...@@ -1187,6 +1330,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd, ...@@ -1187,6 +1330,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
if (copy_from_user(&ksp, usp, sizeof(ksp))) if (copy_from_user(&ksp, usp, sizeof(ksp)))
return -EFAULT; return -EFAULT;
ksp.protkey.len = sizeof(ksp.protkey.protkey);
rc = cca_sec2protkey(ksp.cardnr, ksp.domain, rc = cca_sec2protkey(ksp.cardnr, ksp.domain,
ksp.seckey.seckey, ksp.protkey.protkey, ksp.seckey.seckey, ksp.protkey.protkey,
&ksp.protkey.len, &ksp.protkey.type); &ksp.protkey.len, &ksp.protkey.type);
...@@ -1203,8 +1347,10 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd, ...@@ -1203,8 +1347,10 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
if (copy_from_user(&kcp, ucp, sizeof(kcp))) if (copy_from_user(&kcp, ucp, sizeof(kcp)))
return -EFAULT; return -EFAULT;
rc = pkey_clr2protkey(kcp.keytype, kcp.protkey.len = sizeof(kcp.protkey.protkey);
&kcp.clrkey, &kcp.protkey); rc = pkey_clr2protkey(kcp.keytype, kcp.clrkey.clrkey,
kcp.protkey.protkey,
&kcp.protkey.len, &kcp.protkey.type);
DEBUG_DBG("%s pkey_clr2protkey()=%d\n", __func__, rc); DEBUG_DBG("%s pkey_clr2protkey()=%d\n", __func__, rc);
if (rc) if (rc)
break; break;
...@@ -1234,7 +1380,9 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd, ...@@ -1234,7 +1380,9 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
if (copy_from_user(&ksp, usp, sizeof(ksp))) if (copy_from_user(&ksp, usp, sizeof(ksp)))
return -EFAULT; return -EFAULT;
rc = pkey_skey2pkey(ksp.seckey.seckey, &ksp.protkey); ksp.protkey.len = sizeof(ksp.protkey.protkey);
rc = pkey_skey2pkey(ksp.seckey.seckey, ksp.protkey.protkey,
&ksp.protkey.len, &ksp.protkey.type);
DEBUG_DBG("%s pkey_skey2pkey()=%d\n", __func__, rc); DEBUG_DBG("%s pkey_skey2pkey()=%d\n", __func__, rc);
if (rc) if (rc)
break; break;
...@@ -1263,7 +1411,9 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd, ...@@ -1263,7 +1411,9 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
if (copy_from_user(&kgp, ugp, sizeof(kgp))) if (copy_from_user(&kgp, ugp, sizeof(kgp)))
return -EFAULT; return -EFAULT;
rc = pkey_genprotkey(kgp.keytype, &kgp.protkey); kgp.protkey.len = sizeof(kgp.protkey.protkey);
rc = pkey_genprotkey(kgp.keytype, kgp.protkey.protkey,
&kgp.protkey.len, &kgp.protkey.type);
DEBUG_DBG("%s pkey_genprotkey()=%d\n", __func__, rc); DEBUG_DBG("%s pkey_genprotkey()=%d\n", __func__, rc);
if (rc) if (rc)
break; break;
...@@ -1277,7 +1427,8 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd, ...@@ -1277,7 +1427,8 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
if (copy_from_user(&kvp, uvp, sizeof(kvp))) if (copy_from_user(&kvp, uvp, sizeof(kvp)))
return -EFAULT; return -EFAULT;
rc = pkey_verifyprotkey(&kvp.protkey); rc = pkey_verifyprotkey(kvp.protkey.protkey,
kvp.protkey.len, kvp.protkey.type);
DEBUG_DBG("%s pkey_verifyprotkey()=%d\n", __func__, rc); DEBUG_DBG("%s pkey_verifyprotkey()=%d\n", __func__, rc);
break; break;
} }
...@@ -1291,7 +1442,9 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd, ...@@ -1291,7 +1442,9 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
kkey = _copy_key_from_user(ktp.key, ktp.keylen); kkey = _copy_key_from_user(ktp.key, ktp.keylen);
if (IS_ERR(kkey)) if (IS_ERR(kkey))
return PTR_ERR(kkey); return PTR_ERR(kkey);
rc = pkey_keyblob2pkey(kkey, ktp.keylen, &ktp.protkey); ktp.protkey.len = sizeof(ktp.protkey.protkey);
rc = pkey_keyblob2pkey(kkey, ktp.keylen, ktp.protkey.protkey,
&ktp.protkey.len, &ktp.protkey.type);
DEBUG_DBG("%s pkey_keyblob2pkey()=%d\n", __func__, rc); DEBUG_DBG("%s pkey_keyblob2pkey()=%d\n", __func__, rc);
memzero_explicit(kkey, ktp.keylen); memzero_explicit(kkey, ktp.keylen);
kfree(kkey); kfree(kkey);
...@@ -1303,9 +1456,9 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd, ...@@ -1303,9 +1456,9 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
} }
case PKEY_GENSECK2: { case PKEY_GENSECK2: {
struct pkey_genseck2 __user *ugs = (void __user *)arg; struct pkey_genseck2 __user *ugs = (void __user *)arg;
size_t klen = KEYBLOBBUFSIZE;
struct pkey_genseck2 kgs; struct pkey_genseck2 kgs;
struct pkey_apqn *apqns; struct pkey_apqn *apqns;
size_t klen = KEYBLOBBUFSIZE;
u8 *kkey; u8 *kkey;
if (copy_from_user(&kgs, ugs, sizeof(kgs))) if (copy_from_user(&kgs, ugs, sizeof(kgs)))
...@@ -1345,9 +1498,9 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd, ...@@ -1345,9 +1498,9 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
} }
case PKEY_CLR2SECK2: { case PKEY_CLR2SECK2: {
struct pkey_clr2seck2 __user *ucs = (void __user *)arg; struct pkey_clr2seck2 __user *ucs = (void __user *)arg;
size_t klen = KEYBLOBBUFSIZE;
struct pkey_clr2seck2 kcs; struct pkey_clr2seck2 kcs;
struct pkey_apqn *apqns; struct pkey_apqn *apqns;
size_t klen = KEYBLOBBUFSIZE;
u8 *kkey; u8 *kkey;
if (copy_from_user(&kcs, ucs, sizeof(kcs))) if (copy_from_user(&kcs, ucs, sizeof(kcs)))
...@@ -1409,8 +1562,8 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd, ...@@ -1409,8 +1562,8 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
} }
case PKEY_KBLOB2PROTK2: { case PKEY_KBLOB2PROTK2: {
struct pkey_kblob2pkey2 __user *utp = (void __user *)arg; struct pkey_kblob2pkey2 __user *utp = (void __user *)arg;
struct pkey_kblob2pkey2 ktp;
struct pkey_apqn *apqns = NULL; struct pkey_apqn *apqns = NULL;
struct pkey_kblob2pkey2 ktp;
u8 *kkey; u8 *kkey;
if (copy_from_user(&ktp, utp, sizeof(ktp))) if (copy_from_user(&ktp, utp, sizeof(ktp)))
...@@ -1423,8 +1576,11 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd, ...@@ -1423,8 +1576,11 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
kfree(apqns); kfree(apqns);
return PTR_ERR(kkey); return PTR_ERR(kkey);
} }
ktp.protkey.len = sizeof(ktp.protkey.protkey);
rc = pkey_keyblob2pkey2(apqns, ktp.apqn_entries, rc = pkey_keyblob2pkey2(apqns, ktp.apqn_entries,
kkey, ktp.keylen, &ktp.protkey); kkey, ktp.keylen,
ktp.protkey.protkey, &ktp.protkey.len,
&ktp.protkey.type);
DEBUG_DBG("%s pkey_keyblob2pkey2()=%d\n", __func__, rc); DEBUG_DBG("%s pkey_keyblob2pkey2()=%d\n", __func__, rc);
kfree(apqns); kfree(apqns);
memzero_explicit(kkey, ktp.keylen); memzero_explicit(kkey, ktp.keylen);
...@@ -1437,8 +1593,8 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd, ...@@ -1437,8 +1593,8 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
} }
case PKEY_APQNS4K: { case PKEY_APQNS4K: {
struct pkey_apqns4key __user *uak = (void __user *)arg; struct pkey_apqns4key __user *uak = (void __user *)arg;
struct pkey_apqns4key kak;
struct pkey_apqn *apqns = NULL; struct pkey_apqn *apqns = NULL;
struct pkey_apqns4key kak;
size_t nr_apqns, len; size_t nr_apqns, len;
u8 *kkey; u8 *kkey;
...@@ -1486,8 +1642,8 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd, ...@@ -1486,8 +1642,8 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
} }
case PKEY_APQNS4KT: { case PKEY_APQNS4KT: {
struct pkey_apqns4keytype __user *uat = (void __user *)arg; struct pkey_apqns4keytype __user *uat = (void __user *)arg;
struct pkey_apqns4keytype kat;
struct pkey_apqn *apqns = NULL; struct pkey_apqn *apqns = NULL;
struct pkey_apqns4keytype kat;
size_t nr_apqns, len; size_t nr_apqns, len;
if (copy_from_user(&kat, uat, sizeof(kat))) if (copy_from_user(&kat, uat, sizeof(kat)))
...@@ -1528,9 +1684,9 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd, ...@@ -1528,9 +1684,9 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
} }
case PKEY_KBLOB2PROTK3: { case PKEY_KBLOB2PROTK3: {
struct pkey_kblob2pkey3 __user *utp = (void __user *)arg; struct pkey_kblob2pkey3 __user *utp = (void __user *)arg;
struct pkey_kblob2pkey3 ktp;
struct pkey_apqn *apqns = NULL;
u32 protkeylen = PROTKEYBLOBBUFSIZE; u32 protkeylen = PROTKEYBLOBBUFSIZE;
struct pkey_apqn *apqns = NULL;
struct pkey_kblob2pkey3 ktp;
u8 *kkey, *protkey; u8 *kkey, *protkey;
if (copy_from_user(&ktp, utp, sizeof(ktp))) if (copy_from_user(&ktp, utp, sizeof(ktp)))
...@@ -1549,9 +1705,9 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd, ...@@ -1549,9 +1705,9 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
kfree(kkey); kfree(kkey);
return -ENOMEM; return -ENOMEM;
} }
rc = pkey_keyblob2pkey3(apqns, ktp.apqn_entries, kkey, rc = pkey_keyblob2pkey3(apqns, ktp.apqn_entries,
ktp.keylen, &ktp.pkeytype, kkey, ktp.keylen,
protkey, &protkeylen); protkey, &protkeylen, &ktp.pkeytype);
DEBUG_DBG("%s pkey_keyblob2pkey3()=%d\n", __func__, rc); DEBUG_DBG("%s pkey_keyblob2pkey3()=%d\n", __func__, rc);
kfree(apqns); kfree(apqns);
memzero_explicit(kkey, ktp.keylen); memzero_explicit(kkey, ktp.keylen);
...@@ -1612,7 +1768,9 @@ static ssize_t pkey_protkey_aes_attr_read(u32 keytype, bool is_xts, char *buf, ...@@ -1612,7 +1768,9 @@ static ssize_t pkey_protkey_aes_attr_read(u32 keytype, bool is_xts, char *buf,
protkeytoken.version = TOKVER_PROTECTED_KEY; protkeytoken.version = TOKVER_PROTECTED_KEY;
protkeytoken.keytype = keytype; protkeytoken.keytype = keytype;
rc = pkey_genprotkey(protkeytoken.keytype, &protkey); protkey.len = sizeof(protkey.protkey);
rc = pkey_genprotkey(protkeytoken.keytype,
protkey.protkey, &protkey.len, &protkey.type);
if (rc) if (rc)
return rc; return rc;
...@@ -1622,7 +1780,10 @@ static ssize_t pkey_protkey_aes_attr_read(u32 keytype, bool is_xts, char *buf, ...@@ -1622,7 +1780,10 @@ static ssize_t pkey_protkey_aes_attr_read(u32 keytype, bool is_xts, char *buf,
memcpy(buf, &protkeytoken, sizeof(protkeytoken)); memcpy(buf, &protkeytoken, sizeof(protkeytoken));
if (is_xts) { if (is_xts) {
rc = pkey_genprotkey(protkeytoken.keytype, &protkey); /* xts needs a second protected key, reuse protkey struct */
protkey.len = sizeof(protkey.protkey);
rc = pkey_genprotkey(protkeytoken.keytype,
protkey.protkey, &protkey.len, &protkey.type);
if (rc) if (rc)
return rc; return rc;
...@@ -1717,8 +1878,8 @@ static struct attribute_group protkey_attr_group = { ...@@ -1717,8 +1878,8 @@ static struct attribute_group protkey_attr_group = {
static ssize_t pkey_ccadata_aes_attr_read(u32 keytype, bool is_xts, char *buf, static ssize_t pkey_ccadata_aes_attr_read(u32 keytype, bool is_xts, char *buf,
loff_t off, size_t count) loff_t off, size_t count)
{ {
int rc;
struct pkey_seckey *seckey = (struct pkey_seckey *)buf; struct pkey_seckey *seckey = (struct pkey_seckey *)buf;
int rc;
if (off != 0 || count < sizeof(struct secaeskeytoken)) if (off != 0 || count < sizeof(struct secaeskeytoken))
return -EINVAL; return -EINVAL;
...@@ -1824,9 +1985,9 @@ static ssize_t pkey_ccacipher_aes_attr_read(enum pkey_key_size keybits, ...@@ -1824,9 +1985,9 @@ static ssize_t pkey_ccacipher_aes_attr_read(enum pkey_key_size keybits,
bool is_xts, char *buf, loff_t off, bool is_xts, char *buf, loff_t off,
size_t count) size_t count)
{ {
int i, rc, card, dom;
u32 nr_apqns, *apqns = NULL;
size_t keysize = CCACIPHERTOKENSIZE; size_t keysize = CCACIPHERTOKENSIZE;
u32 nr_apqns, *apqns = NULL;
int i, rc, card, dom;
if (off != 0 || count < CCACIPHERTOKENSIZE) if (off != 0 || count < CCACIPHERTOKENSIZE)
return -EINVAL; return -EINVAL;
...@@ -1947,9 +2108,9 @@ static ssize_t pkey_ep11_aes_attr_read(enum pkey_key_size keybits, ...@@ -1947,9 +2108,9 @@ static ssize_t pkey_ep11_aes_attr_read(enum pkey_key_size keybits,
bool is_xts, char *buf, loff_t off, bool is_xts, char *buf, loff_t off,
size_t count) size_t count)
{ {
int i, rc, card, dom;
u32 nr_apqns, *apqns = NULL;
size_t keysize = MAXEP11AESKEYBLOBSIZE; size_t keysize = MAXEP11AESKEYBLOBSIZE;
u32 nr_apqns, *apqns = NULL;
int i, rc, card, dom;
if (off != 0 || count < MAXEP11AESKEYBLOBSIZE) if (off != 0 || count < MAXEP11AESKEYBLOBSIZE)
return -EINVAL; return -EINVAL;
......
...@@ -716,6 +716,7 @@ static int vfio_ap_mdev_probe(struct mdev_device *mdev) ...@@ -716,6 +716,7 @@ static int vfio_ap_mdev_probe(struct mdev_device *mdev)
ret = vfio_register_emulated_iommu_dev(&matrix_mdev->vdev); ret = vfio_register_emulated_iommu_dev(&matrix_mdev->vdev);
if (ret) if (ret)
goto err_put_vdev; goto err_put_vdev;
matrix_mdev->req_trigger = NULL;
dev_set_drvdata(&mdev->dev, matrix_mdev); dev_set_drvdata(&mdev->dev, matrix_mdev);
mutex_lock(&matrix_dev->mdevs_lock); mutex_lock(&matrix_dev->mdevs_lock);
list_add(&matrix_mdev->node, &matrix_dev->mdev_list); list_add(&matrix_mdev->node, &matrix_dev->mdev_list);
...@@ -1735,6 +1736,26 @@ static void vfio_ap_mdev_close_device(struct vfio_device *vdev) ...@@ -1735,6 +1736,26 @@ static void vfio_ap_mdev_close_device(struct vfio_device *vdev)
vfio_ap_mdev_unset_kvm(matrix_mdev); vfio_ap_mdev_unset_kvm(matrix_mdev);
} }
static void vfio_ap_mdev_request(struct vfio_device *vdev, unsigned int count)
{
struct device *dev = vdev->dev;
struct ap_matrix_mdev *matrix_mdev;
matrix_mdev = container_of(vdev, struct ap_matrix_mdev, vdev);
if (matrix_mdev->req_trigger) {
if (!(count % 10))
dev_notice_ratelimited(dev,
"Relaying device request to user (#%u)\n",
count);
eventfd_signal(matrix_mdev->req_trigger, 1);
} else if (count == 0) {
dev_notice(dev,
"No device request registered, blocked until released by user\n");
}
}
static int vfio_ap_mdev_get_device_info(unsigned long arg) static int vfio_ap_mdev_get_device_info(unsigned long arg)
{ {
unsigned long minsz; unsigned long minsz;
...@@ -1750,11 +1771,115 @@ static int vfio_ap_mdev_get_device_info(unsigned long arg) ...@@ -1750,11 +1771,115 @@ static int vfio_ap_mdev_get_device_info(unsigned long arg)
info.flags = VFIO_DEVICE_FLAGS_AP | VFIO_DEVICE_FLAGS_RESET; info.flags = VFIO_DEVICE_FLAGS_AP | VFIO_DEVICE_FLAGS_RESET;
info.num_regions = 0; info.num_regions = 0;
info.num_irqs = 0; info.num_irqs = VFIO_AP_NUM_IRQS;
return copy_to_user((void __user *)arg, &info, minsz) ? -EFAULT : 0; return copy_to_user((void __user *)arg, &info, minsz) ? -EFAULT : 0;
} }
static ssize_t vfio_ap_get_irq_info(unsigned long arg)
{
unsigned long minsz;
struct vfio_irq_info info;
minsz = offsetofend(struct vfio_irq_info, count);
if (copy_from_user(&info, (void __user *)arg, minsz))
return -EFAULT;
if (info.argsz < minsz || info.index >= VFIO_AP_NUM_IRQS)
return -EINVAL;
switch (info.index) {
case VFIO_AP_REQ_IRQ_INDEX:
info.count = 1;
info.flags = VFIO_IRQ_INFO_EVENTFD;
break;
default:
return -EINVAL;
}
return copy_to_user((void __user *)arg, &info, minsz) ? -EFAULT : 0;
}
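
Userspace discovers the new index the standard VFIO way before wiring anything up; a quick sketch (device_fd is assumed to be an already-open VFIO device fd for the matrix mdev):

        struct vfio_irq_info info = {
                .argsz = sizeof(info),
                .index = VFIO_AP_REQ_IRQ_INDEX,
        };

        if (ioctl(device_fd, VFIO_DEVICE_GET_IRQ_INFO, &info) == 0) {
                /* info.count == 1 and info.flags has VFIO_IRQ_INFO_EVENTFD,
                 * so the request IRQ can be backed by an eventfd */
        }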
static int vfio_ap_irq_set_init(struct vfio_irq_set *irq_set, unsigned long arg)
{
int ret;
size_t data_size;
unsigned long minsz;
minsz = offsetofend(struct vfio_irq_set, count);
if (copy_from_user(irq_set, (void __user *)arg, minsz))
return -EFAULT;
ret = vfio_set_irqs_validate_and_prepare(irq_set, 1, VFIO_AP_NUM_IRQS,
&data_size);
if (ret)
return ret;
if (!(irq_set->flags & VFIO_IRQ_SET_ACTION_TRIGGER))
return -EINVAL;
return 0;
}
static int vfio_ap_set_request_irq(struct ap_matrix_mdev *matrix_mdev,
unsigned long arg)
{
s32 fd;
void __user *data;
unsigned long minsz;
struct eventfd_ctx *req_trigger;
minsz = offsetofend(struct vfio_irq_set, count);
data = (void __user *)(arg + minsz);
if (get_user(fd, (s32 __user *)data))
return -EFAULT;
if (fd == -1) {
if (matrix_mdev->req_trigger)
eventfd_ctx_put(matrix_mdev->req_trigger);
matrix_mdev->req_trigger = NULL;
} else if (fd >= 0) {
req_trigger = eventfd_ctx_fdget(fd);
if (IS_ERR(req_trigger))
return PTR_ERR(req_trigger);
if (matrix_mdev->req_trigger)
eventfd_ctx_put(matrix_mdev->req_trigger);
matrix_mdev->req_trigger = req_trigger;
} else {
return -EINVAL;
}
return 0;
}
static int vfio_ap_set_irqs(struct ap_matrix_mdev *matrix_mdev,
unsigned long arg)
{
int ret;
struct vfio_irq_set irq_set;
ret = vfio_ap_irq_set_init(&irq_set, arg);
if (ret)
return ret;
switch (irq_set.flags & VFIO_IRQ_SET_DATA_TYPE_MASK) {
case VFIO_IRQ_SET_DATA_EVENTFD:
switch (irq_set.index) {
case VFIO_AP_REQ_IRQ_INDEX:
return vfio_ap_set_request_irq(matrix_mdev, arg);
default:
return -EINVAL;
}
default:
return -EINVAL;
}
}
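
Tying it together from the other side: userspace backs the request IRQ with an eventfd via VFIO_DEVICE_SET_IRQS and then waits on that fd; when vfio_ap_mdev_request() above signals it, the user is expected to stop using and close the device. A minimal sketch under the same device_fd assumption as before:

        #include <stdint.h>
        #include <stdlib.h>
        #include <string.h>
        #include <sys/eventfd.h>
        #include <sys/ioctl.h>
        #include <linux/vfio.h>

        static int register_req_irq(int device_fd)
        {
                size_t sz = sizeof(struct vfio_irq_set) + sizeof(int32_t);
                struct vfio_irq_set *set;
                int evfd;

                set = calloc(1, sz);
                if (!set)
                        return -1;
                evfd = eventfd(0, EFD_CLOEXEC);
                if (evfd < 0) {
                        free(set);
                        return -1;
                }
                set->argsz = sz;
                set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
                             VFIO_IRQ_SET_ACTION_TRIGGER;
                set->index = VFIO_AP_REQ_IRQ_INDEX;
                set->start = 0;
                set->count = 1;
                memcpy(set->data, &evfd, sizeof(int32_t));

                if (ioctl(device_fd, VFIO_DEVICE_SET_IRQS, set) < 0)
                        evfd = -1;      /* registration failed */
                free(set);
                return evfd;            /* poll()able request trigger */
        }

Passing an fd of -1 through the same ioctl drops the registration again, matching the fd == -1 branch in vfio_ap_set_request_irq(); on wakeup, the user reads the 8-byte counter from the eventfd and releases the device.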
static ssize_t vfio_ap_mdev_ioctl(struct vfio_device *vdev, static ssize_t vfio_ap_mdev_ioctl(struct vfio_device *vdev,
unsigned int cmd, unsigned long arg) unsigned int cmd, unsigned long arg)
{ {
...@@ -1770,6 +1895,12 @@ static ssize_t vfio_ap_mdev_ioctl(struct vfio_device *vdev, ...@@ -1770,6 +1895,12 @@ static ssize_t vfio_ap_mdev_ioctl(struct vfio_device *vdev,
case VFIO_DEVICE_RESET: case VFIO_DEVICE_RESET:
ret = vfio_ap_mdev_reset_queues(&matrix_mdev->qtable); ret = vfio_ap_mdev_reset_queues(&matrix_mdev->qtable);
break; break;
case VFIO_DEVICE_GET_IRQ_INFO:
ret = vfio_ap_get_irq_info(arg);
break;
case VFIO_DEVICE_SET_IRQS:
ret = vfio_ap_set_irqs(matrix_mdev, arg);
break;
default: default:
ret = -EOPNOTSUPP; ret = -EOPNOTSUPP;
break; break;
...@@ -1844,6 +1975,7 @@ static const struct vfio_device_ops vfio_ap_matrix_dev_ops = { ...@@ -1844,6 +1975,7 @@ static const struct vfio_device_ops vfio_ap_matrix_dev_ops = {
.bind_iommufd = vfio_iommufd_emulated_bind, .bind_iommufd = vfio_iommufd_emulated_bind,
.unbind_iommufd = vfio_iommufd_emulated_unbind, .unbind_iommufd = vfio_iommufd_emulated_unbind,
.attach_ioas = vfio_iommufd_emulated_attach_ioas, .attach_ioas = vfio_iommufd_emulated_attach_ioas,
.request = vfio_ap_mdev_request,
}; };
static struct mdev_driver vfio_ap_matrix_driver = { static struct mdev_driver vfio_ap_matrix_driver = {
......
...@@ -15,6 +15,7 @@ ...@@ -15,6 +15,7 @@
#include <linux/types.h> #include <linux/types.h>
#include <linux/mdev.h> #include <linux/mdev.h>
#include <linux/delay.h> #include <linux/delay.h>
#include <linux/eventfd.h>
#include <linux/mutex.h> #include <linux/mutex.h>
#include <linux/kvm_host.h> #include <linux/kvm_host.h>
#include <linux/vfio.h> #include <linux/vfio.h>
...@@ -103,6 +104,7 @@ struct ap_queue_table { ...@@ -103,6 +104,7 @@ struct ap_queue_table {
* PQAP(AQIC) instruction. * PQAP(AQIC) instruction.
* @mdev: the mediated device * @mdev: the mediated device
* @qtable: table of queues (struct vfio_ap_queue) assigned to the mdev * @qtable: table of queues (struct vfio_ap_queue) assigned to the mdev
* @req_trigger: eventfd ctx for signaling userspace to return a device
* @apm_add: bitmap of APIDs added to the host's AP configuration * @apm_add: bitmap of APIDs added to the host's AP configuration
* @aqm_add: bitmap of APQIs added to the host's AP configuration * @aqm_add: bitmap of APQIs added to the host's AP configuration
* @adm_add: bitmap of control domain numbers added to the host's AP * @adm_add: bitmap of control domain numbers added to the host's AP
...@@ -117,6 +119,7 @@ struct ap_matrix_mdev { ...@@ -117,6 +119,7 @@ struct ap_matrix_mdev {
crypto_hook pqap_hook; crypto_hook pqap_hook;
struct mdev_device *mdev; struct mdev_device *mdev;
struct ap_queue_table qtable; struct ap_queue_table qtable;
struct eventfd_ctx *req_trigger;
DECLARE_BITMAP(apm_add, AP_DEVICES); DECLARE_BITMAP(apm_add, AP_DEVICES);
DECLARE_BITMAP(aqm_add, AP_DOMAINS); DECLARE_BITMAP(aqm_add, AP_DOMAINS);
DECLARE_BITMAP(adm_add, AP_DOMAINS); DECLARE_BITMAP(adm_add, AP_DOMAINS);
......
...@@ -646,6 +646,15 @@ enum { ...@@ -646,6 +646,15 @@ enum {
VFIO_CCW_NUM_IRQS VFIO_CCW_NUM_IRQS
}; };
/*
* The vfio-ap bus driver makes use of the following IRQ index mapping.
* Unimplemented IRQ types return a count of zero.
*/
enum {
VFIO_AP_REQ_IRQ_INDEX,
VFIO_AP_NUM_IRQS
};
/** /**
* VFIO_DEVICE_GET_PCI_HOT_RESET_INFO - _IOWR(VFIO_TYPE, VFIO_BASE + 12, * VFIO_DEVICE_GET_PCI_HOT_RESET_INFO - _IOWR(VFIO_TYPE, VFIO_BASE + 12,
* struct vfio_pci_hot_reset_info) * struct vfio_pci_hot_reset_info)
......