Commit f479c01c authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux

Pull s390 updates from Martin Schwidefsky:
 "The bulk of the s390 updates for v3.14.

  New features are the perf support for the CPU-Measurement Sample
  Facility and the EP11 support for the crypto cards.  And the normal
  cleanups and bug-fixes"

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux: (44 commits)
  s390/cpum_sf: fix printk format warnings
  s390: Fix misspellings using 'codespell' tool
  s390/qdio: bridgeport support - CHSC part
  s390: delete new instances of __cpuinit usage
  s390/compat: fix PSW32_USER_BITS definition
  s390/zcrypt: add support for EP11 coprocessor cards
  s390/mm: optimize randomize_et_dyn for !PF_RANDOMIZE
  s390: use IS_ENABLED to check if a CONFIG is set to y or m
  s390/cio: use device_lock to synchronize calls to the ccwgroup driver
  s390/cio: use device_lock to synchronize calls to the ccw driver
  s390/cio: fix unlocked access of online member
  s390/cpum_sf: Add flag to process full SDBs only
  s390/cpum_sf: Add raw data sampling to support the diagnostic-sampling function
  s390/cpum_sf: Filter perf events based event->attr.exclude_* settings
  s390/cpum_sf: Detect KVM guest samples
  s390/cpum_sf: Add helper to read TOD from trailer entries
  s390/cpum_sf: Atomically reset trailer entry fields of sample-data-blocks
  s390/cpum_sf: Dynamically extend the sampling buffer if overflows occur
  s390/pci: reenable per default
  s390/pci/dma: fix accounting of allocated_pages
  ...
parents d8ec26d7 f85168e4
/*?
* Text: "Cryptographic device %x failed and was set offline\n"
* Severity: Error
* Parameter:
* @1: device index
* Description:
* A cryptographic device failed to process a cryptographic request.
* The cryptographic device driver could not correct the error and
* set the device offline. The application that issued the
* request received an indication that the request failed.
* User action:
* Use the lszcrypt command to confirm that the cryptographic
* hardware is still configured to your LPAR or z/VM guest virtual
* machine. If the device is available to your Linux instance the
* command output contains a line that begins with 'card<device index>',
* where <device index> is the two-digit decimal number in the message text.
* After ensuring that the device is available, use the chzcrypt command to
* set it online again.
* If the error persists, contact your support organization.
*/
......@@ -38,7 +38,8 @@
#define PSW32_USER_BITS (PSW32_MASK_DAT | PSW32_MASK_IO | PSW32_MASK_EXT | \
PSW32_DEFAULT_KEY | PSW32_MASK_BASE | \
PSW32_MASK_MCHECK | PSW32_MASK_PSTATE | PSW32_ASC_HOME)
PSW32_MASK_MCHECK | PSW32_MASK_PSTATE | \
PSW32_ASC_PRIMARY)
#define COMPAT_USER_HZ 100
#define COMPAT_UTS_MACHINE "s390\0\0\0\0"
......
......@@ -56,6 +56,96 @@ struct cpumf_ctr_info {
u32 reserved2[12];
} __packed;
/* QUERY SAMPLING INFORMATION block */
struct hws_qsi_info_block { /* Bit(s) */
unsigned int b0_13:14; /* 0-13: zeros */
unsigned int as:1; /* 14: basic-sampling authorization */
unsigned int ad:1; /* 15: diag-sampling authorization */
unsigned int b16_21:6; /* 16-21: zeros */
unsigned int es:1; /* 22: basic-sampling enable control */
unsigned int ed:1; /* 23: diag-sampling enable control */
unsigned int b24_29:6; /* 24-29: zeros */
unsigned int cs:1; /* 30: basic-sampling activation control */
unsigned int cd:1; /* 31: diag-sampling activation control */
unsigned int bsdes:16; /* 4-5: size of basic sampling entry */
unsigned int dsdes:16; /* 6-7: size of diagnostic sampling entry */
unsigned long min_sampl_rate; /* 8-15: minimum sampling interval */
unsigned long max_sampl_rate; /* 16-23: maximum sampling interval*/
unsigned long tear; /* 24-31: TEAR contents */
unsigned long dear; /* 32-39: DEAR contents */
unsigned int rsvrd0; /* 40-43: reserved */
unsigned int cpu_speed; /* 44-47: CPU speed */
unsigned long long rsvrd1; /* 48-55: reserved */
unsigned long long rsvrd2; /* 56-63: reserved */
} __packed;
/* SET SAMPLING CONTROLS request block */
struct hws_lsctl_request_block {
unsigned int s:1; /* 0: maximum buffer indicator */
unsigned int h:1; /* 1: part. level reserved for VM use*/
unsigned long long b2_53:52;/* 2-53: zeros */
unsigned int es:1; /* 54: basic-sampling enable control */
unsigned int ed:1; /* 55: diag-sampling enable control */
unsigned int b56_61:6; /* 56-61: - zeros */
unsigned int cs:1; /* 62: basic-sampling activation control */
unsigned int cd:1; /* 63: diag-sampling activation control */
unsigned long interval; /* 8-15: sampling interval */
unsigned long tear; /* 16-23: TEAR contents */
unsigned long dear; /* 24-31: DEAR contents */
/* 32-63: */
unsigned long rsvrd1; /* reserved */
unsigned long rsvrd2; /* reserved */
unsigned long rsvrd3; /* reserved */
unsigned long rsvrd4; /* reserved */
} __packed;
struct hws_basic_entry {
unsigned int def:16; /* 0-15 Data Entry Format */
unsigned int R:4; /* 16-19 reserved */
unsigned int U:4; /* 20-23 Number of unique instruct. */
unsigned int z:2; /* zeros */
unsigned int T:1; /* 26 PSW DAT mode */
unsigned int W:1; /* 27 PSW wait state */
unsigned int P:1; /* 28 PSW Problem state */
unsigned int AS:2; /* 29-30 PSW address-space control */
unsigned int I:1; /* 31 entry valid or invalid */
unsigned int:16;
unsigned int prim_asn:16; /* primary ASN */
unsigned long long ia; /* Instruction Address */
unsigned long long gpp; /* Guest Program Parameter */
unsigned long long hpp; /* Host Program Parameter */
} __packed;
struct hws_diag_entry {
unsigned int def:16; /* 0-15 Data Entry Format */
unsigned int R:14; /* 16-19 and 20-30 reserved */
unsigned int I:1; /* 31 entry valid or invalid */
u8 data[]; /* Machine-dependent sample data */
} __packed;
struct hws_combined_entry {
struct hws_basic_entry basic; /* Basic-sampling data entry */
struct hws_diag_entry diag; /* Diagnostic-sampling data entry */
} __packed;
struct hws_trailer_entry {
union {
struct {
unsigned int f:1; /* 0 - Block Full Indicator */
unsigned int a:1; /* 1 - Alert request control */
unsigned int t:1; /* 2 - Timestamp format */
unsigned long long:61; /* 3 - 63: Reserved */
};
unsigned long long flags; /* 0 - 63: All indicators */
};
unsigned long long overflow; /* 64 - sample Overflow count */
unsigned char timestamp[16]; /* 16 - 31 timestamp */
unsigned long long reserved1; /* 32 -Reserved */
unsigned long long reserved2; /* */
unsigned long long progusage1; /* 48 - reserved for programming use */
unsigned long long progusage2; /* */
} __packed;
/* Query counter information */
static inline int qctri(struct cpumf_ctr_info *info)
{
......@@ -99,4 +189,95 @@ static inline int ecctr(u64 ctr, u64 *val)
return cc;
}
/* Query sampling information */
static inline int qsi(struct hws_qsi_info_block *info)
{
int cc;
cc = 1;
asm volatile(
"0: .insn s,0xb2860000,0(%1)\n"
"1: lhi %0,0\n"
"2:\n"
EX_TABLE(0b, 2b) EX_TABLE(1b, 2b)
: "=d" (cc), "+a" (info)
: "m" (*info)
: "cc", "memory");
return cc ? -EINVAL : 0;
}
/* Load sampling controls */
static inline int lsctl(struct hws_lsctl_request_block *req)
{
int cc;
cc = 1;
asm volatile(
"0: .insn s,0xb2870000,0(%1)\n"
"1: ipm %0\n"
" srl %0,28\n"
"2:\n"
EX_TABLE(0b, 2b) EX_TABLE(1b, 2b)
: "+d" (cc), "+a" (req)
: "m" (*req)
: "cc", "memory");
return cc ? -EINVAL : 0;
}
/* Sampling control helper functions */
#include <linux/time.h>
static inline unsigned long freq_to_sample_rate(struct hws_qsi_info_block *qsi,
unsigned long freq)
{
return (USEC_PER_SEC / freq) * qsi->cpu_speed;
}
static inline unsigned long sample_rate_to_freq(struct hws_qsi_info_block *qsi,
unsigned long rate)
{
return USEC_PER_SEC * qsi->cpu_speed / rate;
}
#define SDB_TE_ALERT_REQ_MASK 0x4000000000000000UL
#define SDB_TE_BUFFER_FULL_MASK 0x8000000000000000UL
/* Return TOD timestamp contained in a trailer entry */
static inline unsigned long long trailer_timestamp(struct hws_trailer_entry *te)
{
/* TOD in STCKE format */
if (te->t)
return *((unsigned long long *) &te->timestamp[1]);
/* TOD in STCK format */
return *((unsigned long long *) &te->timestamp[0]);
}
/* Return pointer to trailer entry of a sample data block */
static inline unsigned long *trailer_entry_ptr(unsigned long v)
{
void *ret;
ret = (void *) v;
ret += PAGE_SIZE;
ret -= sizeof(struct hws_trailer_entry);
return (unsigned long *) ret;
}
/* Return if the entry in the sample data block table (sdbt)
* is a link to the next sdbt */
static inline int is_link_entry(unsigned long *s)
{
return *s & 0x1ul ? 1 : 0;
}
/* Return pointer to the linked sdbt */
static inline unsigned long *get_next_sdbt(unsigned long *s)
{
return (unsigned long *) (*s & ~0x1ul);
}
#endif /* _ASM_S390_CPU_MF_H */
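Editor's note: the two helpers above encode the SDB-table layout — each table entry either holds the address of a sample-data-block or, with bit 0 set, a link to the next table. A minimal sketch of how a consumer could walk such a chain, assuming the last link closes the ring back to the first table (as the sampling buffer in this merge arranges); walk_sdbt itself is illustrative and not part of the patch:

static void walk_sdbt(unsigned long *sdbt_begin)
{
	struct hws_trailer_entry *te;
	unsigned long *sdbt = sdbt_begin;

	do {
		if (is_link_entry(sdbt)) {
			/* Follow the link to the next SDB table */
			sdbt = get_next_sdbt(sdbt);
			continue;
		}
		/* *sdbt holds the address of a sample-data-block (SDB);
		 * its trailer entry sits at the end of that 4K page. */
		te = (struct hws_trailer_entry *) trailer_entry_ptr(*sdbt);
		if (te->f) {
			/* block-full indicator set: samples can be consumed */
		}
		sdbt++;
	} while (sdbt != sdbt_begin);
}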
......@@ -29,6 +29,8 @@ struct css_general_char {
u32 fcx : 1; /* bit 88 */
u32 : 19;
u32 alt_ssi : 1; /* bit 108 */
u32:1;
u32 narf:1; /* bit 110 */
} __packed;
extern struct css_general_char css_general_characteristics;
......
......@@ -144,6 +144,7 @@ int clp_disable_fh(struct zpci_dev *);
void zpci_event_error(void *);
void zpci_event_availability(void *);
void zpci_rescan(void);
bool zpci_is_enabled(void);
#else /* CONFIG_PCI */
static inline void zpci_event_error(void *e) {}
static inline void zpci_event_availability(void *e) {}
......
/*
* Performance event support - s390 specific definitions.
*
* Copyright IBM Corp. 2009, 2012
* Copyright IBM Corp. 2009, 2013
* Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
* Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
*/
#include <asm/cpu_mf.h>
#ifndef _ASM_S390_PERF_EVENT_H
#define _ASM_S390_PERF_EVENT_H
/* CPU-measurement counter facility */
#define PERF_CPUM_CF_MAX_CTR 256
#ifdef CONFIG_64BIT
#include <linux/perf_event.h>
#include <linux/device.h>
#include <asm/cpu_mf.h>
/* Per-CPU flags for PMU states */
#define PMU_F_RESERVED 0x1000
#define PMU_F_ENABLED 0x2000
#define PMU_F_IN_USE 0x4000
#define PMU_F_ERR_IBE 0x0100
#define PMU_F_ERR_LSDA 0x0200
#define PMU_F_ERR_MASK (PMU_F_ERR_IBE|PMU_F_ERR_LSDA)
/* Perf definitions for PMU event attributes in sysfs */
extern __init const struct attribute_group **cpumf_cf_event_group(void);
extern ssize_t cpumf_events_sysfs_show(struct device *dev,
struct device_attribute *attr,
char *page);
#define EVENT_VAR(_cat, _name) event_attr_##_cat##_##_name
#define EVENT_PTR(_cat, _name) (&EVENT_VAR(_cat, _name).attr.attr)
#define CPUMF_EVENT_ATTR(cat, name, id) \
PMU_EVENT_ATTR(name, EVENT_VAR(cat, name), id, cpumf_events_sysfs_show)
#define CPUMF_EVENT_PTR(cat, name) EVENT_PTR(cat, name)
#ifdef CONFIG_64BIT
/* Perf callbacks */
struct pt_regs;
......@@ -23,4 +42,55 @@ extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
extern unsigned long perf_misc_flags(struct pt_regs *regs);
#define perf_misc_flags(regs) perf_misc_flags(regs)
/* Perf pt_regs extension for sample-data-entry indicators */
struct perf_sf_sde_regs {
unsigned char in_guest:1; /* guest sample */
unsigned long reserved:63; /* reserved */
};
/* Perf PMU definitions for the counter facility */
#define PERF_CPUM_CF_MAX_CTR 256
/* Perf PMU definitions for the sampling facility */
#define PERF_CPUM_SF_MAX_CTR 2
#define PERF_EVENT_CPUM_SF 0xB0000UL /* Event: Basic-sampling */
#define PERF_EVENT_CPUM_SF_DIAG 0xBD000UL /* Event: Combined-sampling */
#define PERF_CPUM_SF_BASIC_MODE 0x0001 /* Basic-sampling flag */
#define PERF_CPUM_SF_DIAG_MODE 0x0002 /* Diagnostic-sampling flag */
#define PERF_CPUM_SF_MODE_MASK (PERF_CPUM_SF_BASIC_MODE| \
PERF_CPUM_SF_DIAG_MODE)
#define PERF_CPUM_SF_FULL_BLOCKS 0x0004 /* Process full SDBs only */
#define REG_NONE 0
#define REG_OVERFLOW 1
#define OVERFLOW_REG(hwc) ((hwc)->extra_reg.config)
#define SFB_ALLOC_REG(hwc) ((hwc)->extra_reg.alloc)
#define RAWSAMPLE_REG(hwc) ((hwc)->config)
#define TEAR_REG(hwc) ((hwc)->last_tag)
#define SAMPL_RATE(hwc) ((hwc)->event_base)
#define SAMPL_FLAGS(hwc) ((hwc)->config_base)
#define SAMPL_DIAG_MODE(hwc) (SAMPL_FLAGS(hwc) & PERF_CPUM_SF_DIAG_MODE)
#define SDB_FULL_BLOCKS(hwc) (SAMPL_FLAGS(hwc) & PERF_CPUM_SF_FULL_BLOCKS)
/* Structure for sampling data entries to be passed as perf raw sample data
* to user space. Note that raw sample data must be aligned and, thus, might
* be padded with zeros.
*/
struct sf_raw_sample {
#define SF_RAW_SAMPLE_BASIC PERF_CPUM_SF_BASIC_MODE
#define SF_RAW_SAMPLE_DIAG PERF_CPUM_SF_DIAG_MODE
u64 format;
u32 size; /* Size of sf_raw_sample */
u16 bsdes; /* Basic-sampling data entry size */
u16 dsdes; /* Diagnostic-sampling data entry size */
struct hws_basic_entry basic; /* Basic-sampling data entry */
struct hws_diag_entry diag; /* Diagnostic-sampling data entry */
u8 padding[]; /* Padding to next multiple of 8 */
} __packed;
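Editor's note: to make the padding remark above concrete, the raw-sample size handed to perf is the fixed 16-byte header plus the two entry sizes, rounded up to a multiple of 8. A hypothetical helper (sf_raw_sample_size is not part of the patch) might look like:

/* Hypothetical: overall sf_raw_sample size for the given entry sizes,
 * padded with zeros up to the next multiple of 8 bytes. */
static u32 sf_raw_sample_size(u16 bsdes, u16 dsdes)
{
	/* fixed header: format (8) + size (4) + bsdes (2) + dsdes (2) */
	u32 size = offsetof(struct sf_raw_sample, basic) + bsdes + dsdes;

	return ALIGN(size, 8);
}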
/* Perf hardware reserve and release functions */
int perf_reserve_sampling(void);
void perf_release_sampling(void);
#endif /* CONFIG_64BIT */
#endif /* _ASM_S390_PERF_EVENT_H */
......@@ -336,7 +336,7 @@ typedef void qdio_handler_t(struct ccw_device *, unsigned int, int,
#define QDIO_FLAG_CLEANUP_USING_HALT 0x02
/**
* struct qdio_initialize - qdio initalization data
* struct qdio_initialize - qdio initialization data
* @cdev: associated ccw device
* @q_format: queue format
* @adapter_name: name for the adapter
......@@ -378,6 +378,34 @@ struct qdio_initialize {
struct qdio_outbuf_state *output_sbal_state_array;
};
/**
* enum qdio_brinfo_entry_type - type of address entry for qdio_brinfo_desc()
* @l3_ipv6_addr: entry contains IPv6 address
* @l3_ipv4_addr: entry contains IPv4 address
* @l2_addr_lnid: entry contains MAC address and VLAN ID
*/
enum qdio_brinfo_entry_type {l3_ipv6_addr, l3_ipv4_addr, l2_addr_lnid};
/**
* struct qdio_brinfo_entry_XXX - Address entry for qdio_brinfo_desc()
* @nit: Network interface token
* @addr: Address of one of the three types
*
* The struct is passed to the callback function by qdio_brinfo_desc()
*/
struct qdio_brinfo_entry_l3_ipv6 {
u64 nit;
struct { unsigned char _s6_addr[16]; } addr;
} __packed;
struct qdio_brinfo_entry_l3_ipv4 {
u64 nit;
struct { uint32_t _s_addr; } addr;
} __packed;
struct qdio_brinfo_entry_l2 {
u64 nit;
struct { u8 mac[6]; u16 lnid; } addr_lnid;
} __packed;
#define QDIO_STATE_INACTIVE 0x00000002 /* after qdio_cleanup */
#define QDIO_STATE_ESTABLISHED 0x00000004 /* after qdio_establish */
#define QDIO_STATE_ACTIVE 0x00000008 /* after qdio_activate */
......@@ -399,5 +427,10 @@ extern int qdio_get_next_buffers(struct ccw_device *, int, int *, int *);
extern int qdio_shutdown(struct ccw_device *, int);
extern int qdio_free(struct ccw_device *);
extern int qdio_get_ssqd_desc(struct ccw_device *, struct qdio_ssqd_desc *);
extern int qdio_pnso_brinfo(struct subchannel_id schid,
int cnc, u16 *response,
void (*cb)(void *priv, enum qdio_brinfo_entry_type type,
void *entry),
void *priv);
#endif /* __QDIO_H__ */
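Editor's note: for orientation, a callback matching the qdio_pnso_brinfo() prototype above could dispatch on the reported entry type like this (my_brinfo_cb and the pr_debug bodies are illustrative only):

static void my_brinfo_cb(void *priv, enum qdio_brinfo_entry_type type,
			 void *entry)
{
	switch (type) {
	case l3_ipv6_addr: {
		struct qdio_brinfo_entry_l3_ipv6 *e = entry;

		pr_debug("nit=%016llx (IPv6 entry)\n",
			 (unsigned long long) e->nit);
		break;
	}
	case l3_ipv4_addr: {
		struct qdio_brinfo_entry_l3_ipv4 *e = entry;

		pr_debug("nit=%016llx (IPv4 entry)\n",
			 (unsigned long long) e->nit);
		break;
	}
	case l2_addr_lnid: {
		struct qdio_brinfo_entry_l2 *e = entry;

		pr_debug("nit=%016llx lnid=%u\n",
			 (unsigned long long) e->nit, e->addr_lnid.lnid);
		break;
	}
	}
}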
......@@ -52,8 +52,8 @@ int sclp_chp_configure(struct chp_id chpid);
int sclp_chp_deconfigure(struct chp_id chpid);
int sclp_chp_read_info(struct sclp_chp_info *info);
void sclp_get_ipl_info(struct sclp_ipl_info *info);
bool sclp_has_linemode(void);
bool sclp_has_vt220(void);
bool __init sclp_has_linemode(void);
bool __init sclp_has_vt220(void);
int sclp_pci_configure(u32 fid);
int sclp_pci_deconfigure(u32 fid);
int memcpy_hsa(void *dest, unsigned long src, size_t count, int mode);
......
......@@ -154,6 +154,67 @@ struct ica_xcRB {
unsigned short priority_window;
unsigned int status;
} __attribute__((packed));
/**
* struct ep11_cprb - EP11 connectivity programming request block
* @cprb_len: CPRB header length [0x0020]
* @cprb_ver_id: CPRB version id. [0x04]
* @pad_000: Alignment pad bytes
* @flags: Admin cmd [0x80] or functional cmd [0x00]
* @func_id: Function id / subtype [0x5434]
* @source_id: Source id [originator id]
* @target_id: Target id [usage/ctrl domain id]
* @ret_code: Return code
* @reserved1: Reserved
* @reserved2: Reserved
* @payload_len: Payload length
*/
struct ep11_cprb {
uint16_t cprb_len;
unsigned char cprb_ver_id;
unsigned char pad_000[2];
unsigned char flags;
unsigned char func_id[2];
uint32_t source_id;
uint32_t target_id;
uint32_t ret_code;
uint32_t reserved1;
uint32_t reserved2;
uint32_t payload_len;
} __attribute__((packed));
/**
* struct ep11_target_dev - EP11 target device list
* @ap_id: AP device id
* @dom_id: Usage domain id
*/
struct ep11_target_dev {
uint16_t ap_id;
uint16_t dom_id;
};
/**
* struct ep11_urb - EP11 user request block
* @targets_num: Number of target adapters
* @targets: Addr to target adapter list
* @weight: Level of request priority
* @req_no: Request id/number
* @req_len: Request length
* @req: Addr to request block
* @resp_len: Response length
* @resp: Addr to response block
*/
struct ep11_urb {
uint16_t targets_num;
uint64_t targets;
uint64_t weight;
uint64_t req_no;
uint64_t req_len;
uint64_t req;
uint64_t resp_len;
uint64_t resp;
} __attribute__((packed));
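Editor's note: a rough sketch of filling the URB for a single target before handing it to the driver (ep11_urb_fill is made up; real callers also pick weight and req_no to suit their needs):

/* Hypothetical helper: point an EP11 URB at one adapter/domain pair
 * and at caller-provided request/response buffers. */
static void ep11_urb_fill(struct ep11_urb *urb, struct ep11_target_dev *tgt,
			  void *req, uint64_t req_len,
			  void *resp, uint64_t resp_len)
{
	urb->targets_num = 1;
	urb->targets = (uint64_t)(unsigned long) tgt;
	urb->weight = 0;	/* default priority */
	urb->req_no = 0;	/* caller-chosen request number */
	urb->req_len = req_len;
	urb->req = (uint64_t)(unsigned long) req;
	urb->resp_len = resp_len;
	urb->resp = (uint64_t)(unsigned long) resp;
}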
#define AUTOSELECT ((unsigned int)0xFFFFFFFF)
#define ZCRYPT_IOCTL_MAGIC 'z'
......@@ -183,6 +244,9 @@ struct ica_xcRB {
* ZSECSENDCPRB
* Send an arbitrary CPRB to a crypto card.
*
* ZSENDEP11CPRB
* Send an arbitrary EP11 CPRB to an EP11 coprocessor crypto card.
*
* Z90STAT_STATUS_MASK
* Return an 64 element array of unsigned chars for the status of
* all devices.
......@@ -256,6 +320,7 @@ struct ica_xcRB {
#define ICARSAMODEXPO _IOC(_IOC_READ|_IOC_WRITE, ZCRYPT_IOCTL_MAGIC, 0x05, 0)
#define ICARSACRT _IOC(_IOC_READ|_IOC_WRITE, ZCRYPT_IOCTL_MAGIC, 0x06, 0)
#define ZSECSENDCPRB _IOC(_IOC_READ|_IOC_WRITE, ZCRYPT_IOCTL_MAGIC, 0x81, 0)
#define ZSENDEP11CPRB _IOC(_IOC_READ|_IOC_WRITE, ZCRYPT_IOCTL_MAGIC, 0x04, 0)
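Editor's note: putting the pieces together from user space — a sketch that assumes the standard zcrypt device node /dev/z90crypt and trims error handling:

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>

static int send_ep11_cprb(struct ep11_urb *urb)
{
	int fd, rc;

	fd = open("/dev/z90crypt", O_RDWR);
	if (fd < 0)
		return -1;
	rc = ioctl(fd, ZSENDEP11CPRB, urb);	/* 0 on success */
	close(fd);
	return rc;
}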
/* New status calls */
#define Z90STAT_TOTALCOUNT _IOR(ZCRYPT_IOCTL_MAGIC, 0x40, int)
......
......@@ -60,7 +60,8 @@ obj-$(CONFIG_FTRACE_SYSCALLS) += ftrace.o
obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
ifdef CONFIG_64BIT
obj-$(CONFIG_PERF_EVENTS) += perf_event.o perf_cpum_cf.o
obj-$(CONFIG_PERF_EVENTS) += perf_event.o perf_cpum_cf.o perf_cpum_sf.o \
perf_cpum_cf_events.o
obj-y += runtime_instr.o cache.o
endif
......
......@@ -412,8 +412,9 @@ static int setup_rt_frame32(int sig, struct k_sigaction *ka, siginfo_t *info,
regs->gprs[14] = (__u64 __force) ka->sa.sa_restorer | PSW32_ADDR_AMODE;
} else {
regs->gprs[14] = (__u64 __force) frame->retcode | PSW32_ADDR_AMODE;
err |= __put_user(S390_SYSCALL_OPCODE | __NR_rt_sigreturn,
(u16 __force __user *)(frame->retcode));
if (__put_user(S390_SYSCALL_OPCODE | __NR_rt_sigreturn,
(u16 __force __user *)(frame->retcode)))
goto give_sigsegv;
}
/* Set up backchain. */
......
......@@ -74,7 +74,7 @@ _TIF_TRACE = (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \
.endm
.macro LPP newpp
#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE)
#if IS_ENABLED(CONFIG_KVM)
tm __LC_MACHINE_FLAGS+6,0x20 # MACHINE_FLAG_LPP
jz .+8
.insn s,0xb2800000,\newpp
......@@ -82,7 +82,7 @@ _TIF_TRACE = (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \
.endm
.macro HANDLE_SIE_INTERCEPT scratch,reason
#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE)
#if IS_ENABLED(CONFIG_KVM)
tmhh %r8,0x0001 # interrupting from user ?
jnz .+62
lgr \scratch,%r9
......@@ -946,7 +946,7 @@ cleanup_idle_insn:
.quad __critical_end - __critical_start
#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE)
#if IS_ENABLED(CONFIG_KVM)
/*
* sie64a calling convention:
* %r2 pointer to sie control block
......@@ -975,7 +975,7 @@ sie_done:
lctlg %c1,%c1,__LC_USER_ASCE # load primary asce
# some program checks are suppressing. C code (e.g. do_protection_exception)
# will rewind the PSW by the ILC, which is 4 bytes in case of SIE. Other
# instructions beween sie64a and sie_done should not cause program
# instructions between sie64a and sie_done should not cause program
# interrupts. So lets use a nop (47 00 00 00) as a landing pad.
# See also HANDLE_SIE_INTERCEPT
rewind_pad:
......
......@@ -680,6 +680,7 @@ static int __init cpumf_pmu_init(void)
goto out;
}
cpumf_pmu.attr_groups = cpumf_cf_event_group();
rc = perf_pmu_register(&cpumf_pmu, "cpum_cf", PERF_TYPE_RAW);
if (rc) {
pr_err("Registering the cpum_cf PMU failed with rc=%i\n", rc);
......
/*
* Performance event support for s390x
*
* Copyright IBM Corp. 2012
* Copyright IBM Corp. 2012, 2013
* Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
*
* This program is free software; you can redistribute it and/or modify
......@@ -16,15 +16,19 @@
#include <linux/kvm_host.h>
#include <linux/percpu.h>
#include <linux/export.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/sysfs.h>
#include <asm/irq.h>
#include <asm/cpu_mf.h>
#include <asm/lowcore.h>
#include <asm/processor.h>
#include <asm/sysinfo.h>
const char *perf_pmu_name(void)
{
if (cpum_cf_avail() || cpum_sf_avail())
return "CPU-measurement facilities (CPUMF)";
return "CPU-Measurement Facilities (CPU-MF)";
return "pmu";
}
EXPORT_SYMBOL(perf_pmu_name);
......@@ -35,6 +39,8 @@ int perf_num_counters(void)
if (cpum_cf_avail())
num += PERF_CPUM_CF_MAX_CTR;
if (cpum_sf_avail())
num += PERF_CPUM_SF_MAX_CTR;
return num;
}
......@@ -54,7 +60,7 @@ static bool is_in_guest(struct pt_regs *regs)
{
if (user_mode(regs))
return false;
#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE)
#if IS_ENABLED(CONFIG_KVM)
return instruction_pointer(regs) == (unsigned long) &sie_exit;
#else
return false;
......@@ -83,8 +89,31 @@ static unsigned long perf_misc_guest_flags(struct pt_regs *regs)
: PERF_RECORD_MISC_GUEST_KERNEL;
}
static unsigned long perf_misc_flags_sf(struct pt_regs *regs)
{
struct perf_sf_sde_regs *sde_regs;
unsigned long flags;
sde_regs = (struct perf_sf_sde_regs *) &regs->int_parm_long;
if (sde_regs->in_guest)
flags = user_mode(regs) ? PERF_RECORD_MISC_GUEST_USER
: PERF_RECORD_MISC_GUEST_KERNEL;
else
flags = user_mode(regs) ? PERF_RECORD_MISC_USER
: PERF_RECORD_MISC_KERNEL;
return flags;
}
unsigned long perf_misc_flags(struct pt_regs *regs)
{
/* Check if the cpum_sf PMU has created the pt_regs structure.
* In this case, perf misc flags can be easily extracted. Otherwise,
* do regular checks on the pt_regs content.
*/
if (regs->int_code == 0x1407 && regs->int_parm == CPU_MF_INT_SF_PRA)
if (!regs->gprs[15])
return perf_misc_flags_sf(regs);
if (is_in_guest(regs))
return perf_misc_guest_flags(regs);
......@@ -92,27 +121,107 @@ unsigned long perf_misc_flags(struct pt_regs *regs)
: PERF_RECORD_MISC_KERNEL;
}
void perf_event_print_debug(void)
void print_debug_cf(void)
{
struct cpumf_ctr_info cf_info;
unsigned long flags;
int cpu;
if (!cpum_cf_avail())
return;
local_irq_save(flags);
int cpu = smp_processor_id();
cpu = smp_processor_id();
memset(&cf_info, 0, sizeof(cf_info));
if (!qctri(&cf_info))
pr_info("CPU[%i] CPUM_CF: ver=%u.%u A=%04x E=%04x C=%04x\n",
cpu, cf_info.cfvn, cf_info.csvn,
cf_info.auth_ctl, cf_info.enable_ctl, cf_info.act_ctl);
}
static void print_debug_sf(void)
{
struct hws_qsi_info_block si;
int cpu = smp_processor_id();
memset(&si, 0, sizeof(si));
if (qsi(&si))
return;
pr_info("CPU[%i] CPUM_SF: basic=%i diag=%i min=%lu max=%lu cpu_speed=%u\n",
cpu, si.as, si.ad, si.min_sampl_rate, si.max_sampl_rate,
si.cpu_speed);
if (si.as)
pr_info("CPU[%i] CPUM_SF: Basic-sampling: a=%i e=%i c=%i"
" bsdes=%i tear=%016lx dear=%016lx\n", cpu,
si.as, si.es, si.cs, si.bsdes, si.tear, si.dear);
if (si.ad)
pr_info("CPU[%i] CPUM_SF: Diagnostic-sampling: a=%i e=%i c=%i"
" dsdes=%i tear=%016lx dear=%016lx\n", cpu,
si.ad, si.ed, si.cd, si.dsdes, si.tear, si.dear);
}
void perf_event_print_debug(void)
{
unsigned long flags;
local_irq_save(flags);
if (cpum_cf_avail())
print_debug_cf();
if (cpum_sf_avail())
print_debug_sf();
local_irq_restore(flags);
}
/* Service level infrastructure */
static void sl_print_counter(struct seq_file *m)
{
struct cpumf_ctr_info ci;
memset(&ci, 0, sizeof(ci));
if (qctri(&ci))
return;
seq_printf(m, "CPU-MF: Counter facility: version=%u.%u "
"authorization=%04x\n", ci.cfvn, ci.csvn, ci.auth_ctl);
}
static void sl_print_sampling(struct seq_file *m)
{
struct hws_qsi_info_block si;
memset(&si, 0, sizeof(si));
if (qsi(&si))
return;
if (!si.as && !si.ad)
return;
seq_printf(m, "CPU-MF: Sampling facility: min_rate=%lu max_rate=%lu"
" cpu_speed=%u\n", si.min_sampl_rate, si.max_sampl_rate,
si.cpu_speed);
if (si.as)
seq_printf(m, "CPU-MF: Sampling facility: mode=basic"
" sample_size=%u\n", si.bsdes);
if (si.ad)
seq_printf(m, "CPU-MF: Sampling facility: mode=diagnostic"
" sample_size=%u\n", si.dsdes);
}
static void service_level_perf_print(struct seq_file *m,
struct service_level *sl)
{
if (cpum_cf_avail())
sl_print_counter(m);
if (cpum_sf_avail())
sl_print_sampling(m);
}
static struct service_level service_level_perf = {
.seq_print = service_level_perf_print,
};
static int __init service_level_perf_register(void)
{
return register_service_level(&service_level_perf);
}
arch_initcall(service_level_perf_register);
/* See also arch/s390/kernel/traps.c */
static unsigned long __store_trace(struct perf_callchain_entry *entry,
unsigned long sp,
......@@ -172,3 +281,44 @@ void perf_callchain_kernel(struct perf_callchain_entry *entry,
__store_trace(entry, head, S390_lowcore.thread_info,
S390_lowcore.thread_info + THREAD_SIZE);
}
/* Perf definitions for PMU event attributes in sysfs */
ssize_t cpumf_events_sysfs_show(struct device *dev,
struct device_attribute *attr, char *page)
{
struct perf_pmu_events_attr *pmu_attr;
pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);
return sprintf(page, "event=0x%04llx,name=%s\n",
pmu_attr->id, attr->attr.name);
}
/* Reserve/release functions for sharing perf hardware */
static DEFINE_SPINLOCK(perf_hw_owner_lock);
static void *perf_sampling_owner;
int perf_reserve_sampling(void)
{
int err;
err = 0;
spin_lock(&perf_hw_owner_lock);
if (perf_sampling_owner) {
pr_warn("The sampling facility is already reserved by %p\n",
perf_sampling_owner);
err = -EBUSY;
} else
perf_sampling_owner = __builtin_return_address(0);
spin_unlock(&perf_hw_owner_lock);
return err;
}
EXPORT_SYMBOL(perf_reserve_sampling);
void perf_release_sampling(void)
{
spin_lock(&perf_hw_owner_lock);
WARN_ON(!perf_sampling_owner);
perf_sampling_owner = NULL;
spin_unlock(&perf_hw_owner_lock);
}
EXPORT_SYMBOL(perf_release_sampling);
......@@ -261,20 +261,18 @@ static inline unsigned long brk_rnd(void)
unsigned long arch_randomize_brk(struct mm_struct *mm)
{
unsigned long ret = PAGE_ALIGN(mm->brk + brk_rnd());
unsigned long ret;
if (ret < mm->brk)
return mm->brk;
return ret;
ret = PAGE_ALIGN(mm->brk + brk_rnd());
return (ret > mm->brk) ? ret : mm->brk;
}
unsigned long randomize_et_dyn(unsigned long base)
{
unsigned long ret = PAGE_ALIGN(base + brk_rnd());
unsigned long ret;
if (!(current->flags & PF_RANDOMIZE))
return base;
if (ret < base)
return base;
return ret;
ret = PAGE_ALIGN(base + brk_rnd());
return (ret > base) ? ret : base;
}
......@@ -56,25 +56,26 @@ void update_cr_regs(struct task_struct *task)
#ifdef CONFIG_64BIT
/* Take care of the enable/disable of transactional execution. */
if (MACHINE_HAS_TE) {
unsigned long cr[3], cr_new[3];
unsigned long cr, cr_new;
__ctl_store(cr, 0, 2);
cr_new[1] = cr[1];
__ctl_store(cr, 0, 0);
/* Set or clear transaction execution TXC bit 8. */
cr_new = cr | (1UL << 55);
if (task->thread.per_flags & PER_FLAG_NO_TE)
cr_new[0] = cr[0] & ~(1UL << 55);
else
cr_new[0] = cr[0] | (1UL << 55);
cr_new &= ~(1UL << 55);
if (cr_new != cr)
__ctl_load(cr_new, 0, 0);
/* Set or clear transaction execution TDC bits 62 and 63. */
cr_new[2] = cr[2] & ~3UL;
__ctl_store(cr, 2, 2);
cr_new = cr & ~3UL;
if (task->thread.per_flags & PER_FLAG_TE_ABORT_RAND) {
if (task->thread.per_flags & PER_FLAG_TE_ABORT_RAND_TEND)
cr_new[2] |= 1UL;
cr_new |= 1UL;
else
cr_new[2] |= 2UL;
cr_new |= 2UL;
}
if (memcmp(&cr_new, &cr, sizeof(cr)))
__ctl_load(cr_new, 0, 2);
if (cr_new != cr)
__ctl_load(cr_new, 2, 2);
}
#endif
/* Copy user specified PER registers */
......@@ -107,15 +108,11 @@ void update_cr_regs(struct task_struct *task)
void user_enable_single_step(struct task_struct *task)
{
set_tsk_thread_flag(task, TIF_SINGLE_STEP);
if (task == current)
update_cr_regs(task);
}
void user_disable_single_step(struct task_struct *task)
{
clear_tsk_thread_flag(task, TIF_SINGLE_STEP);
if (task == current)
update_cr_regs(task);
}
/*
......
......@@ -5,7 +5,7 @@
#ifdef CONFIG_FUNCTION_TRACER
EXPORT_SYMBOL(_mcount);
#endif
#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE)
#if IS_ENABLED(CONFIG_KVM)
EXPORT_SYMBOL(sie64a);
EXPORT_SYMBOL(sie_exit);
#endif
......
......@@ -373,7 +373,7 @@ static void __init setup_lowcore(void)
/*
* Set up PSW restart to call ipl.c:do_restart(). Copy the relevant
* restart data to the absolute zero lowcore. This is necesary if
* restart data to the absolute zero lowcore. This is necessary if
* PSW restart is done on an offline CPU that has lowcore zero.
*/
lc->restart_stack = (unsigned long) restart_stack;
......
......@@ -59,7 +59,7 @@ enum {
};
struct pcpu {
struct cpu cpu;
struct cpu *cpu;
struct _lowcore *lowcore; /* lowcore page(s) for the cpu */
unsigned long async_stack; /* async stack for the cpu */
unsigned long panic_stack; /* panic stack for the cpu */
......@@ -159,9 +159,9 @@ static void pcpu_ec_call(struct pcpu *pcpu, int ec_bit)
{
int order;
set_bit(ec_bit, &pcpu->ec_mask);
order = pcpu_running(pcpu) ?
SIGP_EXTERNAL_CALL : SIGP_EMERGENCY_SIGNAL;
if (test_and_set_bit(ec_bit, &pcpu->ec_mask))
return;
order = pcpu_running(pcpu) ? SIGP_EXTERNAL_CALL : SIGP_EMERGENCY_SIGNAL;
pcpu_sigp_retry(pcpu, order, 0);
}
......@@ -965,7 +965,7 @@ static int smp_cpu_notify(struct notifier_block *self, unsigned long action,
void *hcpu)
{
unsigned int cpu = (unsigned int)(long)hcpu;
struct cpu *c = &pcpu_devices[cpu].cpu;
struct cpu *c = pcpu_devices[cpu].cpu;
struct device *s = &c->dev;
int err = 0;
......@@ -982,10 +982,15 @@ static int smp_cpu_notify(struct notifier_block *self, unsigned long action,
static int smp_add_present_cpu(int cpu)
{
struct cpu *c = &pcpu_devices[cpu].cpu;
struct device *s = &c->dev;
struct device *s;
struct cpu *c;
int rc;
c = kzalloc(sizeof(*c), GFP_KERNEL);
if (!c)
return -ENOMEM;
pcpu_devices[cpu].cpu = c;
s = &c->dev;
c->hotpluggable = 1;
rc = register_cpu(c, cpu);
if (rc)
......
......@@ -275,7 +275,7 @@ static int handle_io_inst(struct kvm_vcpu *vcpu)
return -EOPNOTSUPP;
} else {
/*
* Set condition code 3 to stop the guest from issueing channel
* Set condition code 3 to stop the guest from issuing channel
* I/O instructions.
*/
kvm_s390_set_psw_cc(vcpu, 3);
......
......@@ -74,8 +74,8 @@ static size_t copy_in_kernel(size_t count, void __user *to,
/*
* Returns kernel address for user virtual address. If the returned address is
* >= -4095 (IS_ERR_VALUE(x) returns true), a fault has occured and the address
* contains the (negative) exception code.
* >= -4095 (IS_ERR_VALUE(x) returns true), a fault has occurred and the
* address contains the (negative) exception code.
*/
#ifdef CONFIG_64BIT
......
......@@ -293,7 +293,7 @@ static int gmap_alloc_table(struct gmap *gmap,
* @addr: address in the guest address space
* @len: length of the memory area to unmap
*
* Returns 0 if the unmap succeded, -EINVAL if not.
* Returns 0 if the unmap succeeded, -EINVAL if not.
*/
int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len)
{
......@@ -344,7 +344,7 @@ EXPORT_SYMBOL_GPL(gmap_unmap_segment);
* @from: source address in the parent address space
* @to: target address in the guest address space
*
* Returns 0 if the mmap succeded, -EINVAL or -ENOMEM if not.
* Returns 0 if the mmap succeeded, -EINVAL or -ENOMEM if not.
*/
int gmap_map_segment(struct gmap *gmap, unsigned long from,
unsigned long to, unsigned long len)
......
......@@ -26,9 +26,6 @@
#define MAX_NUM_SDB 511
#define MIN_NUM_SDB 1
#define ALERT_REQ_MASK 0x4000000000000000ul
#define BUFFER_FULL_MASK 0x8000000000000000ul
DECLARE_PER_CPU(struct hws_cpu_buffer, sampler_cpu_buffer);
struct hws_execute_parms {
......@@ -44,6 +41,7 @@ static DEFINE_MUTEX(hws_sem_oom);
static unsigned char hws_flush_all;
static unsigned int hws_oom;
static unsigned int hws_alert;
static struct workqueue_struct *hws_wq;
static unsigned int hws_state;
......@@ -65,43 +63,6 @@ static unsigned long interval;
static unsigned long min_sampler_rate;
static unsigned long max_sampler_rate;
static int ssctl(void *buffer)
{
int cc;
/* set in order to detect a program check */
cc = 1;
asm volatile(
"0: .insn s,0xB2870000,0(%1)\n"
"1: ipm %0\n"
" srl %0,28\n"
"2:\n"
EX_TABLE(0b, 2b) EX_TABLE(1b, 2b)
: "+d" (cc), "+a" (buffer)
: "m" (*((struct hws_ssctl_request_block *)buffer))
: "cc", "memory");
return cc ? -EINVAL : 0 ;
}
static int qsi(void *buffer)
{
int cc;
cc = 1;
asm volatile(
"0: .insn s,0xB2860000,0(%1)\n"
"1: lhi %0,0\n"
"2:\n"
EX_TABLE(0b, 2b) EX_TABLE(1b, 2b)
: "=d" (cc), "+a" (buffer)
: "m" (*((struct hws_qsi_info_block *)buffer))
: "cc", "memory");
return cc ? -EINVAL : 0;
}
static void execute_qsi(void *parms)
{
struct hws_execute_parms *ep = parms;
......@@ -113,7 +74,7 @@ static void execute_ssctl(void *parms)
{
struct hws_execute_parms *ep = parms;
ep->rc = ssctl(ep->buffer);
ep->rc = lsctl(ep->buffer);
}
static int smp_ctl_ssctl_stop(int cpu)
......@@ -214,17 +175,6 @@ static int smp_ctl_qsi(int cpu)
return ep.rc;
}
static inline unsigned long *trailer_entry_ptr(unsigned long v)
{
void *ret;
ret = (void *)v;
ret += PAGE_SIZE;
ret -= sizeof(struct hws_trailer_entry);
return (unsigned long *) ret;
}
static void hws_ext_handler(struct ext_code ext_code,
unsigned int param32, unsigned long param64)
{
......@@ -233,6 +183,9 @@ static void hws_ext_handler(struct ext_code ext_code,
if (!(param32 & CPU_MF_INT_SF_MASK))
return;
if (!hws_alert)
return;
inc_irq_stat(IRQEXT_CMS);
atomic_xchg(&cb->ext_params, atomic_read(&cb->ext_params) | param32);
......@@ -256,16 +209,6 @@ static void init_all_cpu_buffers(void)
}
}
static int is_link_entry(unsigned long *s)
{
return *s & 0x1ul ? 1 : 0;
}
static unsigned long *get_next_sdbt(unsigned long *s)
{
return (unsigned long *) (*s & ~0x1ul);
}
static int prepare_cpu_buffers(void)
{
int cpu;
......@@ -353,7 +296,7 @@ static int allocate_sdbt(int cpu)
}
*sdbt = sdb;
trailer = trailer_entry_ptr(*sdbt);
*trailer = ALERT_REQ_MASK;
*trailer = SDB_TE_ALERT_REQ_MASK;
sdbt++;
mutex_unlock(&hws_sem_oom);
}
......@@ -829,7 +772,7 @@ static void worker_on_interrupt(unsigned int cpu)
trailer = trailer_entry_ptr(*sdbt);
/* leave loop if no more work to do */
if (!(*trailer & BUFFER_FULL_MASK)) {
if (!(*trailer & SDB_TE_BUFFER_FULL_MASK)) {
done = 1;
if (!hws_flush_all)
continue;
......@@ -856,7 +799,7 @@ static void worker_on_interrupt(unsigned int cpu)
static void add_samples_to_oprofile(unsigned int cpu, unsigned long *sdbt,
unsigned long *dear)
{
struct hws_data_entry *sample_data_ptr;
struct hws_basic_entry *sample_data_ptr;
unsigned long *trailer;
trailer = trailer_entry_ptr(*sdbt);
......@@ -866,7 +809,7 @@ static void add_samples_to_oprofile(unsigned int cpu, unsigned long *sdbt,
trailer = dear;
}
sample_data_ptr = (struct hws_data_entry *)(*sdbt);
sample_data_ptr = (struct hws_basic_entry *)(*sdbt);
while ((unsigned long *)sample_data_ptr < trailer) {
struct pt_regs *regs = NULL;
......@@ -1002,6 +945,7 @@ int hwsampler_deallocate(void)
goto deallocate_exit;
irq_subclass_unregister(IRQ_SUBCLASS_MEASUREMENT_ALERT);
hws_alert = 0;
deallocate_sdbt();
hws_state = HWS_DEALLOCATED;
......@@ -1116,6 +1060,7 @@ int hwsampler_shutdown(void)
if (hws_state == HWS_STOPPED) {
irq_subclass_unregister(IRQ_SUBCLASS_MEASUREMENT_ALERT);
hws_alert = 0;
deallocate_sdbt();
}
if (hws_wq) {
......@@ -1190,6 +1135,7 @@ int hwsampler_start_all(unsigned long rate)
hws_oom = 1;
hws_flush_all = 0;
/* now let them in, 1407 CPUMF external interrupts */
hws_alert = 1;
irq_subclass_register(IRQ_SUBCLASS_MEASUREMENT_ALERT);
return 0;
......
......@@ -9,27 +9,7 @@
#define HWSAMPLER_H_
#include <linux/workqueue.h>
struct hws_qsi_info_block /* QUERY SAMPLING information block */
{ /* Bit(s) */
unsigned int b0_13:14; /* 0-13: zeros */
unsigned int as:1; /* 14: sampling authorisation control*/
unsigned int b15_21:7; /* 15-21: zeros */
unsigned int es:1; /* 22: sampling enable control */
unsigned int b23_29:7; /* 23-29: zeros */
unsigned int cs:1; /* 30: sampling activation control */
unsigned int:1; /* 31: reserved */
unsigned int bsdes:16; /* 4-5: size of sampling entry */
unsigned int:16; /* 6-7: reserved */
unsigned long min_sampl_rate; /* 8-15: minimum sampling interval */
unsigned long max_sampl_rate; /* 16-23: maximum sampling interval*/
unsigned long tear; /* 24-31: TEAR contents */
unsigned long dear; /* 32-39: DEAR contents */
unsigned int rsvrd0; /* 40-43: reserved */
unsigned int cpu_speed; /* 44-47: CPU speed */
unsigned long long rsvrd1; /* 48-55: reserved */
unsigned long long rsvrd2; /* 56-63: reserved */
};
#include <asm/cpu_mf.h>
struct hws_ssctl_request_block /* SET SAMPLING CONTROLS req block */
{ /* bytes 0 - 7 Bit(s) */
......@@ -68,36 +48,6 @@ struct hws_cpu_buffer {
unsigned int stop_mode:1;
};
struct hws_data_entry {
unsigned int def:16; /* 0-15 Data Entry Format */
unsigned int R:4; /* 16-19 reserved */
unsigned int U:4; /* 20-23 Number of unique instruct. */
unsigned int z:2; /* zeros */
unsigned int T:1; /* 26 PSW DAT mode */
unsigned int W:1; /* 27 PSW wait state */
unsigned int P:1; /* 28 PSW Problem state */
unsigned int AS:2; /* 29-30 PSW address-space control */
unsigned int I:1; /* 31 entry valid or invalid */
unsigned int:16;
unsigned int prim_asn:16; /* primary ASN */
unsigned long long ia; /* Instruction Address */
unsigned long long gpp; /* Guest Program Parameter */
unsigned long long hpp; /* Host Program Parameter */
};
struct hws_trailer_entry {
unsigned int f:1; /* 0 - Block Full Indicator */
unsigned int a:1; /* 1 - Alert request control */
unsigned long:62; /* 2 - 63: Reserved */
unsigned long overflow; /* 64 - sample Overflow count */
unsigned long timestamp; /* 16 - time-stamp */
unsigned long timestamp1; /* */
unsigned long reserved1; /* 32 -Reserved */
unsigned long reserved2; /* */
unsigned long progusage1; /* 48 - reserved for programming use */
unsigned long progusage2; /* */
};
int hwsampler_setup(void);
int hwsampler_shutdown(void);
int hwsampler_allocate(unsigned long sdbt, unsigned long sdb);
......
......@@ -10,6 +10,7 @@
*/
#include <linux/oprofile.h>
#include <linux/perf_event.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/fs.h>
......@@ -67,6 +68,21 @@ module_param_call(cpu_type, set_cpu_type, NULL, NULL, 0);
MODULE_PARM_DESC(cpu_type, "Force legacy basic mode sampling"
"(report cpu_type \"timer\"");
static int __oprofile_hwsampler_start(void)
{
int retval;
retval = hwsampler_allocate(oprofile_sdbt_blocks, oprofile_sdb_blocks);
if (retval)
return retval;
retval = hwsampler_start_all(oprofile_hw_interval);
if (retval)
hwsampler_deallocate();
return retval;
}
static int oprofile_hwsampler_start(void)
{
int retval;
......@@ -76,13 +92,13 @@ static int oprofile_hwsampler_start(void)
if (!hwsampler_running)
return timer_ops.start();
retval = hwsampler_allocate(oprofile_sdbt_blocks, oprofile_sdb_blocks);
retval = perf_reserve_sampling();
if (retval)
return retval;
retval = hwsampler_start_all(oprofile_hw_interval);
retval = __oprofile_hwsampler_start();
if (retval)
hwsampler_deallocate();
perf_release_sampling();
return retval;
}
......@@ -96,6 +112,7 @@ static void oprofile_hwsampler_stop(void)
hwsampler_stop_all();
hwsampler_deallocate();
perf_release_sampling();
return;
}
......
......@@ -919,17 +919,23 @@ static void zpci_mem_exit(void)
kmem_cache_destroy(zdev_fmb_cache);
}
static unsigned int s390_pci_probe;
static unsigned int s390_pci_probe = 1;
static unsigned int s390_pci_initialized;
char * __init pcibios_setup(char *str)
{
if (!strcmp(str, "on")) {
s390_pci_probe = 1;
if (!strcmp(str, "off")) {
s390_pci_probe = 0;
return NULL;
}
return str;
}
bool zpci_is_enabled(void)
{
return s390_pci_initialized;
}
static int __init pci_base_init(void)
{
int rc;
......@@ -961,6 +967,7 @@ static int __init pci_base_init(void)
if (rc)
goto out_find;
s390_pci_initialized = 1;
return 0;
out_find:
......@@ -978,5 +985,6 @@ subsys_initcall_sync(pci_base_init);
void zpci_rescan(void)
{
clp_rescan_pci_devices_simple();
if (zpci_is_enabled())
clp_rescan_pci_devices_simple();
}
......@@ -285,7 +285,7 @@ static dma_addr_t s390_dma_map_pages(struct device *dev, struct page *page,
flags |= ZPCI_TABLE_PROTECTED;
if (!dma_update_trans(zdev, pa, dma_addr, size, flags)) {
atomic64_add(nr_pages, (atomic64_t *) &zdev->fmb->mapped_pages);
atomic64_add(nr_pages, &zdev->fmb->mapped_pages);
return dma_addr + (offset & ~PAGE_MASK);
}
......@@ -313,7 +313,7 @@ static void s390_dma_unmap_pages(struct device *dev, dma_addr_t dma_addr,
zpci_err_hex(&dma_addr, sizeof(dma_addr));
}
atomic64_add(npages, (atomic64_t *) &zdev->fmb->unmapped_pages);
atomic64_add(npages, &zdev->fmb->unmapped_pages);
iommu_page_index = (dma_addr - zdev->start_dma) >> PAGE_SHIFT;
dma_free_iommu(zdev, iommu_page_index, npages);
}
......@@ -332,7 +332,6 @@ static void *s390_dma_alloc(struct device *dev, size_t size,
if (!page)
return NULL;
atomic64_add(size / PAGE_SIZE, (atomic64_t *) &zdev->fmb->allocated_pages);
pa = page_to_phys(page);
memset((void *) pa, 0, size);
......@@ -343,6 +342,7 @@ static void *s390_dma_alloc(struct device *dev, size_t size,
return NULL;
}
atomic64_add(size / PAGE_SIZE, &zdev->fmb->allocated_pages);
if (dma_handle)
*dma_handle = map;
return (void *) pa;
......@@ -352,8 +352,11 @@ static void s390_dma_free(struct device *dev, size_t size,
void *pa, dma_addr_t dma_handle,
struct dma_attrs *attrs)
{
s390_dma_unmap_pages(dev, dma_handle, PAGE_ALIGN(size),
DMA_BIDIRECTIONAL, NULL);
struct zpci_dev *zdev = get_zdev(to_pci_dev(dev));
size = PAGE_ALIGN(size);
atomic64_sub(size / PAGE_SIZE, &zdev->fmb->allocated_pages);
s390_dma_unmap_pages(dev, dma_handle, size, DMA_BIDIRECTIONAL, NULL);
free_pages((unsigned long) pa, get_order(size));
}
......
......@@ -43,9 +43,8 @@ struct zpci_ccdf_avail {
u16 pec; /* PCI event code */
} __packed;
void zpci_event_error(void *data)
static void __zpci_event_error(struct zpci_ccdf_err *ccdf)
{
struct zpci_ccdf_err *ccdf = data;
struct zpci_dev *zdev = get_zdev_by_fid(ccdf->fid);
zpci_err("error CCDF:\n");
......@@ -58,9 +57,14 @@ void zpci_event_error(void *data)
pci_name(zdev->pdev), ccdf->pec, ccdf->fid);
}
void zpci_event_availability(void *data)
void zpci_event_error(void *data)
{
if (zpci_is_enabled())
__zpci_event_error(data);
}
static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf)
{
struct zpci_ccdf_avail *ccdf = data;
struct zpci_dev *zdev = get_zdev_by_fid(ccdf->fid);
struct pci_dev *pdev = zdev ? zdev->pdev : NULL;
int ret;
......@@ -99,8 +103,12 @@ void zpci_event_availability(void *data)
break;
case 0x0304: /* Configured -> Standby */
if (pdev)
if (pdev) {
/* Give the driver a hint that the function is
* already unusable. */
pdev->error_state = pci_channel_io_perm_failure;
pci_stop_and_remove_bus_device(pdev);
}
zdev->fh = ccdf->fh;
zpci_disable_device(zdev);
......@@ -110,6 +118,8 @@ void zpci_event_availability(void *data)
clp_rescan_pci_devices();
break;
case 0x0308: /* Standby -> Reserved */
if (!zdev)
break;
pci_stop_root_bus(zdev->bus);
pci_remove_root_bus(zdev->bus);
break;
......@@ -117,3 +127,9 @@ void zpci_event_availability(void *data)
break;
}
}
void zpci_event_availability(void *data)
{
if (zpci_is_enabled())
__zpci_event_availability(data);
}
......@@ -3386,7 +3386,7 @@ int dasd_generic_set_offline(struct ccw_device *cdev)
if (test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
/*
* safe offline allready running
* safe offline already running
* could only be called by normal offline so safe_offline flag
* needs to be removed to run normal offline and kill all I/O
*/
......
......@@ -183,7 +183,6 @@ extern unsigned long sclp_console_full;
extern u8 sclp_fac84;
extern unsigned long long sclp_rzm;
extern unsigned long long sclp_rnmax;
extern __initdata int sclp_early_read_info_sccb_valid;
/* useful inlines */
......
......@@ -455,8 +455,6 @@ static int __init sclp_detect_standby_memory(void)
if (OLDMEM_BASE) /* No standby memory in kdump mode */
return 0;
if (!sclp_early_read_info_sccb_valid)
return 0;
if ((sclp_facilities & 0xe00000000000ULL) != 0xe00000000000ULL)
return 0;
rc = -ENOMEM;
......
......@@ -35,11 +35,12 @@ struct read_info_sccb {
u8 _reserved5[4096 - 112]; /* 112-4095 */
} __packed __aligned(PAGE_SIZE);
static __initdata struct read_info_sccb early_read_info_sccb;
static __initdata char sccb_early[PAGE_SIZE] __aligned(PAGE_SIZE);
static char sccb_early[PAGE_SIZE] __aligned(PAGE_SIZE) __initdata;
static unsigned int sclp_con_has_vt220 __initdata;
static unsigned int sclp_con_has_linemode __initdata;
static unsigned long sclp_hsa_size;
static struct sclp_ipl_info sclp_ipl_info;
__initdata int sclp_early_read_info_sccb_valid;
u64 sclp_facilities;
u8 sclp_fac84;
unsigned long long sclp_rzm;
......@@ -63,15 +64,12 @@ static int __init sclp_cmd_sync_early(sclp_cmdw_t cmd, void *sccb)
return rc;
}
static void __init sclp_read_info_early(void)
static int __init sclp_read_info_early(struct read_info_sccb *sccb)
{
int rc;
int i;
struct read_info_sccb *sccb;
int rc, i;
sclp_cmdw_t commands[] = {SCLP_CMDW_READ_SCP_INFO_FORCED,
SCLP_CMDW_READ_SCP_INFO};
sccb = &early_read_info_sccb;
for (i = 0; i < ARRAY_SIZE(commands); i++) {
do {
memset(sccb, 0, sizeof(*sccb));
......@@ -83,24 +81,19 @@ static void __init sclp_read_info_early(void)
if (rc)
break;
if (sccb->header.response_code == 0x10) {
sclp_early_read_info_sccb_valid = 1;
break;
}
if (sccb->header.response_code == 0x10)
return 0;
if (sccb->header.response_code != 0x1f0)
break;
}
return -EIO;
}
static void __init sclp_facilities_detect(void)
static void __init sclp_facilities_detect(struct read_info_sccb *sccb)
{
struct read_info_sccb *sccb;
sclp_read_info_early();
if (!sclp_early_read_info_sccb_valid)
if (sclp_read_info_early(sccb))
return;
sccb = &early_read_info_sccb;
sclp_facilities = sccb->facilities;
sclp_fac84 = sccb->fac84;
if (sccb->fac85 & 0x02)
......@@ -108,30 +101,22 @@ static void __init sclp_facilities_detect(void)
sclp_rnmax = sccb->rnmax ? sccb->rnmax : sccb->rnmax2;
sclp_rzm = sccb->rnsize ? sccb->rnsize : sccb->rnsize2;
sclp_rzm <<= 20;
/* Save IPL information */
sclp_ipl_info.is_valid = 1;
if (sccb->flags & 0x2)
sclp_ipl_info.has_dump = 1;
memcpy(&sclp_ipl_info.loadparm, &sccb->loadparm, LOADPARM_LEN);
}
bool __init sclp_has_linemode(void)
{
struct init_sccb *sccb = (void *) &sccb_early;
if (sccb->header.response_code != 0x20)
return 0;
if (!(sccb->sclp_send_mask & (EVTYP_OPCMD_MASK | EVTYP_PMSGCMD_MASK)))
return 0;
if (!(sccb->sclp_receive_mask & (EVTYP_MSG_MASK | EVTYP_PMSGCMD_MASK)))
return 0;
return 1;
return !!sclp_con_has_linemode;
}
bool __init sclp_has_vt220(void)
{
struct init_sccb *sccb = (void *) &sccb_early;
if (sccb->header.response_code != 0x20)
return 0;
if (sccb->sclp_send_mask & EVTYP_VT220MSG_MASK)
return 1;
return 0;
return !!sclp_con_has_vt220;
}
unsigned long long sclp_get_rnmax(void)
......@@ -146,19 +131,12 @@ unsigned long long sclp_get_rzm(void)
/*
* This function will be called after sclp_facilities_detect(), which gets
* called from early.c code. Therefore the sccb should have valid contents.
* called from early.c code. The sclp_facilities_detect() function retrieves
* and saves the IPL information.
*/
void __init sclp_get_ipl_info(struct sclp_ipl_info *info)
{
struct read_info_sccb *sccb;
if (!sclp_early_read_info_sccb_valid)
return;
sccb = &early_read_info_sccb;
info->is_valid = 1;
if (sccb->flags & 0x2)
info->has_dump = 1;
memcpy(&info->loadparm, &sccb->loadparm, LOADPARM_LEN);
*info = sclp_ipl_info;
}
static int __init sclp_cmd_early(sclp_cmdw_t cmd, void *sccb)
......@@ -189,11 +167,10 @@ static void __init sccb_init_eq_size(struct sdias_sccb *sccb)
sccb->evbuf.dbs = 1;
}
static int __init sclp_set_event_mask(unsigned long receive_mask,
static int __init sclp_set_event_mask(struct init_sccb *sccb,
unsigned long receive_mask,
unsigned long send_mask)
{
struct init_sccb *sccb = (void *) &sccb_early;
memset(sccb, 0, sizeof(*sccb));
sccb->header.length = sizeof(*sccb);
sccb->mask_length = sizeof(sccb_mask_t);
......@@ -202,10 +179,8 @@ static int __init sclp_set_event_mask(unsigned long receive_mask,
return sclp_cmd_early(SCLP_CMDW_WRITE_EVENT_MASK, sccb);
}
static long __init sclp_hsa_size_init(void)
static long __init sclp_hsa_size_init(struct sdias_sccb *sccb)
{
struct sdias_sccb *sccb = (void *) &sccb_early;
sccb_init_eq_size(sccb);
if (sclp_cmd_early(SCLP_CMDW_WRITE_EVENT_DATA, sccb))
return -EIO;
......@@ -214,10 +189,8 @@ static long __init sclp_hsa_size_init(void)
return 0;
}
static long __init sclp_hsa_copy_wait(void)
static long __init sclp_hsa_copy_wait(struct sccb_header *sccb)
{
struct sccb_header *sccb = (void *) &sccb_early;
memset(sccb, 0, PAGE_SIZE);
sccb->length = PAGE_SIZE;
if (sclp_cmd_early(SCLP_CMDW_READ_EVENT_DATA, sccb))
......@@ -230,34 +203,62 @@ unsigned long sclp_get_hsa_size(void)
return sclp_hsa_size;
}
static void __init sclp_hsa_size_detect(void)
static void __init sclp_hsa_size_detect(void *sccb)
{
long size;
/* First try synchronous interface (LPAR) */
if (sclp_set_event_mask(0, 0x40000010))
if (sclp_set_event_mask(sccb, 0, 0x40000010))
return;
size = sclp_hsa_size_init();
size = sclp_hsa_size_init(sccb);
if (size < 0)
return;
if (size != 0)
goto out;
/* Then try asynchronous interface (z/VM) */
if (sclp_set_event_mask(0x00000010, 0x40000010))
if (sclp_set_event_mask(sccb, 0x00000010, 0x40000010))
return;
size = sclp_hsa_size_init();
size = sclp_hsa_size_init(sccb);
if (size < 0)
return;
size = sclp_hsa_copy_wait();
size = sclp_hsa_copy_wait(sccb);
if (size < 0)
return;
out:
sclp_hsa_size = size;
}
static unsigned int __init sclp_con_check_linemode(struct init_sccb *sccb)
{
if (!(sccb->sclp_send_mask & (EVTYP_OPCMD_MASK | EVTYP_PMSGCMD_MASK)))
return 0;
if (!(sccb->sclp_receive_mask & (EVTYP_MSG_MASK | EVTYP_PMSGCMD_MASK)))
return 0;
return 1;
}
static void __init sclp_console_detect(struct init_sccb *sccb)
{
if (sccb->header.response_code != 0x20)
return;
if (sccb->sclp_send_mask & EVTYP_VT220MSG_MASK)
sclp_con_has_vt220 = 1;
if (sclp_con_check_linemode(sccb))
sclp_con_has_linemode = 1;
}
void __init sclp_early_detect(void)
{
sclp_facilities_detect();
sclp_hsa_size_detect();
sclp_set_event_mask(0, 0);
void *sccb = &sccb_early;
sclp_facilities_detect(sccb);
sclp_hsa_size_detect(sccb);
/* Turn off SCLP event notifications. Also save remote masks in the
* sccb. These are sufficient to detect sclp console capabilities.
*/
sclp_set_event_mask(sccb, 0, 0);
sclp_console_detect(sccb);
}
......@@ -125,10 +125,7 @@ static void tty3270_resize_work(struct work_struct *work);
*/
static void tty3270_set_timer(struct tty3270 *tp, int expires)
{
if (expires == 0)
del_timer(&tp->timer);
else
mod_timer(&tp->timer, jiffies + expires);
mod_timer(&tp->timer, jiffies + expires);
}
/*
......@@ -744,7 +741,6 @@ tty3270_free_view(struct tty3270 *tp)
{
int pages;
del_timer_sync(&tp->timer);
kbd_free(tp->kbd);
raw3270_request_free(tp->kreset);
raw3270_request_free(tp->read);
......@@ -877,6 +873,7 @@ tty3270_free(struct raw3270_view *view)
{
struct tty3270 *tp = container_of(view, struct tty3270, view);
del_timer_sync(&tp->timer);
tty3270_free_screen(tp->screen, tp->view.rows);
tty3270_free_view(tp);
}
......
......@@ -260,16 +260,16 @@ static int blacklist_parse_proc_parameters(char *buf)
parm = strsep(&buf, " ");
if (strcmp("free", parm) == 0)
if (strcmp("free", parm) == 0) {
rc = blacklist_parse_parameters(buf, free, 0);
else if (strcmp("add", parm) == 0)
css_schedule_eval_all_unreg(0);
} else if (strcmp("add", parm) == 0)
rc = blacklist_parse_parameters(buf, add, 0);
else if (strcmp("purge", parm) == 0)
return ccw_purge_blacklisted();
else
return -EINVAL;
css_schedule_reprobe();
return rc;
}
......
......@@ -128,14 +128,14 @@ static ssize_t ccwgroup_online_store(struct device *dev,
const char *buf, size_t count)
{
struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
struct ccwgroup_driver *gdrv = to_ccwgroupdrv(dev->driver);
unsigned long value;
int ret;
if (!dev->driver)
return -EINVAL;
if (!try_module_get(gdrv->driver.owner))
return -EINVAL;
device_lock(dev);
if (!dev->driver) {
ret = -EINVAL;
goto out;
}
ret = kstrtoul(buf, 0, &value);
if (ret)
......@@ -148,7 +148,7 @@ static ssize_t ccwgroup_online_store(struct device *dev,
else
ret = -EINVAL;
out:
module_put(gdrv->driver.owner);
device_unlock(dev);
return (ret == 0) ? count : ret;
}
......
......@@ -55,6 +55,7 @@ int chsc_error_from_response(int response)
case 0x0004:
return -EOPNOTSUPP;
case 0x000b:
case 0x0107: /* "Channel busy" for the op 0x003d */
return -EBUSY;
case 0x0100:
case 0x0102:
......@@ -237,26 +238,6 @@ void chsc_chp_offline(struct chp_id chpid)
for_each_subchannel_staged(s390_subchannel_remove_chpid, NULL, &link);
}
static int s390_process_res_acc_new_sch(struct subchannel_id schid, void *data)
{
struct schib schib;
/*
* We don't know the device yet, but since a path
* may be available now to the device we'll have
* to do recognition again.
* Since we don't have any idea about which chpid
* that beast may be on we'll have to do a stsch
* on all devices, grr...
*/
if (stsch_err(schid, &schib))
/* We're through */
return -ENXIO;
/* Put it on the slow path. */
css_schedule_eval(schid);
return 0;
}
static int __s390_process_res_acc(struct subchannel *sch, void *data)
{
spin_lock_irq(sch->lock);
......@@ -287,8 +268,8 @@ static void s390_process_res_acc(struct chp_link *link)
* The more information we have (info), the less scanning
* we will have to do.
*/
for_each_subchannel_staged(__s390_process_res_acc,
s390_process_res_acc_new_sch, link);
for_each_subchannel_staged(__s390_process_res_acc, NULL, link);
css_schedule_reprobe();
}
static int
......@@ -663,19 +644,6 @@ static int s390_subchannel_vary_chpid_on(struct subchannel *sch, void *data)
return 0;
}
static int
__s390_vary_chpid_on(struct subchannel_id schid, void *data)
{
struct schib schib;
if (stsch_err(schid, &schib))
/* We're through */
return -ENXIO;
/* Put it on the slow path. */
css_schedule_eval(schid);
return 0;
}
/**
* chsc_chp_vary - propagate channel-path vary operation to subchannels
* @chpid: channel-path ID
......@@ -694,7 +662,8 @@ int chsc_chp_vary(struct chp_id chpid, int on)
/* Try to update the channel path description. */
chp_update_desc(chp);
for_each_subchannel_staged(s390_subchannel_vary_chpid_on,
__s390_vary_chpid_on, &chpid);
NULL, &chpid);
css_schedule_reprobe();
} else
for_each_subchannel_staged(s390_subchannel_vary_chpid_off,
NULL, &chpid);
......@@ -1234,3 +1203,35 @@ int chsc_scm_info(struct chsc_scm_info *scm_area, u64 token)
return ret;
}
EXPORT_SYMBOL_GPL(chsc_scm_info);
/**
* chsc_pnso_brinfo() - Perform Network-Subchannel Operation, Bridge Info.
* @schid: id of the subchannel on which PNSO is performed
* @brinfo_area: request and response block for the operation
* @resume_token: resume token for multiblock response
* @cnc: Boolean change-notification control
*
* brinfo_area must be allocated by the caller with get_zeroed_page(GFP_KERNEL)
*
* Returns 0 on success.
*/
int chsc_pnso_brinfo(struct subchannel_id schid,
struct chsc_pnso_area *brinfo_area,
struct chsc_brinfo_resume_token resume_token,
int cnc)
{
memset(brinfo_area, 0, sizeof(*brinfo_area));
brinfo_area->request.length = 0x0030;
brinfo_area->request.code = 0x003d; /* network-subchannel operation */
brinfo_area->m = schid.m;
brinfo_area->ssid = schid.ssid;
brinfo_area->sch = schid.sch_no;
brinfo_area->cssid = schid.cssid;
brinfo_area->oc = 0; /* Store-network-bridging-information list */
brinfo_area->resume_token = resume_token;
brinfo_area->n = (cnc != 0);
if (chsc(brinfo_area))
return -EIO;
return chsc_error_from_response(brinfo_area->response.code);
}
EXPORT_SYMBOL_GPL(chsc_pnso_brinfo);
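Editor's note: a minimal caller sketch following the allocation rule from the kernel-doc above (query_brinfo is illustrative, not part of this merge):

static int query_brinfo(struct subchannel_id schid)
{
	struct chsc_brinfo_resume_token token = { 0, 0 };
	struct chsc_pnso_area *area;
	int rc;

	area = (void *) get_zeroed_page(GFP_KERNEL);
	if (!area)
		return -ENOMEM;
	rc = chsc_pnso_brinfo(schid, area, token, 0);
	/* on success, area->naihdr and area->entries hold the result */
	free_page((unsigned long) area);
	return rc;
}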
......@@ -61,7 +61,9 @@ struct css_chsc_char {
u32 : 20;
u32 scssc : 1; /* bit 107 */
u32 scsscf : 1; /* bit 108 */
u32 : 19;
u32:7;
u32 pnso:1; /* bit 116 */
u32:11;
}__attribute__((packed));
extern struct css_chsc_char css_chsc_characteristics;
......@@ -188,6 +190,53 @@ struct chsc_scm_info {
int chsc_scm_info(struct chsc_scm_info *scm_area, u64 token);
struct chsc_brinfo_resume_token {
u64 t1;
u64 t2;
} __packed;
struct chsc_brinfo_naihdr {
struct chsc_brinfo_resume_token resume_token;
u32:32;
u32 instance;
u32:24;
u8 naids;
u32 reserved[3];
} __packed;
struct chsc_pnso_area {
struct chsc_header request;
u8:2;
u8 m:1;
u8:5;
u8:2;
u8 ssid:2;
u8 fmt:4;
u16 sch;
u8:8;
u8 cssid;
u16:16;
u8 oc;
u32:24;
struct chsc_brinfo_resume_token resume_token;
u32 n:1;
u32:31;
u32 reserved[3];
struct chsc_header response;
u32:32;
struct chsc_brinfo_naihdr naihdr;
union {
struct qdio_brinfo_entry_l3_ipv6 l3_ipv6[0];
struct qdio_brinfo_entry_l3_ipv4 l3_ipv4[0];
struct qdio_brinfo_entry_l2 l2[0];
} entries;
} __packed;
int chsc_pnso_brinfo(struct subchannel_id schid,
struct chsc_pnso_area *brinfo_area,
struct chsc_brinfo_resume_token resume_token,
int cnc);
#ifdef CONFIG_SCM_BUS
int scm_update_information(void);
int scm_process_availability_information(void);
......
......@@ -69,7 +69,8 @@ static int call_fn_known_sch(struct device *dev, void *data)
struct cb_data *cb = data;
int rc = 0;
idset_sch_del(cb->set, sch->schid);
if (cb->set)
idset_sch_del(cb->set, sch->schid);
if (cb->fn_known_sch)
rc = cb->fn_known_sch(sch, cb->data);
return rc;
......@@ -115,6 +116,13 @@ int for_each_subchannel_staged(int (*fn_known)(struct subchannel *, void *),
cb.fn_known_sch = fn_known;
cb.fn_unknown_sch = fn_unknown;
if (fn_known && !fn_unknown) {
/* Skip idset allocation in case of known-only loop. */
cb.set = NULL;
return bus_for_each_dev(&css_bus_type, NULL, &cb,
call_fn_known_sch);
}
cb.set = idset_sch_new();
if (!cb.set)
/* fall back to brute force scanning in case of oom */
......@@ -553,6 +561,9 @@ static int slow_eval_unknown_fn(struct subchannel_id schid, void *data)
default:
rc = 0;
}
/* Allow scheduling here since the containing loop might
* take a while. */
cond_resched();
}
return rc;
}
......@@ -572,7 +583,7 @@ static void css_slow_path_func(struct work_struct *unused)
spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}
static DECLARE_WORK(slow_path_work, css_slow_path_func);
static DECLARE_DELAYED_WORK(slow_path_work, css_slow_path_func);
struct workqueue_struct *cio_work_q;
void css_schedule_eval(struct subchannel_id schid)
......@@ -582,7 +593,7 @@ void css_schedule_eval(struct subchannel_id schid)
spin_lock_irqsave(&slow_subchannel_lock, flags);
idset_sch_add(slow_subchannel_set, schid);
atomic_set(&css_eval_scheduled, 1);
queue_work(cio_work_q, &slow_path_work);
queue_delayed_work(cio_work_q, &slow_path_work, 0);
spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}
......@@ -593,7 +604,7 @@ void css_schedule_eval_all(void)
spin_lock_irqsave(&slow_subchannel_lock, flags);
idset_fill(slow_subchannel_set);
atomic_set(&css_eval_scheduled, 1);
queue_work(cio_work_q, &slow_path_work);
queue_delayed_work(cio_work_q, &slow_path_work, 0);
spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}
......@@ -606,7 +617,7 @@ static int __unset_registered(struct device *dev, void *data)
return 0;
}
static void css_schedule_eval_all_unreg(void)
void css_schedule_eval_all_unreg(unsigned long delay)
{
unsigned long flags;
struct idset *unreg_set;
......@@ -624,7 +635,7 @@ static void css_schedule_eval_all_unreg(void)
spin_lock_irqsave(&slow_subchannel_lock, flags);
idset_add_set(slow_subchannel_set, unreg_set);
atomic_set(&css_eval_scheduled, 1);
queue_work(cio_work_q, &slow_path_work);
queue_delayed_work(cio_work_q, &slow_path_work, delay);
spin_unlock_irqrestore(&slow_subchannel_lock, flags);
idset_free(unreg_set);
}
......@@ -637,7 +648,8 @@ void css_wait_for_slow_path(void)
/* Schedule reprobing of all unregistered subchannels. */
void css_schedule_reprobe(void)
{
css_schedule_eval_all_unreg();
/* Schedule with a delay to allow merging of subsequent calls. */
css_schedule_eval_all_unreg(1 * HZ);
}
EXPORT_SYMBOL_GPL(css_schedule_reprobe);
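The delayed-work conversion above is what enables this merging: queue_delayed_work() does nothing (and returns false) while the work item is still pending, so bursts of reprobe requests inside the one-second window collapse into a single slow-path pass. A sketch of the effect (illustrative only):
static void reprobe_twice(void)
{
	css_schedule_reprobe();	/* arms slow_path_work with a 1s delay */
	css_schedule_reprobe();	/* merged: slow_path_work already queued */
}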
......
......@@ -133,6 +133,7 @@ extern struct channel_subsystem *channel_subsystems[];
/* Helper functions to build lists for the slow path. */
void css_schedule_eval(struct subchannel_id schid);
void css_schedule_eval_all(void);
void css_schedule_eval_all_unreg(unsigned long delay);
int css_complete_work(void);
int sch_is_pseudo_sch(struct subchannel *);
......
......@@ -333,9 +333,9 @@ int ccw_device_set_offline(struct ccw_device *cdev)
if (ret != 0)
return ret;
}
cdev->online = 0;
spin_lock_irq(cdev->ccwlock);
sch = to_subchannel(cdev->dev.parent);
cdev->online = 0;
/* Wait until a final state or DISCONNECTED is reached */
while (!dev_fsm_final_state(cdev) &&
cdev->private->state != DEV_STATE_DISCONNECTED) {
......@@ -446,7 +446,10 @@ int ccw_device_set_online(struct ccw_device *cdev)
ret = cdev->drv->set_online(cdev);
if (ret)
goto rollback;
spin_lock_irq(cdev->ccwlock);
cdev->online = 1;
spin_unlock_irq(cdev->ccwlock);
return 0;
rollback:
......@@ -546,17 +549,12 @@ static ssize_t online_store (struct device *dev, struct device_attribute *attr,
if (!dev_fsm_final_state(cdev) &&
cdev->private->state != DEV_STATE_DISCONNECTED) {
ret = -EAGAIN;
goto out_onoff;
goto out;
}
/* Prevent conflict between pending work and on-/offline processing.*/
if (work_pending(&cdev->private->todo_work)) {
ret = -EAGAIN;
goto out_onoff;
}
if (cdev->drv && !try_module_get(cdev->drv->driver.owner)) {
ret = -EINVAL;
goto out_onoff;
goto out;
}
if (!strncmp(buf, "force\n", count)) {
force = 1;
......@@ -568,6 +566,8 @@ static ssize_t online_store (struct device *dev, struct device_attribute *attr,
}
if (ret)
goto out;
device_lock(dev);
switch (i) {
case 0:
ret = online_store_handle_offline(cdev);
......@@ -578,10 +578,9 @@ static ssize_t online_store (struct device *dev, struct device_attribute *attr,
default:
ret = -EINVAL;
}
device_unlock(dev);
out:
if (cdev->drv)
module_put(cdev->drv->driver.owner);
out_onoff:
atomic_set(&cdev->private->onoff, 0);
return (ret < 0) ? ret : count;
}
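The device_lock()/device_unlock() pair added above serializes the attribute store against driver bind and unbind, which the driver core also runs under the device lock. A sketch of the interleaving this rules out (assuming the usual driver-core locking; not a literal excerpt):
/*
 * CPU 0: online_store()            CPU 1: driver unbind
 *   device_lock(dev);                __device_release_driver()
 *   online_store_handle_...();         blocks on device_lock(dev)
 *   device_unlock(dev);                ccw_device_remove()
 *
 * Without the lock, set_online/set_offline could race with remove.
 */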
......@@ -1745,8 +1744,7 @@ ccw_device_probe (struct device *dev)
return 0;
}
static int
ccw_device_remove (struct device *dev)
static int ccw_device_remove(struct device *dev)
{
struct ccw_device *cdev = to_ccwdev(dev);
struct ccw_driver *cdrv = cdev->drv;
......@@ -1754,9 +1752,10 @@ ccw_device_remove (struct device *dev)
if (cdrv->remove)
cdrv->remove(cdev);
spin_lock_irq(cdev->ccwlock);
if (cdev->online) {
cdev->online = 0;
spin_lock_irq(cdev->ccwlock);
ret = ccw_device_offline(cdev);
spin_unlock_irq(cdev->ccwlock);
if (ret == 0)
......@@ -1769,10 +1768,12 @@ ccw_device_remove (struct device *dev)
cdev->private->dev_id.devno);
/* Give up reference obtained in ccw_device_set_online(). */
put_device(&cdev->dev);
spin_lock_irq(cdev->ccwlock);
}
ccw_device_set_timeout(cdev, 0);
cdev->drv = NULL;
cdev->private->int_class = IRQIO_CIO;
spin_unlock_irq(cdev->ccwlock);
return 0;
}
......
......@@ -1752,6 +1752,97 @@ int qdio_stop_irq(struct ccw_device *cdev, int nr)
}
EXPORT_SYMBOL(qdio_stop_irq);
/**
* qdio_pnso_brinfo() - perform network subchannel op #0 - bridge info.
* @schid: Subchannel ID.
* @cnc: Boolean change-notification control
* @response: Response code will be stored at this address
* @cb: Callback function executed for each element of the address
*	list; it is passed @priv, the type of the address entry, and
*	a pointer to the entry containing an address of that type
* @priv: Pointer to pass to the callback function.
*
* Performs "Store-network-bridging-information list" operation and calls
* the callback function for every entry in the list. If "change-
* notification-control" is set, further changes in the address list
* will be reported via the IPA command.
*/
int qdio_pnso_brinfo(struct subchannel_id schid,
int cnc, u16 *response,
void (*cb)(void *priv, enum qdio_brinfo_entry_type type,
void *entry),
void *priv)
{
struct chsc_pnso_area *rr;
int rc;
u32 prev_instance = 0;
int isfirstblock = 1;
int i, size, elems;
rr = (struct chsc_pnso_area *)get_zeroed_page(GFP_KERNEL);
if (rr == NULL)
return -ENOMEM;
do {
/* on the first iteration, naihdr.resume_token will be zero */
rc = chsc_pnso_brinfo(schid, rr, rr->naihdr.resume_token, cnc);
if (rc != 0 && rc != -EBUSY)
goto out;
if (rr->response.code != 1) {
rc = -EIO;
continue;
} else
rc = 0;
if (cb == NULL)
continue;
size = rr->naihdr.naids;
elems = (rr->response.length -
sizeof(struct chsc_header) -
sizeof(struct chsc_brinfo_naihdr)) /
size;
if (!isfirstblock && (rr->naihdr.instance != prev_instance)) {
/* Inform the caller that they need to scrap */
/* the data that was already reported via cb */
rc = -EAGAIN;
break;
}
isfirstblock = 0;
prev_instance = rr->naihdr.instance;
for (i = 0; i < elems; i++)
switch (size) {
case sizeof(struct qdio_brinfo_entry_l3_ipv6):
(*cb)(priv, l3_ipv6_addr,
&rr->entries.l3_ipv6[i]);
break;
case sizeof(struct qdio_brinfo_entry_l3_ipv4):
(*cb)(priv, l3_ipv4_addr,
&rr->entries.l3_ipv4[i]);
break;
case sizeof(struct qdio_brinfo_entry_l2):
(*cb)(priv, l2_addr_lnid,
&rr->entries.l2[i]);
break;
default:
WARN_ON_ONCE(1);
rc = -EIO;
goto out;
}
} while (rr->response.code == 0x0107 || /* channel busy */
(rr->response.code == 1 && /* list stored */
/* resume token is non-zero => list incomplete */
(rr->naihdr.resume_token.t1 || rr->naihdr.resume_token.t2)));
(*response) = rr->response.code;
out:
free_page((unsigned long)rr);
return rc;
}
EXPORT_SYMBOL_GPL(qdio_pnso_brinfo);
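A hypothetical caller (illustration only, names invented) showing the callback contract: each address-list element is delivered to the callback together with its entry type and the @priv cookie:
static void count_entry(void *priv, enum qdio_brinfo_entry_type type,
			void *entry)
{
	unsigned long *count = priv;
	++*count;	/* type/entry are ignored in this sketch */
}
static long count_bridge_addresses(struct subchannel_id schid)
{
	unsigned long count = 0;
	u16 response;
	int rc;
	rc = qdio_pnso_brinfo(schid, 0 /* no change notification */,
			      &response, count_entry, &count);
	return rc ? rc : count;
}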
static int __init init_QDIO(void)
{
int rc;
......
......@@ -591,7 +591,13 @@ static int ap_init_queue(ap_qid_t qid)
if (rc != -ENODEV && rc != -EBUSY)
break;
if (i < AP_MAX_RESET - 1) {
udelay(5);
/* Time we wait until we give up (0.7 sec * 90).
* Since the request in progress will not be interrupted
* immediately by the reset command, we have to be patient.
* In the worst case we have to wait 60 sec plus the
* reset time (some msec).
*/
schedule_timeout(AP_RESET_TIMEOUT);
status = ap_test_queue(qid, &dummy, &dummy);
}
}
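A quick sanity check of the retry budget described in the comment, using the constants from ap_bus.h below:
AP_MAX_RESET * AP_RESET_TIMEOUT = 90 * 0.7 s = 63 s >= 60 s + reset time (some msec)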
......@@ -992,6 +998,28 @@ static ssize_t ap_domain_show(struct bus_type *bus, char *buf)
static BUS_ATTR(ap_domain, 0444, ap_domain_show, NULL);
static ssize_t ap_control_domain_mask_show(struct bus_type *bus, char *buf)
{
if (ap_configuration != NULL) { /* QCI supported */
if (test_facility(76)) { /* format 1 - 256 bit domain field */
return snprintf(buf, PAGE_SIZE,
"0x%08x%08x%08x%08x%08x%08x%08x%08x\n",
ap_configuration->adm[0], ap_configuration->adm[1],
ap_configuration->adm[2], ap_configuration->adm[3],
ap_configuration->adm[4], ap_configuration->adm[5],
ap_configuration->adm[6], ap_configuration->adm[7]);
} else { /* format 0 - 16 bit domain field */
return snprintf(buf, PAGE_SIZE, "%08x%08x\n",
ap_configuration->adm[0], ap_configuration->adm[1]);
}
} else {
return snprintf(buf, PAGE_SIZE, "not supported\n");
}
}
static BUS_ATTR(ap_control_domain_mask, 0444,
ap_control_domain_mask_show, NULL);
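Once registered, the mask can be read from sysfs; a small userspace sketch (the /sys/bus/ap path is assumed from the bus name, and the printed width depends on facility 76 as shown above):
#include <stdio.h>
/* Illustration only: print the AP control domain mask. */
int main(void)
{
	char buf[128];
	FILE *f = fopen("/sys/bus/ap/ap_control_domain_mask", "r");
	if (!f)
		return 1;
	if (fgets(buf, sizeof(buf), f))
		fputs(buf, stdout);
	fclose(f);
	return 0;
}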
static ssize_t ap_config_time_show(struct bus_type *bus, char *buf)
{
return snprintf(buf, PAGE_SIZE, "%d\n", ap_config_time);
......@@ -1077,6 +1105,7 @@ static BUS_ATTR(poll_timeout, 0644, poll_timeout_show, poll_timeout_store);
static struct bus_attribute *const ap_bus_attrs[] = {
&bus_attr_ap_domain,
&bus_attr_ap_control_domain_mask,
&bus_attr_config_time,
&bus_attr_poll_thread,
&bus_attr_ap_interrupts,
......
......@@ -33,7 +33,7 @@
#define AP_DEVICES 64 /* Number of AP devices. */
#define AP_DOMAINS 16 /* Number of AP domains. */
#define AP_MAX_RESET 90 /* Maximum number of resets. */
#define AP_RESET_TIMEOUT (HZ/2) /* Time in ticks for reset timeouts. */
#define AP_RESET_TIMEOUT (HZ*0.7) /* Time in ticks for reset timeouts. */
#define AP_CONFIG_TIME 30 /* Time in seconds between AP bus rescans. */
#define AP_POLL_TIME 1 /* Time in ticks between receive polls. */
......@@ -125,6 +125,8 @@ static inline int ap_test_bit(unsigned int *ptr, unsigned int nr)
#define AP_FUNC_CRT4K 2
#define AP_FUNC_COPRO 3
#define AP_FUNC_ACCEL 4
#define AP_FUNC_EP11 5
#define AP_FUNC_APXA 6
/*
* AP reset flag states
......
......@@ -44,6 +44,8 @@
#include "zcrypt_debug.h"
#include "zcrypt_api.h"
#include "zcrypt_msgtype6.h"
/*
* Module description.
*/
......@@ -554,9 +556,9 @@ static long zcrypt_send_cprb(struct ica_xcRB *xcRB)
spin_lock_bh(&zcrypt_device_lock);
list_for_each_entry(zdev, &zcrypt_device_list, list) {
if (!zdev->online || !zdev->ops->send_cprb ||
(xcRB->user_defined != AUTOSELECT &&
AP_QID_DEVICE(zdev->ap_dev->qid) != xcRB->user_defined)
)
(zdev->ops->variant == MSGTYPE06_VARIANT_EP11) ||
(xcRB->user_defined != AUTOSELECT &&
AP_QID_DEVICE(zdev->ap_dev->qid) != xcRB->user_defined))
continue;
zcrypt_device_get(zdev);
get_device(&zdev->ap_dev->device);
......@@ -581,6 +583,90 @@ static long zcrypt_send_cprb(struct ica_xcRB *xcRB)
return -ENODEV;
}
struct ep11_target_dev_list {
unsigned short targets_num;
struct ep11_target_dev *targets;
};
static bool is_desired_ep11dev(unsigned int dev_qid,
struct ep11_target_dev_list dev_list)
{
int n;
for (n = 0; n < dev_list.targets_num; n++, dev_list.targets++) {
if ((AP_QID_DEVICE(dev_qid) == dev_list.targets->ap_id) &&
(AP_QID_QUEUE(dev_qid) == dev_list.targets->dom_id)) {
return true;
}
}
return false;
}
static long zcrypt_send_ep11_cprb(struct ep11_urb *xcrb)
{
struct zcrypt_device *zdev;
bool autoselect = false;
int rc;
struct ep11_target_dev_list ep11_dev_list = {
.targets_num = 0x00,
.targets = NULL,
};
ep11_dev_list.targets_num = (unsigned short) xcrb->targets_num;
/* empty list indicates autoselect (all available targets) */
if (ep11_dev_list.targets_num == 0)
autoselect = true;
else {
ep11_dev_list.targets = kcalloc((unsigned short)
xcrb->targets_num,
sizeof(struct ep11_target_dev),
GFP_KERNEL);
if (!ep11_dev_list.targets)
return -ENOMEM;
if (copy_from_user(ep11_dev_list.targets,
(struct ep11_target_dev *)xcrb->targets,
xcrb->targets_num *
sizeof(struct ep11_target_dev))) {
kfree(ep11_dev_list.targets); /* don't leak the target list */
return -EFAULT;
}
}
spin_lock_bh(&zcrypt_device_lock);
list_for_each_entry(zdev, &zcrypt_device_list, list) {
/* check if device is eligible */
if (!zdev->online ||
zdev->ops->variant != MSGTYPE06_VARIANT_EP11)
continue;
/* check if device is selected as valid target */
if (!is_desired_ep11dev(zdev->ap_dev->qid, ep11_dev_list) &&
!autoselect)
continue;
zcrypt_device_get(zdev);
get_device(&zdev->ap_dev->device);
zdev->request_count++;
__zcrypt_decrease_preference(zdev);
if (try_module_get(zdev->ap_dev->drv->driver.owner)) {
spin_unlock_bh(&zcrypt_device_lock);
rc = zdev->ops->send_ep11_cprb(zdev, xcrb);
spin_lock_bh(&zcrypt_device_lock);
module_put(zdev->ap_dev->drv->driver.owner);
} else {
rc = -EAGAIN;
}
zdev->request_count--;
__zcrypt_increase_preference(zdev);
put_device(&zdev->ap_dev->device);
zcrypt_device_put(zdev);
spin_unlock_bh(&zcrypt_device_lock);
kfree(ep11_dev_list.targets); /* free the target list on exit */
return rc;
}
spin_unlock_bh(&zcrypt_device_lock);
kfree(ep11_dev_list.targets);
return -ENODEV;
}
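For illustration (hypothetical card/domain values, not from this commit), a caller pinning work to a single EP11 queue would build the target list that is_desired_ep11dev() matches against like this:
/* Illustration only: accept AP card 5, domain 9 as the sole target. */
static bool qid_matches_card5_dom9(unsigned int qid)
{
	struct ep11_target_dev target = { .ap_id = 5, .dom_id = 9 };
	struct ep11_target_dev_list list = {
		.targets_num = 1,
		.targets = &target,
	};
	return is_desired_ep11dev(qid, list);
}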
static long zcrypt_rng(char *buffer)
{
struct zcrypt_device *zdev;
......@@ -784,6 +870,23 @@ static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd,
return -EFAULT;
return rc;
}
case ZSENDEP11CPRB: {
struct ep11_urb __user *uxcrb = (void __user *)arg;
struct ep11_urb xcrb;
if (copy_from_user(&xcrb, uxcrb, sizeof(xcrb)))
return -EFAULT;
do {
rc = zcrypt_send_ep11_cprb(&xcrb);
} while (rc == -EAGAIN);
/* on failure: retry once again after a requested rescan */
if ((rc == -ENODEV) && (zcrypt_process_rescan()))
do {
rc = zcrypt_send_ep11_cprb(&xcrb);
} while (rc == -EAGAIN);
if (copy_to_user(uxcrb, &xcrb, sizeof(xcrb)))
return -EFAULT;
return rc;
}
case Z90STAT_STATUS_MASK: {
char status[AP_DEVICES];
zcrypt_status_mask(status);
......
......@@ -74,6 +74,7 @@ struct ica_z90_status {
#define ZCRYPT_CEX2A 6
#define ZCRYPT_CEX3C 7
#define ZCRYPT_CEX3A 8
#define ZCRYPT_CEX4 10
/**
* Large random numbers are pulled in 4096 byte chunks from the crypto cards
......@@ -89,6 +90,7 @@ struct zcrypt_ops {
long (*rsa_modexpo_crt)(struct zcrypt_device *,
struct ica_rsa_modexpo_crt *);
long (*send_cprb)(struct zcrypt_device *, struct ica_xcRB *);
long (*send_ep11_cprb)(struct zcrypt_device *, struct ep11_urb *);
long (*rng)(struct zcrypt_device *, char *);
struct list_head list; /* zcrypt ops list. */
struct module *owner;
......
......@@ -30,7 +30,12 @@
#define CEX4A_MAX_MESSAGE_SIZE MSGTYPE50_CRB3_MAX_MSG_SIZE
#define CEX4C_MAX_MESSAGE_SIZE MSGTYPE06_MAX_MSG_SIZE
#define CEX4_CLEANUP_TIME (15*HZ)
/* Waiting time for requests to be processed.
* Currently some types of request are not deterministic, but the
* maximum time limit managed by the stomper code is set to 60 sec.
* Hence we have to wait at least that long.
*/
#define CEX4_CLEANUP_TIME (61*HZ)
static struct ap_device_id zcrypt_cex4_ids[] = {
{ AP_DEVICE(AP_DEVICE_TYPE_CEX4) },
......@@ -101,6 +106,19 @@ static int zcrypt_cex4_probe(struct ap_device *ap_dev)
zdev->speed_rating = CEX4C_SPEED_RATING;
zdev->ops = zcrypt_msgtype_request(MSGTYPE06_NAME,
MSGTYPE06_VARIANT_DEFAULT);
} else if (ap_test_bit(&ap_dev->functions, AP_FUNC_EP11)) {
zdev = zcrypt_device_alloc(CEX4C_MAX_MESSAGE_SIZE);
if (!zdev)
return -ENOMEM;
zdev->type_string = "CEX4P";
zdev->user_space_type = ZCRYPT_CEX4;
zdev->min_mod_size = CEX4C_MIN_MOD_SIZE;
zdev->max_mod_size = CEX4C_MAX_MOD_SIZE;
zdev->max_exp_bit_length = CEX4C_MAX_MOD_SIZE;
zdev->short_crt = 0;
zdev->speed_rating = CEX4C_SPEED_RATING;
zdev->ops = zcrypt_msgtype_request(MSGTYPE06_NAME,
MSGTYPE06_VARIANT_EP11);
}
break;
}
......
......@@ -106,15 +106,15 @@ static inline int convert_error(struct zcrypt_device *zdev,
// REP88_ERROR_MESSAGE_TYPE // '20' CEX2A
/*
* To send a message of the wrong type is a bug in the
* device driver. Warn about it, disable the device
* device driver. Send an error message, disable the device
* and then repeat the request.
*/
WARN_ON(1);
atomic_set(&zcrypt_rescan_req, 1);
zdev->online = 0;
pr_err("Cryptographic device %x failed and was set offline\n",
zdev->ap_dev->qid);
ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%drc%d",
zdev->ap_dev->qid,
zdev->online, ehdr->reply_code);
zdev->ap_dev->qid, zdev->online, ehdr->reply_code);
return -EAGAIN;
case REP82_ERROR_TRANSPORT_FAIL:
case REP82_ERROR_MACHINE_FAILURE:
......@@ -122,15 +122,17 @@ static inline int convert_error(struct zcrypt_device *zdev,
/* If a card fails disable it and repeat the request. */
atomic_set(&zcrypt_rescan_req, 1);
zdev->online = 0;
pr_err("Cryptographic device %x failed and was set offline\n",
zdev->ap_dev->qid);
ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%drc%d",
zdev->ap_dev->qid,
zdev->online, ehdr->reply_code);
zdev->ap_dev->qid, zdev->online, ehdr->reply_code);
return -EAGAIN;
default:
zdev->online = 0;
pr_err("Cryptographic device %x failed and was set offline\n",
zdev->ap_dev->qid);
ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%drc%d",
zdev->ap_dev->qid,
zdev->online, ehdr->reply_code);
zdev->ap_dev->qid, zdev->online, ehdr->reply_code);
return -EAGAIN; /* repeat the request on a different device. */
}
}
......
......@@ -25,6 +25,9 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#define KMSG_COMPONENT "zcrypt"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
......@@ -332,6 +335,11 @@ static int convert_type80(struct zcrypt_device *zdev,
if (t80h->len < sizeof(*t80h) + outputdatalength) {
/* The result is too short, the CEX2A card may not do that. */
zdev->online = 0;
pr_err("Cryptographic device %x failed and was set offline\n",
zdev->ap_dev->qid);
ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%drc%d",
zdev->ap_dev->qid, zdev->online, t80h->code);
return -EAGAIN; /* repeat the request on a different device. */
}
if (zdev->user_space_type == ZCRYPT_CEX2A)
......@@ -359,6 +367,10 @@ static int convert_response(struct zcrypt_device *zdev,
outputdata, outputdatalength);
default: /* Unknown response type, this should NEVER EVER happen */
zdev->online = 0;
pr_err("Cryptographic device %x failed and was set offline\n",
zdev->ap_dev->qid);
ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%dfail",
zdev->ap_dev->qid, zdev->online);
return -EAGAIN; /* repeat the request on a different device. */
}
}
......
......@@ -32,6 +32,7 @@
#define MSGTYPE06_NAME "zcrypt_msgtype6"
#define MSGTYPE06_VARIANT_DEFAULT 0
#define MSGTYPE06_VARIANT_NORNG 1
#define MSGTYPE06_VARIANT_EP11 2
#define MSGTYPE06_MAX_MSG_SIZE (12*1024)
......@@ -99,6 +100,7 @@ struct type86_hdr {
} __packed;
#define TYPE86_RSP_CODE 0x86
#define TYPE87_RSP_CODE 0x87
#define TYPE86_FMT2 0x02
struct type86_fmt2_ext {
......
......@@ -24,6 +24,9 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#define KMSG_COMPONENT "zcrypt"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
......@@ -199,6 +202,10 @@ static int convert_type84(struct zcrypt_device *zdev,
if (t84h->len < sizeof(*t84h) + outputdatalength) {
/* The result is too short, the PCICA card may not do that. */
zdev->online = 0;
pr_err("Cryptographic device %x failed and was set offline\n",
zdev->ap_dev->qid);
ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%drc%d",
zdev->ap_dev->qid, zdev->online, t84h->code);
return -EAGAIN; /* repeat the request on a different device. */
}
BUG_ON(t84h->len > PCICA_MAX_RESPONSE_SIZE);
......@@ -223,6 +230,10 @@ static int convert_response(struct zcrypt_device *zdev,
outputdata, outputdatalength);
default: /* Unknown response type, this should NEVER EVER happen */
zdev->online = 0;
pr_err("Cryptographic device %x failed and was set offline\n",
zdev->ap_dev->qid);
ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%dfail",
zdev->ap_dev->qid, zdev->online);
return -EAGAIN; /* repeat the request on a different device. */
}
}
......
......@@ -24,6 +24,9 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#define KMSG_COMPONENT "zcrypt"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/module.h>
#include <linux/init.h>
#include <linux/gfp.h>
......@@ -372,6 +375,11 @@ static int convert_type86(struct zcrypt_device *zdev,
if (service_rc == 8 && service_rs == 72)
return -EINVAL;
zdev->online = 0;
pr_err("Cryptographic device %x failed and was set offline\n",
zdev->ap_dev->qid);
ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%drc%d",
zdev->ap_dev->qid, zdev->online,
msg->hdr.reply_code);
return -EAGAIN; /* repeat the request on a different device. */
}
data = msg->text;
......@@ -425,6 +433,10 @@ static int convert_response(struct zcrypt_device *zdev,
/* no break, incorrect cprb version is an unknown response */
default: /* Unknown response type, this should NEVER EVER happen */
zdev->online = 0;
pr_err("Cryptographic device %x failed and was set offline\n",
zdev->ap_dev->qid);
ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%dfail",
zdev->ap_dev->qid, zdev->online);
return -EAGAIN; /* repeat the request on a different device. */
}
}
......