Commit ba0234ec authored by Linus Torvalds

Merge branch 'for-linus' of git://git390.marist.edu/pub/scm/linux-2.6

* 'for-linus' of git://git390.marist.edu/pub/scm/linux-2.6: (24 commits)
  [S390] drivers/s390/char: Use kmemdup
  [S390] drivers/s390/char: Use kstrdup
  [S390] debug: enable exception-trace debug facility
  [S390] s390_hypfs: Add new attributes
  [S390] qdio: remove API wrappers
  [S390] qdio: set correct bit in dsci
  [S390] qdio: dont convert timestamps to microseconds
  [S390] qdio: remove memset hack
  [S390] qdio: prevent starvation on PCI devices
  [S390] qdio: count number of qdio interrupts
  [S390] user space fault: report fault before calling do_exit
  [S390] topology: expose core identifier
  [S390] dasd: remove uid from devmap
  [S390] dasd: add dynamic pav toleration
  [S390] vdso: add missing vdso_install target
  [S390] vdso: remove redundant check for CONFIG_64BIT
  [S390] avoid default_llseek in s390 drivers
  [S390] vmcp: disallow modular build
  [S390] add breaking event address for user space
  [S390] virtualization aware cpu measurement
  ...
parents 537b60d1 939e379e
......@@ -444,13 +444,6 @@ config FORCE_MAX_ZONEORDER
int
default "9"
config PROCESS_DEBUG
bool "Show crashed user process info"
help
Say Y to print all process fault locations to the console. This is
a debugging option; you probably do not want to set it unless you
are an S390 port maintainer.
config PFAULT
bool "Pseudo page fault support"
help
......
......@@ -116,6 +116,12 @@ image bzImage: vmlinux
zfcpdump:
$(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
vdso_install:
ifeq ($(CONFIG_64BIT),y)
$(Q)$(MAKE) $(build)=arch/$(ARCH)/kernel/vdso64 $@
endif
$(Q)$(MAKE) $(build)=arch/$(ARCH)/kernel/vdso32 $@
archclean:
$(Q)$(MAKE) $(clean)=$(boot)
......
......@@ -11,6 +11,7 @@
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/debugfs.h>
#define REG_FILE_MODE 0440
#define UPDATE_FILE_MODE 0220
......@@ -34,6 +35,9 @@ extern int hypfs_diag_create_files(struct super_block *sb, struct dentry *root);
/* VM Hypervisor */
extern int hypfs_vm_init(void);
extern void hypfs_vm_exit(void);
extern int hypfs_vm_create_files(struct super_block *sb, struct dentry *root);
/* Directory for debugfs files */
extern struct dentry *hypfs_dbfs_dir;
#endif /* _HYPFS_H_ */
......@@ -15,6 +15,7 @@
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <asm/ebcdic.h>
#include "hypfs.h"
......@@ -22,6 +23,8 @@
#define CPU_NAME_LEN 16 /* type name len of cpus in diag224 name table */
#define TMP_SIZE 64 /* size of temporary buffers */
#define DBFS_D204_HDR_VERSION 0
/* diag 204 subcodes */
enum diag204_sc {
SUBC_STIB4 = 4,
......@@ -47,6 +50,8 @@ static void *diag204_buf; /* 4K aligned buffer for diag204 data */
static void *diag204_buf_vmalloc; /* vmalloc pointer for diag204 data */
static int diag204_buf_pages; /* number of pages for diag204 data */
static struct dentry *dbfs_d204_file;
/*
* DIAG 204 data structures and member access functions.
*
......@@ -364,18 +369,21 @@ static void diag204_free_buffer(void)
} else {
free_pages((unsigned long) diag204_buf, 0);
}
diag204_buf_pages = 0;
diag204_buf = NULL;
}
static void *page_align_ptr(void *ptr)
{
return (void *) PAGE_ALIGN((unsigned long) ptr);
}
static void *diag204_alloc_vbuf(int pages)
{
/* The buffer has to be page aligned! */
diag204_buf_vmalloc = vmalloc(PAGE_SIZE * (pages + 1));
if (!diag204_buf_vmalloc)
return ERR_PTR(-ENOMEM);
diag204_buf = (void*)((unsigned long)diag204_buf_vmalloc
& ~0xfffUL) + 0x1000;
diag204_buf = page_align_ptr(diag204_buf_vmalloc);
diag204_buf_pages = pages;
return diag204_buf;
}
......@@ -468,17 +476,26 @@ static int diag204_probe(void)
return rc;
}
static int diag204_do_store(void *buf, int pages)
{
int rc;
rc = diag204((unsigned long) diag204_store_sc |
(unsigned long) diag204_info_type, pages, buf);
return rc < 0 ? -ENOSYS : 0;
}
static void *diag204_store(void)
{
void *buf;
int pages;
int pages, rc;
buf = diag204_get_buffer(diag204_info_type, &pages);
if (IS_ERR(buf))
goto out;
if (diag204((unsigned long)diag204_store_sc |
(unsigned long)diag204_info_type, pages, buf) < 0)
return ERR_PTR(-ENOSYS);
rc = diag204_do_store(buf, pages);
if (rc)
return ERR_PTR(rc);
out:
return buf;
}
......@@ -526,6 +543,92 @@ static int diag224_idx2name(int index, char *name)
return 0;
}
struct dbfs_d204_hdr {
u64 len; /* Length of d204 buffer without header */
u16 version; /* Version of header */
u8 sc; /* Used subcode */
char reserved[53];
} __attribute__ ((packed));
struct dbfs_d204 {
struct dbfs_d204_hdr hdr; /* 64 byte header */
char buf[]; /* d204 buffer */
} __attribute__ ((packed));
struct dbfs_d204_private {
struct dbfs_d204 *d204; /* Aligned d204 data with header */
void *base; /* Base pointer (needed for vfree) */
};
static int dbfs_d204_open(struct inode *inode, struct file *file)
{
struct dbfs_d204_private *data;
struct dbfs_d204 *d204;
int rc, buf_size;
data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
buf_size = PAGE_SIZE * (diag204_buf_pages + 1) + sizeof(d204->hdr);
data->base = vmalloc(buf_size);
if (!data->base) {
rc = -ENOMEM;
goto fail_kfree_data;
}
memset(data->base, 0, buf_size);
d204 = page_align_ptr(data->base + sizeof(d204->hdr))
- sizeof(d204->hdr);
rc = diag204_do_store(&d204->buf, diag204_buf_pages);
if (rc)
goto fail_vfree_base;
d204->hdr.version = DBFS_D204_HDR_VERSION;
d204->hdr.len = PAGE_SIZE * diag204_buf_pages;
d204->hdr.sc = diag204_store_sc;
data->d204 = d204;
file->private_data = data;
return nonseekable_open(inode, file);
fail_vfree_base:
vfree(data->base);
fail_kfree_data:
kfree(data);
return rc;
}
static int dbfs_d204_release(struct inode *inode, struct file *file)
{
struct dbfs_d204_private *data = file->private_data;
vfree(data->base);
kfree(data);
return 0;
}
static ssize_t dbfs_d204_read(struct file *file, char __user *buf,
size_t size, loff_t *ppos)
{
struct dbfs_d204_private *data = file->private_data;
return simple_read_from_buffer(buf, size, ppos, data->d204,
data->d204->hdr.len +
sizeof(data->d204->hdr));
}
static const struct file_operations dbfs_d204_ops = {
.open = dbfs_d204_open,
.read = dbfs_d204_read,
.release = dbfs_d204_release,
};
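For orientation, the new debugfs interface simply hands out the raw diag 204 data prefixed by the 64-byte header defined above. A rough user-space sketch of a consumer follows; the file name s390_hypfs/diag_204 comes from hypfs_dbfs_init() below, while the reader itself and its struct name are illustrative and not part of this patch:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct d204_hdr {			/* mirrors struct dbfs_d204_hdr above */
	uint64_t len;			/* length of d204 buffer without header */
	uint16_t version;		/* header version */
	uint8_t  sc;			/* subcode used for the stored data */
	char     reserved[53];
} __attribute__ ((packed));

int main(void)
{
	FILE *f = fopen("/sys/kernel/debug/s390_hypfs/diag_204", "rb");
	struct d204_hdr hdr;
	char *buf;

	if (!f || fread(&hdr, sizeof(hdr), 1, f) != 1)
		return 1;
	buf = malloc(hdr.len);
	if (!buf || fread(buf, 1, hdr.len, f) != hdr.len)
		return 1;
	printf("version=%u sc=%u payload=%llu bytes\n",
	       (unsigned) hdr.version, (unsigned) hdr.sc,
	       (unsigned long long) hdr.len);
	free(buf);
	fclose(f);
	return 0;
}

The diag_2fc file added later in this merge follows the same header-plus-payload pattern.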
static int hypfs_dbfs_init(void)
{
dbfs_d204_file = debugfs_create_file("diag_204", 0400, hypfs_dbfs_dir,
NULL, &dbfs_d204_ops);
if (IS_ERR(dbfs_d204_file))
return PTR_ERR(dbfs_d204_file);
return 0;
}
__init int hypfs_diag_init(void)
{
int rc;
......@@ -540,11 +643,17 @@ __init int hypfs_diag_init(void)
pr_err("The hardware system does not provide all "
"functions required by hypfs\n");
}
if (diag204_info_type == INFO_EXT) {
rc = hypfs_dbfs_init();
if (rc)
diag204_free_buffer();
}
return rc;
}
void hypfs_diag_exit(void)
{
debugfs_remove(dbfs_d204_file);
diag224_delete_name_table();
diag204_free_buffer();
}
......
......@@ -10,14 +10,18 @@
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <asm/ebcdic.h>
#include <asm/timex.h>
#include "hypfs.h"
#define NAME_LEN 8
#define DBFS_D2FC_HDR_VERSION 0
static char local_guest[] = " ";
static char all_guests[] = "* ";
static char *guest_query;
static struct dentry *dbfs_d2fc_file;
struct diag2fc_data {
__u32 version;
__u32 flags;
......@@ -76,23 +80,26 @@ static int diag2fc(int size, char* query, void *addr)
return -residual_cnt;
}
static struct diag2fc_data *diag2fc_store(char *query, int *count)
/*
* Allocate buffer for "query" and store diag 2fc at "offset"
*/
static void *diag2fc_store(char *query, unsigned int *count, int offset)
{
void *data;
int size;
struct diag2fc_data *data;
do {
size = diag2fc(0, query, NULL);
if (size < 0)
return ERR_PTR(-EACCES);
data = vmalloc(size);
data = vmalloc(size + offset);
if (!data)
return ERR_PTR(-ENOMEM);
if (diag2fc(size, query, data) == 0)
if (diag2fc(size, query, data + offset) == 0)
break;
vfree(data);
} while (1);
*count = (size / sizeof(*data));
*count = (size / sizeof(struct diag2fc_data));
return data;
}
......@@ -168,9 +175,10 @@ int hypfs_vm_create_files(struct super_block *sb, struct dentry *root)
{
struct dentry *dir, *file;
struct diag2fc_data *data;
int rc, i, count = 0;
unsigned int count = 0;
int rc, i;
data = diag2fc_store(guest_query, &count);
data = diag2fc_store(guest_query, &count, 0);
if (IS_ERR(data))
return PTR_ERR(data);
......@@ -218,8 +226,61 @@ int hypfs_vm_create_files(struct super_block *sb, struct dentry *root)
return rc;
}
struct dbfs_d2fc_hdr {
u64 len; /* Length of d2fc buffer without header */
u16 version; /* Version of header */
char tod_ext[16]; /* TOD clock for d2fc */
u64 count; /* Number of VM guests in d2fc buffer */
char reserved[30];
} __attribute__ ((packed));
struct dbfs_d2fc {
struct dbfs_d2fc_hdr hdr; /* 64 byte header */
char buf[]; /* d2fc buffer */
} __attribute__ ((packed));
static int dbfs_d2fc_open(struct inode *inode, struct file *file)
{
struct dbfs_d2fc *data;
unsigned int count;
data = diag2fc_store(guest_query, &count, sizeof(data->hdr));
if (IS_ERR(data))
return PTR_ERR(data);
get_clock_ext(data->hdr.tod_ext);
data->hdr.len = count * sizeof(struct diag2fc_data);
data->hdr.version = DBFS_D2FC_HDR_VERSION;
data->hdr.count = count;
memset(&data->hdr.reserved, 0, sizeof(data->hdr.reserved));
file->private_data = data;
return nonseekable_open(inode, file);
}
static int dbfs_d2fc_release(struct inode *inode, struct file *file)
{
diag2fc_free(file->private_data);
return 0;
}
static ssize_t dbfs_d2fc_read(struct file *file, char __user *buf,
size_t size, loff_t *ppos)
{
struct dbfs_d2fc *data = file->private_data;
return simple_read_from_buffer(buf, size, ppos, data, data->hdr.len +
sizeof(struct dbfs_d2fc_hdr));
}
static const struct file_operations dbfs_d2fc_ops = {
.open = dbfs_d2fc_open,
.read = dbfs_d2fc_read,
.release = dbfs_d2fc_release,
};
int hypfs_vm_init(void)
{
if (!MACHINE_IS_VM)
return 0;
if (diag2fc(0, all_guests, NULL) > 0)
guest_query = all_guests;
else if (diag2fc(0, local_guest, NULL) > 0)
......@@ -227,5 +288,17 @@ int hypfs_vm_init(void)
else
return -EACCES;
dbfs_d2fc_file = debugfs_create_file("diag_2fc", 0400, hypfs_dbfs_dir,
NULL, &dbfs_d2fc_ops);
if (IS_ERR(dbfs_d2fc_file))
return PTR_ERR(dbfs_d2fc_file);
return 0;
}
void hypfs_vm_exit(void)
{
if (!MACHINE_IS_VM)
return;
debugfs_remove(dbfs_d2fc_file);
}
......@@ -46,6 +46,8 @@ static const struct super_operations hypfs_s_ops;
/* start of list of all dentries, which have to be deleted on update */
static struct dentry *hypfs_last_dentry;
struct dentry *hypfs_dbfs_dir;
static void hypfs_update_update(struct super_block *sb)
{
struct hypfs_sb_info *sb_info = sb->s_fs_info;
......@@ -145,7 +147,7 @@ static int hypfs_open(struct inode *inode, struct file *filp)
}
mutex_unlock(&fs_info->lock);
}
return 0;
return nonseekable_open(inode, filp);
}
static ssize_t hypfs_aio_read(struct kiocb *iocb, const struct iovec *iov,
......@@ -468,20 +470,22 @@ static int __init hypfs_init(void)
{
int rc;
if (MACHINE_IS_VM) {
if (hypfs_vm_init())
/* no diag 2fc, just exit */
return -ENODATA;
} else {
hypfs_dbfs_dir = debugfs_create_dir("s390_hypfs", NULL);
if (IS_ERR(hypfs_dbfs_dir))
return PTR_ERR(hypfs_dbfs_dir);
if (hypfs_diag_init()) {
rc = -ENODATA;
goto fail_diag;
goto fail_debugfs_remove;
}
if (hypfs_vm_init()) {
rc = -ENODATA;
goto fail_hypfs_diag_exit;
}
s390_kobj = kobject_create_and_add("s390", hypervisor_kobj);
if (!s390_kobj) {
rc = -ENOMEM;
goto fail_sysfs;
goto fail_hypfs_vm_exit;
}
rc = register_filesystem(&hypfs_type);
if (rc)
......@@ -490,18 +494,22 @@ static int __init hypfs_init(void)
fail_filesystem:
kobject_put(s390_kobj);
fail_sysfs:
if (!MACHINE_IS_VM)
fail_hypfs_vm_exit:
hypfs_vm_exit();
fail_hypfs_diag_exit:
hypfs_diag_exit();
fail_diag:
fail_debugfs_remove:
debugfs_remove(hypfs_dbfs_dir);
pr_err("Initialization of hypfs failed with rc=%i\n", rc);
return rc;
}
static void __exit hypfs_exit(void)
{
if (!MACHINE_IS_VM)
hypfs_diag_exit();
hypfs_vm_exit();
debugfs_remove(hypfs_dbfs_dir);
unregister_filesystem(&hypfs_type);
kobject_put(s390_kobj);
}
......
......@@ -188,15 +188,16 @@ struct s390_idle_data {
DECLARE_PER_CPU(struct s390_idle_data, s390_idle);
void vtime_start_cpu(void);
void vtime_start_cpu(__u64 int_clock, __u64 enter_timer);
cputime64_t s390_get_idle_time(int cpu);
#define arch_idle_time(cpu) s390_get_idle_time(cpu)
static inline void s390_idle_check(void)
static inline void s390_idle_check(struct pt_regs *regs, __u64 int_clock,
__u64 enter_timer)
{
if ((&__get_cpu_var(s390_idle))->idle_enter != 0ULL)
vtime_start_cpu();
if (regs->psw.mask & PSW_MASK_WAIT)
vtime_start_cpu(int_clock, enter_timer);
}
static inline int s390_nohz_delay(int cpu)
......
......@@ -104,38 +104,39 @@ struct _lowcore {
/* CPU time accounting values */
__u64 sync_enter_timer; /* 0x0250 */
__u64 async_enter_timer; /* 0x0258 */
__u64 exit_timer; /* 0x0260 */
__u64 user_timer; /* 0x0268 */
__u64 system_timer; /* 0x0270 */
__u64 steal_timer; /* 0x0278 */
__u64 last_update_timer; /* 0x0280 */
__u64 last_update_clock; /* 0x0288 */
__u64 mcck_enter_timer; /* 0x0260 */
__u64 exit_timer; /* 0x0268 */
__u64 user_timer; /* 0x0270 */
__u64 system_timer; /* 0x0278 */
__u64 steal_timer; /* 0x0280 */
__u64 last_update_timer; /* 0x0288 */
__u64 last_update_clock; /* 0x0290 */
/* Current process. */
__u32 current_task; /* 0x0290 */
__u32 thread_info; /* 0x0294 */
__u32 kernel_stack; /* 0x0298 */
__u32 current_task; /* 0x0298 */
__u32 thread_info; /* 0x029c */
__u32 kernel_stack; /* 0x02a0 */
/* Interrupt and panic stack. */
__u32 async_stack; /* 0x029c */
__u32 panic_stack; /* 0x02a0 */
__u32 async_stack; /* 0x02a4 */
__u32 panic_stack; /* 0x02a8 */
/* Address space pointer. */
__u32 kernel_asce; /* 0x02a4 */
__u32 user_asce; /* 0x02a8 */
__u32 user_exec_asce; /* 0x02ac */
__u32 kernel_asce; /* 0x02ac */
__u32 user_asce; /* 0x02b0 */
__u32 user_exec_asce; /* 0x02b4 */
/* SMP info area */
struct cpuid cpu_id; /* 0x02b0 */
__u32 cpu_nr; /* 0x02b8 */
__u32 softirq_pending; /* 0x02bc */
__u32 percpu_offset; /* 0x02c0 */
__u32 ext_call_fast; /* 0x02c4 */
__u64 int_clock; /* 0x02c8 */
__u64 clock_comparator; /* 0x02d0 */
__u32 machine_flags; /* 0x02d8 */
__u32 ftrace_func; /* 0x02dc */
__u8 pad_0x02e0[0x0300-0x02e0]; /* 0x02e0 */
__u64 mcck_clock; /* 0x02d0 */
__u64 clock_comparator; /* 0x02d8 */
__u32 machine_flags; /* 0x02e0 */
__u32 ftrace_func; /* 0x02e4 */
__u8 pad_0x02e8[0x0300-0x02e8]; /* 0x02e8 */
/* Interrupt response block */
__u8 irb[64]; /* 0x0300 */
......@@ -189,14 +190,14 @@ struct _lowcore {
__u32 data_exc_code; /* 0x0090 */
__u16 mon_class_num; /* 0x0094 */
__u16 per_perc_atmid; /* 0x0096 */
addr_t per_address; /* 0x0098 */
__u64 per_address; /* 0x0098 */
__u8 exc_access_id; /* 0x00a0 */
__u8 per_access_id; /* 0x00a1 */
__u8 op_access_id; /* 0x00a2 */
__u8 ar_access_id; /* 0x00a3 */
__u8 pad_0x00a4[0x00a8-0x00a4]; /* 0x00a4 */
addr_t trans_exc_code; /* 0x00a8 */
addr_t monitor_code; /* 0x00b0 */
__u64 trans_exc_code; /* 0x00a8 */
__u64 monitor_code; /* 0x00b0 */
__u16 subchannel_id; /* 0x00b8 */
__u16 subchannel_nr; /* 0x00ba */
__u32 io_int_parm; /* 0x00bc */
......@@ -207,7 +208,7 @@ struct _lowcore {
__u32 mcck_interruption_code[2]; /* 0x00e8 */
__u8 pad_0x00f0[0x00f4-0x00f0]; /* 0x00f0 */
__u32 external_damage_code; /* 0x00f4 */
addr_t failing_storage_address; /* 0x00f8 */
__u64 failing_storage_address; /* 0x00f8 */
__u8 pad_0x0100[0x0110-0x0100]; /* 0x0100 */
__u64 breaking_event_addr; /* 0x0110 */
__u8 pad_0x0118[0x0120-0x0118]; /* 0x0118 */
......@@ -233,39 +234,41 @@ struct _lowcore {
/* CPU accounting and timing values. */
__u64 sync_enter_timer; /* 0x02a0 */
__u64 async_enter_timer; /* 0x02a8 */
__u64 exit_timer; /* 0x02b0 */
__u64 user_timer; /* 0x02b8 */
__u64 system_timer; /* 0x02c0 */
__u64 steal_timer; /* 0x02c8 */
__u64 last_update_timer; /* 0x02d0 */
__u64 last_update_clock; /* 0x02d8 */
__u64 mcck_enter_timer; /* 0x02b0 */
__u64 exit_timer; /* 0x02b8 */
__u64 user_timer; /* 0x02c0 */
__u64 system_timer; /* 0x02c8 */
__u64 steal_timer; /* 0x02d0 */
__u64 last_update_timer; /* 0x02d8 */
__u64 last_update_clock; /* 0x02e0 */
/* Current process. */
__u64 current_task; /* 0x02e0 */
__u64 thread_info; /* 0x02e8 */
__u64 kernel_stack; /* 0x02f0 */
__u64 current_task; /* 0x02e8 */
__u64 thread_info; /* 0x02f0 */
__u64 kernel_stack; /* 0x02f8 */
/* Interrupt and panic stack. */
__u64 async_stack; /* 0x02f8 */
__u64 panic_stack; /* 0x0300 */
__u64 async_stack; /* 0x0300 */
__u64 panic_stack; /* 0x0308 */
/* Address space pointer. */
__u64 kernel_asce; /* 0x0308 */
__u64 user_asce; /* 0x0310 */
__u64 user_exec_asce; /* 0x0318 */
__u64 kernel_asce; /* 0x0310 */
__u64 user_asce; /* 0x0318 */
__u64 user_exec_asce; /* 0x0320 */
/* SMP info area */
struct cpuid cpu_id; /* 0x0320 */
__u32 cpu_nr; /* 0x0328 */
__u32 softirq_pending; /* 0x032c */
__u64 percpu_offset; /* 0x0330 */
__u64 ext_call_fast; /* 0x0338 */
__u64 int_clock; /* 0x0340 */
__u64 clock_comparator; /* 0x0348 */
__u64 vdso_per_cpu_data; /* 0x0350 */
__u64 machine_flags; /* 0x0358 */
__u64 ftrace_func; /* 0x0360 */
__u8 pad_0x0368[0x0380-0x0368]; /* 0x0368 */
__u64 mcck_clock; /* 0x0348 */
__u64 clock_comparator; /* 0x0350 */
__u64 vdso_per_cpu_data; /* 0x0358 */
__u64 machine_flags; /* 0x0360 */
__u64 ftrace_func; /* 0x0368 */
__u64 sie_hook; /* 0x0370 */
__u64 cmf_hpp; /* 0x0378 */
/* Interrupt response block. */
__u8 irb[64]; /* 0x0380 */
......
......@@ -328,8 +328,8 @@ struct pt_regs
psw_t psw;
unsigned long gprs[NUM_GPRS];
unsigned long orig_gpr2;
unsigned short svcnr;
unsigned short ilc;
unsigned short svcnr;
};
#endif
......@@ -436,6 +436,7 @@ typedef struct
#define PTRACE_PEEKDATA_AREA 0x5003
#define PTRACE_POKETEXT_AREA 0x5004
#define PTRACE_POKEDATA_AREA 0x5005
#define PTRACE_GET_LAST_BREAK 0x5006
/*
* PT_PROT definition is loosely based on hppa bsd definition in
......
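The request added here is driven from user space through the normal ptrace() call; the arch_ptrace() and compat handlers further down in this merge simply copy thread_info->last_break to the location passed in 'data'. A hedged sketch of a tracer-side helper (the helper name is made up; the request value is the one defined above):

#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>

#ifndef PTRACE_GET_LAST_BREAK
#define PTRACE_GET_LAST_BREAK 0x5006	/* value from the define above */
#endif

/* Fetch the last breaking-event address of a stopped tracee.
 * 'addr' is unused, 'data' receives the value (see the arch_ptrace()
 * hunk further down in this merge). */
int show_last_break(pid_t pid)
{
	unsigned long last_break = 0;

	if (ptrace(PTRACE_GET_LAST_BREAK, pid, NULL, &last_break) != 0)
		return -1;
	printf("last breaking-event address: 0x%lx\n", last_break);
	return 0;
}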
......@@ -368,14 +368,12 @@ struct qdio_initialize {
#define QDIO_FLAG_SYNC_OUTPUT 0x02
#define QDIO_FLAG_PCI_OUT 0x10
extern int qdio_initialize(struct qdio_initialize *);
extern int qdio_allocate(struct qdio_initialize *);
extern int qdio_establish(struct qdio_initialize *);
extern int qdio_activate(struct ccw_device *);
extern int do_QDIO(struct ccw_device *cdev, unsigned int callflags,
int q_nr, unsigned int bufnr, unsigned int count);
extern int qdio_cleanup(struct ccw_device*, int);
extern int qdio_shutdown(struct ccw_device*, int);
extern int qdio_free(struct ccw_device *);
extern int qdio_get_ssqd_desc(struct ccw_device *dev, struct qdio_ssqd_desc*);
......
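With the qdio_initialize()/qdio_cleanup() wrappers dropped from this hunk, a driver drives the individual steps itself. A rough sketch of the resulting call sequence, assuming the usual allocate/establish/activate and shutdown/free pairing and the QDIO_FLAG_CLEANUP_USING_CLEAR flag (neither the my_ functions nor that flag are part of this hunk):

#include <asm/qdio.h>

/* Sketch only: the old qdio_initialize() becomes allocate + establish
 * (+ activate), the old qdio_cleanup() becomes shutdown + free.
 * Error handling is abbreviated. */
static int my_qdio_bringup(struct qdio_initialize *init_data)
{
	int rc;

	rc = qdio_allocate(init_data);
	if (rc)
		return rc;
	rc = qdio_establish(init_data);
	if (rc)
		goto out_free;
	rc = qdio_activate(init_data->cdev);
	if (rc)
		goto out_shutdown;
	return 0;
out_shutdown:
	qdio_shutdown(init_data->cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
out_free:
	qdio_free(init_data->cdev);
	return rc;
}

static void my_qdio_teardown(struct ccw_device *cdev)
{
	qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
	qdio_free(cdev);
}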
......@@ -2,7 +2,7 @@
* include/asm-s390/setup.h
*
* S390 version
* Copyright IBM Corp. 1999,2006
* Copyright IBM Corp. 1999,2010
*/
#ifndef _ASM_S390_SETUP_H
......@@ -72,6 +72,7 @@ extern unsigned int user_mode;
#define MACHINE_FLAG_HPAGE (1UL << 10)
#define MACHINE_FLAG_PFMF (1UL << 11)
#define MACHINE_FLAG_LPAR (1UL << 12)
#define MACHINE_FLAG_SPP (1UL << 13)
#define MACHINE_IS_VM (S390_lowcore.machine_flags & MACHINE_FLAG_VM)
#define MACHINE_IS_KVM (S390_lowcore.machine_flags & MACHINE_FLAG_KVM)
......@@ -88,6 +89,7 @@ extern unsigned int user_mode;
#define MACHINE_HAS_MVCOS (0)
#define MACHINE_HAS_HPAGE (0)
#define MACHINE_HAS_PFMF (0)
#define MACHINE_HAS_SPP (0)
#else /* __s390x__ */
#define MACHINE_HAS_IEEE (1)
#define MACHINE_HAS_CSP (1)
......@@ -97,6 +99,7 @@ extern unsigned int user_mode;
#define MACHINE_HAS_MVCOS (S390_lowcore.machine_flags & MACHINE_FLAG_MVCOS)
#define MACHINE_HAS_HPAGE (S390_lowcore.machine_flags & MACHINE_FLAG_HPAGE)
#define MACHINE_HAS_PFMF (S390_lowcore.machine_flags & MACHINE_FLAG_PFMF)
#define MACHINE_HAS_SPP (S390_lowcore.machine_flags & MACHINE_FLAG_SPP)
#endif /* __s390x__ */
#define ZFCPDUMP_HSA_SIZE (32UL<<20)
......
......@@ -459,11 +459,6 @@ extern void (*_machine_power_off)(void);
#define arch_align_stack(x) (x)
#ifdef CONFIG_TRACE_IRQFLAGS
extern psw_t sysc_restore_trace_psw;
extern psw_t io_restore_trace_psw;
#endif
static inline int tprot(unsigned long addr)
{
int rc = -EFAULT;
......
......@@ -50,6 +50,7 @@ struct thread_info {
struct restart_block restart_block;
__u64 user_timer;
__u64 system_timer;
unsigned long last_break; /* last breaking-event-address. */
};
/*
......
......@@ -61,11 +61,15 @@ static inline unsigned long long get_clock (void)
return clk;
}
static inline void get_clock_ext(char *clk)
{
asm volatile("stcke %0" : "=Q" (*clk) : : "cc");
}
static inline unsigned long long get_clock_xt(void)
{
unsigned char clk[16];
asm volatile("stcke %0" : "=Q" (clk) : : "cc");
get_clock_ext(clk);
return *((unsigned long long *)&clk[1]);
}
......
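To spell out the refactoring above: get_clock_xt() now applies the same byte extraction to the buffer filled by the new get_clock_ext() helper, i.e. the 64-bit value is taken from bytes 1..8 of the 16-byte STCKE result. A plain-C illustration of that extraction step (no inline assembly; the function name is illustrative):

#include <string.h>

/* Illustrative only: given the 16-byte extended TOD value stored by
 * get_clock_ext()/STCKE, the 64-bit clock returned by get_clock_xt()
 * starts at byte offset 1; s390 is big-endian, so a plain copy
 * preserves the numeric value. */
static unsigned long long clock_from_ext(const unsigned char clk[16])
{
	unsigned long long value;

	memcpy(&value, &clk[1], sizeof(value));
	return value;
}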
......@@ -7,8 +7,10 @@
const struct cpumask *cpu_coregroup_mask(unsigned int cpu);
extern unsigned char cpu_core_id[NR_CPUS];
extern cpumask_t cpu_core_map[NR_CPUS];
#define topology_core_id(cpu) (cpu_core_id[cpu])
#define topology_core_cpumask(cpu) (&cpu_core_map[cpu])
int topology_set_cpu_management(int fc);
......
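As a consumer-side note: topology_core_id() is what the generic topology driver uses to fill the per-CPU core_id attribute, so the identifier exposed by this patch becomes readable from sysfs. A small illustrative reader; the sysfs path is assumed from drivers/base/topology.c, which is not part of this diff:

#include <stdio.h>

/* Illustrative reader for the core identifier exported via
 * topology_core_id(); returns the id or -1 on error. */
static int read_core_id(int cpu)
{
	char path[128];
	FILE *f;
	int core_id = -1;

	snprintf(path, sizeof(path),
		 "/sys/devices/system/cpu/cpu%d/topology/core_id", cpu);
	f = fopen(path, "r");
	if (!f)
		return -1;
	if (fscanf(f, "%d", &core_id) != 1)
		core_id = -1;
	fclose(f);
	return core_id;
}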
......@@ -39,6 +39,7 @@ int main(void)
DEFINE(__TI_precount, offsetof(struct thread_info, preempt_count));
DEFINE(__TI_user_timer, offsetof(struct thread_info, user_timer));
DEFINE(__TI_system_timer, offsetof(struct thread_info, system_timer));
DEFINE(__TI_last_break, offsetof(struct thread_info, last_break));
BLANK();
DEFINE(__PT_ARGS, offsetof(struct pt_regs, args));
DEFINE(__PT_PSW, offsetof(struct pt_regs, psw));
......@@ -112,6 +113,7 @@ int main(void)
DEFINE(__LC_RETURN_MCCK_PSW, offsetof(struct _lowcore, return_mcck_psw));
DEFINE(__LC_SYNC_ENTER_TIMER, offsetof(struct _lowcore, sync_enter_timer));
DEFINE(__LC_ASYNC_ENTER_TIMER, offsetof(struct _lowcore, async_enter_timer));
DEFINE(__LC_MCCK_ENTER_TIMER, offsetof(struct _lowcore, mcck_enter_timer));
DEFINE(__LC_EXIT_TIMER, offsetof(struct _lowcore, exit_timer));
DEFINE(__LC_USER_TIMER, offsetof(struct _lowcore, user_timer));
DEFINE(__LC_SYSTEM_TIMER, offsetof(struct _lowcore, system_timer));
......@@ -126,10 +128,12 @@ int main(void)
DEFINE(__LC_KERNEL_ASCE, offsetof(struct _lowcore, kernel_asce));
DEFINE(__LC_USER_ASCE, offsetof(struct _lowcore, user_asce));
DEFINE(__LC_USER_EXEC_ASCE, offsetof(struct _lowcore, user_exec_asce));
DEFINE(__LC_CPUID, offsetof(struct _lowcore, cpu_id));
DEFINE(__LC_INT_CLOCK, offsetof(struct _lowcore, int_clock));
DEFINE(__LC_MCCK_CLOCK, offsetof(struct _lowcore, mcck_clock));
DEFINE(__LC_MACHINE_FLAGS, offsetof(struct _lowcore, machine_flags));
DEFINE(__LC_FTRACE_FUNC, offsetof(struct _lowcore, ftrace_func));
DEFINE(__LC_SIE_HOOK, offsetof(struct _lowcore, sie_hook));
DEFINE(__LC_CMF_HPP, offsetof(struct _lowcore, cmf_hpp));
DEFINE(__LC_IRB, offsetof(struct _lowcore, irb));
DEFINE(__LC_CPU_TIMER_SAVE_AREA, offsetof(struct _lowcore, cpu_timer_save_area));
DEFINE(__LC_CLOCK_COMP_SAVE_AREA, offsetof(struct _lowcore, clock_comp_save_area));
......
......@@ -655,6 +655,7 @@ debug_open(struct inode *inode, struct file *file)
p_info->act_entry_offset = 0;
file->private_data = p_info;
debug_info_get(debug_info);
nonseekable_open(inode, file);
out:
mutex_unlock(&debug_mutex);
return rc;
......
......@@ -356,6 +356,7 @@ static __init void detect_machine_facilities(void)
{
#ifdef CONFIG_64BIT
unsigned int facilities;
unsigned long long facility_bits;
facilities = stfl();
if (facilities & (1 << 28))
......@@ -364,6 +365,9 @@ static __init void detect_machine_facilities(void)
S390_lowcore.machine_flags |= MACHINE_FLAG_PFMF;
if (facilities & (1 << 4))
S390_lowcore.machine_flags |= MACHINE_FLAG_MVCOS;
if ((stfle(&facility_bits, 1) > 0) &&
(facility_bits & (1ULL << (63 - 40))))
S390_lowcore.machine_flags |= MACHINE_FLAG_SPP;
#endif
}
......
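A note on the bit arithmetic in the hunk above: the facility list returned by stfle() numbers bits from the most significant bit of each doubleword, so facility 40 (mapped here to MACHINE_FLAG_SPP) corresponds to host bit 63 - 40. A standalone illustration of that mapping:

#include <stdio.h>

/* Facility bits are numbered MSB-first within a 64-bit doubleword,
 * so facility N (0..63) sits at host bit position 63 - N. */
static int test_facility_bit(unsigned long long doubleword, unsigned int nr)
{
	return (doubleword & (1ULL << (63 - nr))) != 0;
}

int main(void)
{
	unsigned long long fac = 1ULL << (63 - 40);	/* only facility 40 set */

	printf("facility 40 present: %d\n", test_facility_bit(fac, 40));
	return 0;
}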
......@@ -328,8 +328,8 @@ iplstart:
#
# reset files in VM reader
#
stidp __LC_CPUID # store cpuid
tm __LC_CPUID,0xff # running VM ?
stidp __LC_SAVE_AREA # store cpuid
tm __LC_SAVE_AREA,0xff # running VM ?
bno .Lnoreset
la %r2,.Lreset
lhi %r3,26
......
......@@ -255,7 +255,8 @@ void notrace s390_do_machine_check(struct pt_regs *regs)
int umode;
nmi_enter();
s390_idle_check();
s390_idle_check(regs, S390_lowcore.mcck_clock,
S390_lowcore.mcck_enter_timer);
mci = (struct mci *) &S390_lowcore.mcck_interruption_code;
mcck = &__get_cpu_var(cpu_mcck);
......
......@@ -18,24 +18,42 @@
#include <asm/lowcore.h>
#include <asm/param.h>
static DEFINE_PER_CPU(struct cpuid, cpu_id);
/*
* cpu_init - initializes state that is per-CPU.
*/
void __cpuinit cpu_init(void)
{
struct cpuid *id = &per_cpu(cpu_id, smp_processor_id());
get_cpu_id(id);
atomic_inc(&init_mm.mm_count);
current->active_mm = &init_mm;
BUG_ON(current->mm);
enter_lazy_tlb(&init_mm, current);
}
/*
* print_cpu_info - print basic information about a cpu
*/
void __cpuinit print_cpu_info(void)
{
struct cpuid *id = &per_cpu(cpu_id, smp_processor_id());
pr_info("Processor %d started, address %d, identification %06X\n",
S390_lowcore.cpu_nr, S390_lowcore.cpu_addr,
S390_lowcore.cpu_id.ident);
S390_lowcore.cpu_nr, S390_lowcore.cpu_addr, id->ident);
}
/*
* show_cpuinfo - Get information on one CPU for use by procfs.
*/
static int show_cpuinfo(struct seq_file *m, void *v)
{
static const char *hwcap_str[10] = {
"esan3", "zarch", "stfle", "msa", "ldisp", "eimm", "dfp",
"edat", "etf3eh", "highgprs"
};
struct _lowcore *lc;
unsigned long n = (unsigned long) v - 1;
int i;
......@@ -55,19 +73,12 @@ static int show_cpuinfo(struct seq_file *m, void *v)
}
if (cpu_online(n)) {
#ifdef CONFIG_SMP
lc = (smp_processor_id() == n) ?
&S390_lowcore : lowcore_ptr[n];
#else
lc = &S390_lowcore;
#endif
struct cpuid *id = &per_cpu(cpu_id, n);
seq_printf(m, "processor %li: "
"version = %02X, "
"identification = %06X, "
"machine = %04X\n",
n, lc->cpu_id.version,
lc->cpu_id.ident,
lc->cpu_id.machine);
n, id->version, id->ident, id->machine);
}
preempt_enable();
return 0;
......
......@@ -57,6 +57,7 @@
enum s390_regset {
REGSET_GENERAL,
REGSET_FP,
REGSET_LAST_BREAK,
REGSET_GENERAL_EXTENDED,
};
......@@ -381,6 +382,10 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
copied += sizeof(unsigned long);
}
return 0;
case PTRACE_GET_LAST_BREAK:
put_user(task_thread_info(child)->last_break,
(unsigned long __user *) data);
return 0;
default:
/* Removing high order bit from addr (only for 31 bit). */
addr &= PSW_ADDR_INSN;
......@@ -633,6 +638,10 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
copied += sizeof(unsigned int);
}
return 0;
case PTRACE_GET_LAST_BREAK:
put_user(task_thread_info(child)->last_break,
(unsigned int __user *) data);
return 0;
}
return compat_ptrace_request(child, request, addr, data);
}
......@@ -797,6 +806,28 @@ static int s390_fpregs_set(struct task_struct *target,
return rc;
}
#ifdef CONFIG_64BIT
static int s390_last_break_get(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf)
{
if (count > 0) {
if (kbuf) {
unsigned long *k = kbuf;
*k = task_thread_info(target)->last_break;
} else {
unsigned long __user *u = ubuf;
if (__put_user(task_thread_info(target)->last_break, u))
return -EFAULT;
}
}
return 0;
}
#endif
static const struct user_regset s390_regsets[] = {
[REGSET_GENERAL] = {
.core_note_type = NT_PRSTATUS,
......@@ -814,6 +845,15 @@ static const struct user_regset s390_regsets[] = {
.get = s390_fpregs_get,
.set = s390_fpregs_set,
},
#ifdef CONFIG_64BIT
[REGSET_LAST_BREAK] = {
.core_note_type = NT_S390_LAST_BREAK,
.n = 1,
.size = sizeof(long),
.align = sizeof(long),
.get = s390_last_break_get,
},
#endif
};
static const struct user_regset_view user_s390_view = {
......@@ -948,6 +988,27 @@ static int s390_compat_regs_high_set(struct task_struct *target,
return rc;
}
static int s390_compat_last_break_get(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf)
{
compat_ulong_t last_break;
if (count > 0) {
last_break = task_thread_info(target)->last_break;
if (kbuf) {
unsigned long *k = kbuf;
*k = last_break;
} else {
unsigned long __user *u = ubuf;
if (__put_user(last_break, u))
return -EFAULT;
}
}
return 0;
}
static const struct user_regset s390_compat_regsets[] = {
[REGSET_GENERAL] = {
.core_note_type = NT_PRSTATUS,
......@@ -965,6 +1026,13 @@ static const struct user_regset s390_compat_regsets[] = {
.get = s390_fpregs_get,
.set = s390_fpregs_set,
},
[REGSET_LAST_BREAK] = {
.core_note_type = NT_S390_LAST_BREAK,
.n = 1,
.size = sizeof(long),
.align = sizeof(long),
.get = s390_compat_last_break_get,
},
[REGSET_GENERAL_EXTENDED] = {
.core_note_type = NT_S390_HIGH_GPRS,
.n = sizeof(s390_compat_regs_high) / sizeof(compat_long_t),
......
......@@ -120,7 +120,8 @@ void __irq_entry do_extint(struct pt_regs *regs, unsigned short code)
struct pt_regs *old_regs;
old_regs = set_irq_regs(regs);
s390_idle_check();
s390_idle_check(regs, S390_lowcore.int_clock,
S390_lowcore.async_enter_timer);
irq_enter();
if (S390_lowcore.int_clock >= S390_lowcore.clock_comparator)
/* Serve timer interrupts first. */
......
......@@ -2,7 +2,7 @@
* arch/s390/kernel/setup.c
*
* S390 version
* Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
* Copyright (C) IBM Corp. 1999,2010
* Author(s): Hartmut Penner (hp@de.ibm.com),
* Martin Schwidefsky (schwidefsky@de.ibm.com)
*
......@@ -112,22 +112,6 @@ static struct resource data_resource = {
.flags = IORESOURCE_BUSY | IORESOURCE_MEM,
};
/*
* cpu_init() initializes state that is per-CPU.
*/
void __cpuinit cpu_init(void)
{
/*
* Store processor id in lowcore (used e.g. in timer_interrupt)
*/
get_cpu_id(&S390_lowcore.cpu_id);
atomic_inc(&init_mm.mm_count);
current->active_mm = &init_mm;
BUG_ON(current->mm);
enter_lazy_tlb(&init_mm, current);
}
/*
* condev= and conmode= setup parameter.
*/
......@@ -385,10 +369,6 @@ static void setup_addressing_mode(void)
pr_info("Address spaces switched, "
"mvcos not available\n");
}
#ifdef CONFIG_TRACE_IRQFLAGS
sysc_restore_trace_psw.mask = psw_kernel_bits & ~PSW_MASK_MCHECK;
io_restore_trace_psw.mask = psw_kernel_bits & ~PSW_MASK_MCHECK;
#endif
}
static void __init
......@@ -421,6 +401,7 @@ setup_lowcore(void)
lc->io_new_psw.mask = psw_kernel_bits;
lc->io_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) io_int_handler;
lc->clock_comparator = -1ULL;
lc->cmf_hpp = -1ULL;
lc->kernel_stack = ((unsigned long) &init_thread_union) + THREAD_SIZE;
lc->async_stack = (unsigned long)
__alloc_bootmem(ASYNC_SIZE, ASYNC_SIZE, 0) + ASYNC_SIZE;
......@@ -695,6 +676,7 @@ static void __init setup_hwcaps(void)
static const int stfl_bits[6] = { 0, 2, 7, 17, 19, 21 };
unsigned long long facility_list_extended;
unsigned int facility_list;
struct cpuid cpu_id;
int i;
facility_list = stfl();
......@@ -756,7 +738,8 @@ static void __init setup_hwcaps(void)
*/
elf_hwcap |= HWCAP_S390_HIGH_GPRS;
switch (S390_lowcore.cpu_id.machine) {
get_cpu_id(&cpu_id);
switch (cpu_id.machine) {
case 0x9672:
#if !defined(CONFIG_64BIT)
default: /* Use "g5" as default for 31 bit kernels. */
......
......@@ -313,6 +313,7 @@ static int setup_frame(int sig, struct k_sigaction *ka,
To avoid breaking binary compatibility, they are passed as args. */
regs->gprs[4] = current->thread.trap_no;
regs->gprs[5] = current->thread.prot_addr;
regs->gprs[6] = task_thread_info(current)->last_break;
/* Place signal number on stack to allow backtrace from handler. */
if (__put_user(regs->gprs[2], (int __user *) &frame->signo))
......@@ -376,6 +377,7 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
regs->gprs[2] = map_signal(sig);
regs->gprs[3] = (unsigned long) &frame->info;
regs->gprs[4] = (unsigned long) &frame->uc;
regs->gprs[5] = task_thread_info(current)->last_break;
return 0;
give_sigsegv:
......
......@@ -37,7 +37,8 @@ struct tl_cpu {
};
struct tl_container {
unsigned char reserved[8];
unsigned char reserved[7];
unsigned char id;
};
union tl_entry {
......@@ -58,6 +59,7 @@ struct tl_info {
struct core_info {
struct core_info *next;
unsigned char id;
cpumask_t mask;
};
......@@ -73,6 +75,7 @@ static DECLARE_WORK(topology_work, topology_work_fn);
static DEFINE_SPINLOCK(topology_lock);
cpumask_t cpu_core_map[NR_CPUS];
unsigned char cpu_core_id[NR_CPUS];
static cpumask_t cpu_coregroup_map(unsigned int cpu)
{
......@@ -116,6 +119,7 @@ static void add_cpus_to_core(struct tl_cpu *tl_cpu, struct core_info *core)
for_each_present_cpu(lcpu) {
if (cpu_logical_map(lcpu) == rcpu) {
cpu_set(lcpu, core->mask);
cpu_core_id[lcpu] = core->id;
smp_cpu_polarization[lcpu] = tl_cpu->pp;
}
}
......@@ -158,6 +162,7 @@ static void tl_to_cores(struct tl_info *info)
break;
case 1:
core = core->next;
core->id = tle->container.id;
break;
case 0:
add_cpus_to_core(&tle->cpu, core);
......
......@@ -46,13 +46,7 @@
pgm_check_handler_t *pgm_check_table[128];
#ifdef CONFIG_SYSCTL
#ifdef CONFIG_PROCESS_DEBUG
int sysctl_userprocess_debug = 1;
#else
int sysctl_userprocess_debug = 0;
#endif
#endif
int show_unhandled_signals;
extern pgm_check_handler_t do_protection_exception;
extern pgm_check_handler_t do_dat_exception;
......@@ -315,18 +309,19 @@ void die(const char * str, struct pt_regs * regs, long err)
do_exit(SIGSEGV);
}
static void inline
report_user_fault(long interruption_code, struct pt_regs *regs)
static void inline report_user_fault(struct pt_regs *regs, long int_code,
int signr)
{
#if defined(CONFIG_SYSCTL)
if (!sysctl_userprocess_debug)
if ((task_pid_nr(current) > 1) && !show_unhandled_signals)
return;
#endif
#if defined(CONFIG_SYSCTL) || defined(CONFIG_PROCESS_DEBUG)
printk("User process fault: interruption code 0x%lX\n",
interruption_code);
if (!unhandled_signal(current, signr))
return;
if (!printk_ratelimit())
return;
printk("User process fault: interruption code 0x%lX ", int_code);
print_vma_addr("in ", regs->psw.addr & PSW_ADDR_INSN);
printk("\n");
show_regs(regs);
#endif
}
int is_valid_bugaddr(unsigned long addr)
......@@ -354,7 +349,7 @@ static void __kprobes inline do_trap(long interruption_code, int signr,
tsk->thread.trap_no = interruption_code & 0xffff;
force_sig_info(signr, info, tsk);
report_user_fault(interruption_code, regs);
report_user_fault(regs, interruption_code, signr);
} else {
const struct exception_table_entry *fixup;
fixup = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN);
......@@ -390,8 +385,8 @@ static void default_trap_handler(struct pt_regs * regs, long interruption_code)
{
if (regs->psw.mask & PSW_MASK_PSTATE) {
local_irq_enable();
report_user_fault(regs, interruption_code, SIGSEGV);
do_exit(SIGSEGV);
report_user_fault(interruption_code, regs);
} else
die("Unknown program exception", regs, interruption_code);
}
......
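The show_unhandled_signals flag used above replaces the s390-only sysctl_userprocess_debug; per the "enable exception-trace debug facility" commit in the merge list it is expected to be toggled through the generic debug.exception-trace sysctl (that wiring lives in kernel/sysctl.c and is not shown in this diff). An illustrative user-space toggle under that assumption:

#include <stdio.h>

/* Illustrative: turn on reporting of unhandled user-space faults.
 * Assumes show_unhandled_signals is backed by the generic
 * /proc/sys/debug/exception-trace sysctl (not shown in this diff). */
int main(void)
{
	FILE *f = fopen("/proc/sys/debug/exception-trace", "w");

	if (!f)
		return 1;
	fputs("1\n", f);
	fclose(f);
	return 0;
}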
......@@ -102,11 +102,7 @@ static void vdso_init_per_cpu_data(int cpu, struct vdso_per_cpu_data *vpcd)
/*
* Allocate/free per cpu vdso data.
*/
#ifdef CONFIG_64BIT
#define SEGMENT_ORDER 2
#else
#define SEGMENT_ORDER 1
#endif
int vdso_alloc_per_cpu(int cpu, struct _lowcore *lowcore)
{
......
......@@ -121,32 +121,35 @@ void account_system_vtime(struct task_struct *tsk)
}
EXPORT_SYMBOL_GPL(account_system_vtime);
void vtime_start_cpu(void)
void vtime_start_cpu(__u64 int_clock, __u64 enter_timer)
{
struct s390_idle_data *idle = &__get_cpu_var(s390_idle);
struct vtimer_queue *vq = &__get_cpu_var(virt_cpu_timer);
__u64 idle_time, expires;
if (idle->idle_enter == 0ULL)
return;
/* Account time spent with enabled wait psw loaded as idle time. */
idle_time = S390_lowcore.int_clock - idle->idle_enter;
idle_time = int_clock - idle->idle_enter;
account_idle_time(idle_time);
S390_lowcore.steal_timer +=
idle->idle_enter - S390_lowcore.last_update_clock;
S390_lowcore.last_update_clock = S390_lowcore.int_clock;
S390_lowcore.last_update_clock = int_clock;
/* Account system time spent going idle. */
S390_lowcore.system_timer += S390_lowcore.last_update_timer - vq->idle;
S390_lowcore.last_update_timer = S390_lowcore.async_enter_timer;
S390_lowcore.last_update_timer = enter_timer;
/* Restart vtime CPU timer */
if (vq->do_spt) {
/* Program old expire value but first save progress. */
expires = vq->idle - S390_lowcore.async_enter_timer;
expires = vq->idle - enter_timer;
expires += get_vtimer();
set_vtimer(expires);
} else {
/* Don't account the CPU timer delta while the cpu was idle. */
vq->elapsed -= vq->idle - S390_lowcore.async_enter_timer;
vq->elapsed -= vq->idle - enter_timer;
}
idle->sequence++;
......
......@@ -33,6 +33,17 @@ config KVM
If unsure, say N.
config KVM_AWARE_CMF
depends on KVM
bool "KVM aware sampling"
---help---
This option enhances the sampling data from the CPU Measurement
Facility with additional information that makes it possible to
distinguish between guest(s) and host when using the kernel-based
virtual machine functionality.
If unsure, say N.
# OK, it's a little counter-intuitive to do this, but it puts it neatly under
# the virtualization menu.
source drivers/vhost/Kconfig
......
/*
* sie64a.S - low level sie call
*
* Copyright IBM Corp. 2008
* Copyright IBM Corp. 2008,2010
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License (version 2 only)
* as published by the Free Software Foundation.
*
* Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
* Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
*/
#include <linux/errno.h>
#include <asm/asm-offsets.h>
#include <asm/setup.h>
#include <asm/asm-offsets.h>
#include <asm/ptrace.h>
#include <asm/thread_info.h>
_TIF_EXIT_SIE = (_TIF_SIGPENDING | _TIF_NEED_RESCHED | _TIF_MCCK_PENDING)
/*
* offsets into stackframe
* SP_ = offsets into stack sie64 is called with
* SPI_ = offsets into irq stack
*/
SP_GREGS = __SF_EMPTY
SP_HOOK = __SF_EMPTY+8
SP_GPP = __SF_EMPTY+16
SPI_PSW = STACK_FRAME_OVERHEAD + __PT_PSW
SP_R5 = 5 * 8 # offset into stackframe
SP_R6 = 6 * 8
.macro SPP newpp
#ifdef CONFIG_KVM_AWARE_CMF
tm __LC_MACHINE_FLAGS+6,0x20 # MACHINE_FLAG_SPP
jz 0f
.insn s,0xb2800000,\newpp
0:
#endif
.endm
sie_irq_handler:
SPP __LC_CMF_HPP # set host id
larl %r2,sie_inst
clg %r2,SPI_PSW+8(0,%r15) # intercepted sie
jne 1f
xc __LC_SIE_HOOK(8),__LC_SIE_HOOK
lg %r2,__LC_THREAD_INFO # pointer thread_info struct
tm __TI_flags+7(%r2),_TIF_EXIT_SIE
jz 0f
larl %r2,sie_exit # work pending, leave sie
stg %r2,__LC_RETURN_PSW+8
br %r14
0: larl %r2,sie_reenter # re-enter with guest id
stg %r2,__LC_RETURN_PSW+8
1: br %r14
/*
* sie64a calling convention:
......@@ -23,23 +63,34 @@ SP_R6 = 6 * 8
*/
.globl sie64a
sie64a:
lgr %r5,%r3
stmg %r5,%r14,SP_R5(%r15) # save register on entry
stg %r3,SP_GREGS(%r15) # save guest register save area
stmg %r6,%r14,__SF_GPRS(%r15) # save registers on entry
lgr %r14,%r2 # pointer to sie control block
larl %r5,sie_irq_handler
stg %r2,SP_GPP(%r15)
stg %r5,SP_HOOK(%r15) # save hook target
lmg %r0,%r13,0(%r3) # load guest gprs 0-13
sie_reenter:
mvc __LC_SIE_HOOK(8),SP_HOOK(%r15)
SPP SP_GPP(%r15) # set guest id
sie_inst:
sie 0(%r14)
lg %r14,SP_R5(%r15)
xc __LC_SIE_HOOK(8),__LC_SIE_HOOK
SPP __LC_CMF_HPP # set host id
sie_exit:
lg %r14,SP_GREGS(%r15)
stmg %r0,%r13,0(%r14) # save guest gprs 0-13
lghi %r2,0
lmg %r6,%r14,SP_R6(%r15)
lmg %r6,%r14,__SF_GPRS(%r15)
br %r14
sie_err:
lg %r14,SP_R5(%r15)
xc __LC_SIE_HOOK(8),__LC_SIE_HOOK
SPP __LC_CMF_HPP # set host id
lg %r14,SP_GREGS(%r15)
stmg %r0,%r13,0(%r14) # save guest gprs 0-13
lghi %r2,-EFAULT
lmg %r6,%r14,SP_R6(%r15)
lmg %r6,%r14,__SF_GPRS(%r15)
br %r14
.section __ex_table,"a"
......
......@@ -48,10 +48,6 @@
#define __PF_RES_FIELD 0x8000000000000000ULL
#endif /* CONFIG_64BIT */
#ifdef CONFIG_SYSCTL
extern int sysctl_userprocess_debug;
#endif
#define VM_FAULT_BADCONTEXT 0x010000
#define VM_FAULT_BADMAP 0x020000
#define VM_FAULT_BADACCESS 0x040000
......@@ -120,6 +116,22 @@ static inline int user_space_fault(unsigned long trans_exc_code)
return trans_exc_code != 3;
}
static inline void report_user_fault(struct pt_regs *regs, long int_code,
int signr, unsigned long address)
{
if ((task_pid_nr(current) > 1) && !show_unhandled_signals)
return;
if (!unhandled_signal(current, signr))
return;
if (!printk_ratelimit())
return;
printk("User process fault: interruption code 0x%lX ", int_code);
print_vma_addr(KERN_CONT "in ", regs->psw.addr & PSW_ADDR_INSN);
printk("\n");
printk("failing address: %lX\n", address);
show_regs(regs);
}
/*
* Send SIGSEGV to task. This is an external routine
* to keep the stack usage of do_page_fault small.
......@@ -133,17 +145,7 @@ static noinline void do_sigsegv(struct pt_regs *regs, long int_code,
address = trans_exc_code & __FAIL_ADDR_MASK;
current->thread.prot_addr = address;
current->thread.trap_no = int_code;
#if defined(CONFIG_SYSCTL) || defined(CONFIG_PROCESS_DEBUG)
#if defined(CONFIG_SYSCTL)
if (sysctl_userprocess_debug)
#endif
{
printk("User process fault: interruption code 0x%lX\n",
int_code);
printk("failing address: %lX\n", address);
show_regs(regs);
}
#endif
report_user_fault(regs, int_code, SIGSEGV, address);
si.si_signo = SIGSEGV;
si.si_code = si_code;
si.si_addr = (void __user *) address;
......
......@@ -65,6 +65,7 @@ static void dasd_device_tasklet(struct dasd_device *);
static void dasd_block_tasklet(struct dasd_block *);
static void do_kick_device(struct work_struct *);
static void do_restore_device(struct work_struct *);
static void do_reload_device(struct work_struct *);
static void dasd_return_cqr_cb(struct dasd_ccw_req *, void *);
static void dasd_device_timeout(unsigned long);
static void dasd_block_timeout(unsigned long);
......@@ -115,6 +116,7 @@ struct dasd_device *dasd_alloc_device(void)
device->timer.data = (unsigned long) device;
INIT_WORK(&device->kick_work, do_kick_device);
INIT_WORK(&device->restore_device, do_restore_device);
INIT_WORK(&device->reload_device, do_reload_device);
device->state = DASD_STATE_NEW;
device->target = DASD_STATE_NEW;
mutex_init(&device->state_mutex);
......@@ -520,6 +522,26 @@ void dasd_kick_device(struct dasd_device *device)
schedule_work(&device->kick_work);
}
/*
* dasd_reload_device will schedule a call to do_reload_device to the kernel
* event daemon.
*/
static void do_reload_device(struct work_struct *work)
{
struct dasd_device *device = container_of(work, struct dasd_device,
reload_device);
device->discipline->reload(device);
dasd_put_device(device);
}
void dasd_reload_device(struct dasd_device *device)
{
dasd_get_device(device);
/* queue call to dasd_reload_device to the kernel event daemon. */
schedule_work(&device->reload_device);
}
EXPORT_SYMBOL(dasd_reload_device);
/*
* dasd_restore_device will schedule a call to do_restore_device to the kernel
* event daemon.
......
......@@ -1418,9 +1418,29 @@ static struct dasd_ccw_req *dasd_3990_erp_inspect_alias(
struct dasd_ccw_req *erp)
{
struct dasd_ccw_req *cqr = erp->refers;
char *sense;
if (cqr->block &&
(cqr->block->base != cqr->startdev)) {
sense = dasd_get_sense(&erp->refers->irb);
/*
* dynamic pav may have changed base alias mapping
*/
if (!test_bit(DASD_FLAG_OFFLINE, &cqr->startdev->flags) && sense
&& (sense[0] == 0x10) && (sense[7] == 0x0F)
&& (sense[8] == 0x67)) {
/*
* remove device from alias handling to prevent new
* requests from being scheduled on the
* wrong alias device
*/
dasd_alias_remove_device(cqr->startdev);
/* schedule worker to reload device */
dasd_reload_device(cqr->startdev);
}
if (cqr->startdev->features & DASD_FEATURE_ERPLOG) {
DBF_DEV_EVENT(DBF_ERR, cqr->startdev,
"ERP on alias device for request %p,"
......
......@@ -190,20 +190,21 @@ int dasd_alias_make_device_known_to_lcu(struct dasd_device *device)
struct alias_server *server, *newserver;
struct alias_lcu *lcu, *newlcu;
int is_lcu_known;
struct dasd_uid *uid;
struct dasd_uid uid;
private = (struct dasd_eckd_private *) device->private;
uid = &private->uid;
device->discipline->get_uid(device, &uid);
spin_lock_irqsave(&aliastree.lock, flags);
is_lcu_known = 1;
server = _find_server(uid);
server = _find_server(&uid);
if (!server) {
spin_unlock_irqrestore(&aliastree.lock, flags);
newserver = _allocate_server(uid);
newserver = _allocate_server(&uid);
if (IS_ERR(newserver))
return PTR_ERR(newserver);
spin_lock_irqsave(&aliastree.lock, flags);
server = _find_server(uid);
server = _find_server(&uid);
if (!server) {
list_add(&newserver->server, &aliastree.serverlist);
server = newserver;
......@@ -214,14 +215,14 @@ int dasd_alias_make_device_known_to_lcu(struct dasd_device *device)
}
}
lcu = _find_lcu(server, uid);
lcu = _find_lcu(server, &uid);
if (!lcu) {
spin_unlock_irqrestore(&aliastree.lock, flags);
newlcu = _allocate_lcu(uid);
newlcu = _allocate_lcu(&uid);
if (IS_ERR(newlcu))
return PTR_ERR(newlcu);
spin_lock_irqsave(&aliastree.lock, flags);
lcu = _find_lcu(server, uid);
lcu = _find_lcu(server, &uid);
if (!lcu) {
list_add(&newlcu->lcu, &server->lculist);
lcu = newlcu;
......@@ -256,20 +257,20 @@ void dasd_alias_lcu_setup_complete(struct dasd_device *device)
unsigned long flags;
struct alias_server *server;
struct alias_lcu *lcu;
struct dasd_uid *uid;
struct dasd_uid uid;
private = (struct dasd_eckd_private *) device->private;
uid = &private->uid;
device->discipline->get_uid(device, &uid);
lcu = NULL;
spin_lock_irqsave(&aliastree.lock, flags);
server = _find_server(uid);
server = _find_server(&uid);
if (server)
lcu = _find_lcu(server, uid);
lcu = _find_lcu(server, &uid);
spin_unlock_irqrestore(&aliastree.lock, flags);
if (!lcu) {
DBF_EVENT_DEVID(DBF_ERR, device->cdev,
"could not find lcu for %04x %02x",
uid->ssid, uid->real_unit_addr);
uid.ssid, uid.real_unit_addr);
WARN_ON(1);
return;
}
......@@ -282,20 +283,20 @@ void dasd_alias_wait_for_lcu_setup(struct dasd_device *device)
unsigned long flags;
struct alias_server *server;
struct alias_lcu *lcu;
struct dasd_uid *uid;
struct dasd_uid uid;
private = (struct dasd_eckd_private *) device->private;
uid = &private->uid;
device->discipline->get_uid(device, &uid);
lcu = NULL;
spin_lock_irqsave(&aliastree.lock, flags);
server = _find_server(uid);
server = _find_server(&uid);
if (server)
lcu = _find_lcu(server, uid);
lcu = _find_lcu(server, &uid);
spin_unlock_irqrestore(&aliastree.lock, flags);
if (!lcu) {
DBF_EVENT_DEVID(DBF_ERR, device->cdev,
"could not find lcu for %04x %02x",
uid->ssid, uid->real_unit_addr);
uid.ssid, uid.real_unit_addr);
WARN_ON(1);
return;
}
......@@ -314,9 +315,11 @@ void dasd_alias_disconnect_device_from_lcu(struct dasd_device *device)
struct alias_lcu *lcu;
struct alias_server *server;
int was_pending;
struct dasd_uid uid;
private = (struct dasd_eckd_private *) device->private;
lcu = private->lcu;
device->discipline->get_uid(device, &uid);
spin_lock_irqsave(&lcu->lock, flags);
list_del_init(&device->alias_list);
/* make sure that the workers don't use this device */
......@@ -353,7 +356,7 @@ void dasd_alias_disconnect_device_from_lcu(struct dasd_device *device)
_schedule_lcu_update(lcu, NULL);
spin_unlock(&lcu->lock);
}
server = _find_server(&private->uid);
server = _find_server(&uid);
if (server && list_empty(&server->lculist)) {
list_del(&server->server);
_free_server(server);
......@@ -366,19 +369,30 @@ void dasd_alias_disconnect_device_from_lcu(struct dasd_device *device)
* in the lcu is up to date and will update the device uid before
* adding it to a pav group.
*/
static int _add_device_to_lcu(struct alias_lcu *lcu,
struct dasd_device *device)
struct dasd_device *device,
struct dasd_device *pos)
{
struct dasd_eckd_private *private;
struct alias_pav_group *group;
struct dasd_uid *uid;
struct dasd_uid uid;
unsigned long flags;
private = (struct dasd_eckd_private *) device->private;
uid = &private->uid;
uid->type = lcu->uac->unit[uid->real_unit_addr].ua_type;
uid->base_unit_addr = lcu->uac->unit[uid->real_unit_addr].base_ua;
dasd_set_uid(device->cdev, &private->uid);
/* only lock if not already locked */
if (device != pos)
spin_lock_irqsave_nested(get_ccwdev_lock(device->cdev), flags,
CDEV_NESTED_SECOND);
private->uid.type = lcu->uac->unit[private->uid.real_unit_addr].ua_type;
private->uid.base_unit_addr =
lcu->uac->unit[private->uid.real_unit_addr].base_ua;
uid = private->uid;
if (device != pos)
spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
/* if we have no PAV anyway, we don't need to bother with PAV groups */
if (lcu->pav == NO_PAV) {
......@@ -386,25 +400,25 @@ static int _add_device_to_lcu(struct alias_lcu *lcu,
return 0;
}
group = _find_group(lcu, uid);
group = _find_group(lcu, &uid);
if (!group) {
group = kzalloc(sizeof(*group), GFP_ATOMIC);
if (!group)
return -ENOMEM;
memcpy(group->uid.vendor, uid->vendor, sizeof(uid->vendor));
memcpy(group->uid.serial, uid->serial, sizeof(uid->serial));
group->uid.ssid = uid->ssid;
if (uid->type == UA_BASE_DEVICE)
group->uid.base_unit_addr = uid->real_unit_addr;
memcpy(group->uid.vendor, uid.vendor, sizeof(uid.vendor));
memcpy(group->uid.serial, uid.serial, sizeof(uid.serial));
group->uid.ssid = uid.ssid;
if (uid.type == UA_BASE_DEVICE)
group->uid.base_unit_addr = uid.real_unit_addr;
else
group->uid.base_unit_addr = uid->base_unit_addr;
memcpy(group->uid.vduit, uid->vduit, sizeof(uid->vduit));
group->uid.base_unit_addr = uid.base_unit_addr;
memcpy(group->uid.vduit, uid.vduit, sizeof(uid.vduit));
INIT_LIST_HEAD(&group->group);
INIT_LIST_HEAD(&group->baselist);
INIT_LIST_HEAD(&group->aliaslist);
list_add(&group->group, &lcu->grouplist);
}
if (uid->type == UA_BASE_DEVICE)
if (uid.type == UA_BASE_DEVICE)
list_move(&device->alias_list, &group->baselist);
else
list_move(&device->alias_list, &group->aliaslist);
......@@ -525,7 +539,10 @@ static int _lcu_update(struct dasd_device *refdev, struct alias_lcu *lcu)
if (rc)
return rc;
spin_lock_irqsave(&lcu->lock, flags);
/* need to take cdev lock before lcu lock */
spin_lock_irqsave_nested(get_ccwdev_lock(refdev->cdev), flags,
CDEV_NESTED_FIRST);
spin_lock(&lcu->lock);
lcu->pav = NO_PAV;
for (i = 0; i < MAX_DEVICES_PER_LCU; ++i) {
switch (lcu->uac->unit[i].ua_type) {
......@@ -542,9 +559,10 @@ static int _lcu_update(struct dasd_device *refdev, struct alias_lcu *lcu)
list_for_each_entry_safe(device, tempdev, &lcu->active_devices,
alias_list) {
_add_device_to_lcu(lcu, device);
_add_device_to_lcu(lcu, device, refdev);
}
spin_unlock_irqrestore(&lcu->lock, flags);
spin_unlock(&lcu->lock);
spin_unlock_irqrestore(get_ccwdev_lock(refdev->cdev), flags);
return 0;
}
......@@ -628,9 +646,12 @@ int dasd_alias_add_device(struct dasd_device *device)
private = (struct dasd_eckd_private *) device->private;
lcu = private->lcu;
rc = 0;
spin_lock_irqsave(&lcu->lock, flags);
/* need to take cdev lock before lcu lock */
spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
spin_lock(&lcu->lock);
if (!(lcu->flags & UPDATE_PENDING)) {
rc = _add_device_to_lcu(lcu, device);
rc = _add_device_to_lcu(lcu, device, device);
if (rc)
lcu->flags |= UPDATE_PENDING;
}
......@@ -638,10 +659,19 @@ int dasd_alias_add_device(struct dasd_device *device)
list_move(&device->alias_list, &lcu->active_devices);
_schedule_lcu_update(lcu, device);
}
spin_unlock_irqrestore(&lcu->lock, flags);
spin_unlock(&lcu->lock);
spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
return rc;
}
int dasd_alias_update_add_device(struct dasd_device *device)
{
struct dasd_eckd_private *private;
private = (struct dasd_eckd_private *) device->private;
private->lcu->flags |= UPDATE_PENDING;
return dasd_alias_add_device(device);
}
int dasd_alias_remove_device(struct dasd_device *device)
{
struct dasd_eckd_private *private;
......@@ -740,19 +770,30 @@ static void _restart_all_base_devices_on_lcu(struct alias_lcu *lcu)
struct alias_pav_group *pavgroup;
struct dasd_device *device;
struct dasd_eckd_private *private;
unsigned long flags;
/* active and inactive list can contain alias as well as base devices */
list_for_each_entry(device, &lcu->active_devices, alias_list) {
private = (struct dasd_eckd_private *) device->private;
if (private->uid.type != UA_BASE_DEVICE)
spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
if (private->uid.type != UA_BASE_DEVICE) {
spin_unlock_irqrestore(get_ccwdev_lock(device->cdev),
flags);
continue;
}
spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
dasd_schedule_block_bh(device->block);
dasd_schedule_device_bh(device);
}
list_for_each_entry(device, &lcu->inactive_devices, alias_list) {
private = (struct dasd_eckd_private *) device->private;
if (private->uid.type != UA_BASE_DEVICE)
spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
if (private->uid.type != UA_BASE_DEVICE) {
spin_unlock_irqrestore(get_ccwdev_lock(device->cdev),
flags);
continue;
}
spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
dasd_schedule_block_bh(device->block);
dasd_schedule_device_bh(device);
}
......
......@@ -49,7 +49,6 @@ struct dasd_devmap {
unsigned int devindex;
unsigned short features;
struct dasd_device *device;
struct dasd_uid uid;
};
/*
......@@ -936,42 +935,46 @@ dasd_device_status_show(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR(status, 0444, dasd_device_status_show, NULL);
static ssize_t
dasd_alias_show(struct device *dev, struct device_attribute *attr, char *buf)
static ssize_t dasd_alias_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct dasd_devmap *devmap;
int alias;
struct dasd_device *device;
struct dasd_uid uid;
devmap = dasd_find_busid(dev_name(dev));
spin_lock(&dasd_devmap_lock);
if (IS_ERR(devmap) || strlen(devmap->uid.vendor) == 0) {
spin_unlock(&dasd_devmap_lock);
device = dasd_device_from_cdev(to_ccwdev(dev));
if (IS_ERR(device))
return sprintf(buf, "0\n");
if (device->discipline && device->discipline->get_uid &&
!device->discipline->get_uid(device, &uid)) {
if (uid.type == UA_BASE_PAV_ALIAS ||
uid.type == UA_HYPER_PAV_ALIAS)
return sprintf(buf, "1\n");
}
if (devmap->uid.type == UA_BASE_PAV_ALIAS ||
devmap->uid.type == UA_HYPER_PAV_ALIAS)
alias = 1;
else
alias = 0;
spin_unlock(&dasd_devmap_lock);
return sprintf(buf, alias ? "1\n" : "0\n");
dasd_put_device(device);
return sprintf(buf, "0\n");
}
static DEVICE_ATTR(alias, 0444, dasd_alias_show, NULL);
static ssize_t
dasd_vendor_show(struct device *dev, struct device_attribute *attr, char *buf)
static ssize_t dasd_vendor_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct dasd_devmap *devmap;
struct dasd_device *device;
struct dasd_uid uid;
char *vendor;
devmap = dasd_find_busid(dev_name(dev));
spin_lock(&dasd_devmap_lock);
if (!IS_ERR(devmap) && strlen(devmap->uid.vendor) > 0)
vendor = devmap->uid.vendor;
else
device = dasd_device_from_cdev(to_ccwdev(dev));
vendor = "";
spin_unlock(&dasd_devmap_lock);
if (IS_ERR(device))
return snprintf(buf, PAGE_SIZE, "%s\n", vendor);
if (device->discipline && device->discipline->get_uid &&
!device->discipline->get_uid(device, &uid))
vendor = uid.vendor;
dasd_put_device(device);
return snprintf(buf, PAGE_SIZE, "%s\n", vendor);
}
......@@ -985,48 +988,51 @@ static DEVICE_ATTR(vendor, 0444, dasd_vendor_show, NULL);
static ssize_t
dasd_uid_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct dasd_devmap *devmap;
struct dasd_device *device;
struct dasd_uid uid;
char uid_string[UID_STRLEN];
char ua_string[3];
struct dasd_uid *uid;
devmap = dasd_find_busid(dev_name(dev));
spin_lock(&dasd_devmap_lock);
if (IS_ERR(devmap) || strlen(devmap->uid.vendor) == 0) {
spin_unlock(&dasd_devmap_lock);
return sprintf(buf, "\n");
}
uid = &devmap->uid;
switch (uid->type) {
device = dasd_device_from_cdev(to_ccwdev(dev));
uid_string[0] = 0;
if (IS_ERR(device))
return snprintf(buf, PAGE_SIZE, "%s\n", uid_string);
if (device->discipline && device->discipline->get_uid &&
!device->discipline->get_uid(device, &uid)) {
switch (uid.type) {
case UA_BASE_DEVICE:
sprintf(ua_string, "%02x", uid->real_unit_addr);
snprintf(ua_string, sizeof(ua_string), "%02x",
uid.real_unit_addr);
break;
case UA_BASE_PAV_ALIAS:
sprintf(ua_string, "%02x", uid->base_unit_addr);
snprintf(ua_string, sizeof(ua_string), "%02x",
uid.base_unit_addr);
break;
case UA_HYPER_PAV_ALIAS:
sprintf(ua_string, "xx");
snprintf(ua_string, sizeof(ua_string), "xx");
break;
default:
/* should not happen, treat like base device */
sprintf(ua_string, "%02x", uid->real_unit_addr);
snprintf(ua_string, sizeof(ua_string), "%02x",
uid.real_unit_addr);
break;
}
if (strlen(uid->vduit) > 0)
if (strlen(uid.vduit) > 0)
snprintf(uid_string, sizeof(uid_string),
"%s.%s.%04x.%s.%s",
uid->vendor, uid->serial,
uid->ssid, ua_string,
uid->vduit);
uid.vendor, uid.serial, uid.ssid, ua_string,
uid.vduit);
else
snprintf(uid_string, sizeof(uid_string),
"%s.%s.%04x.%s",
uid->vendor, uid->serial,
uid->ssid, ua_string);
spin_unlock(&dasd_devmap_lock);
uid.vendor, uid.serial, uid.ssid, ua_string);
}
dasd_put_device(device);
return snprintf(buf, PAGE_SIZE, "%s\n", uid_string);
}
static DEVICE_ATTR(uid, 0444, dasd_uid_show, NULL);
/*
......@@ -1093,50 +1099,6 @@ static struct attribute_group dasd_attr_group = {
.attrs = dasd_attrs,
};
/*
* Return copy of the device unique identifier.
*/
int
dasd_get_uid(struct ccw_device *cdev, struct dasd_uid *uid)
{
struct dasd_devmap *devmap;
devmap = dasd_find_busid(dev_name(&cdev->dev));
if (IS_ERR(devmap))
return PTR_ERR(devmap);
spin_lock(&dasd_devmap_lock);
*uid = devmap->uid;
spin_unlock(&dasd_devmap_lock);
return 0;
}
EXPORT_SYMBOL_GPL(dasd_get_uid);
/*
* Register the given device unique identifier into devmap struct.
* In addition check if the related storage server subsystem ID is already
* contained in the dasd_server_ssid_list. If subsystem ID is not contained,
* create new entry.
* Return 0 if server was already in serverlist,
* 1 if the server was added successful
* <0 in case of error.
*/
int
dasd_set_uid(struct ccw_device *cdev, struct dasd_uid *uid)
{
struct dasd_devmap *devmap;
devmap = dasd_find_busid(dev_name(&cdev->dev));
if (IS_ERR(devmap))
return PTR_ERR(devmap);
spin_lock(&dasd_devmap_lock);
devmap->uid = *uid;
spin_unlock(&dasd_devmap_lock);
return 0;
}
EXPORT_SYMBOL_GPL(dasd_set_uid);
/*
* Return value of the specified feature.
*/
......
......@@ -692,18 +692,20 @@ dasd_eckd_cdl_reclen(int recid)
/*
* Generate device unique id that specifies the physical device.
*/
static int dasd_eckd_generate_uid(struct dasd_device *device,
struct dasd_uid *uid)
static int dasd_eckd_generate_uid(struct dasd_device *device)
{
struct dasd_eckd_private *private;
struct dasd_uid *uid;
int count;
unsigned long flags;
private = (struct dasd_eckd_private *) device->private;
if (!private)
return -ENODEV;
if (!private->ned || !private->gneq)
return -ENODEV;
uid = &private->uid;
spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
memset(uid, 0, sizeof(struct dasd_uid));
memcpy(uid->vendor, private->ned->HDA_manufacturer,
sizeof(uid->vendor) - 1);
......@@ -726,7 +728,23 @@ static int dasd_eckd_generate_uid(struct dasd_device *device,
private->vdsneq->uit[count]);
}
}
spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
return 0;
}
static int dasd_eckd_get_uid(struct dasd_device *device, struct dasd_uid *uid)
{
struct dasd_eckd_private *private;
unsigned long flags;
if (device->private) {
private = (struct dasd_eckd_private *)device->private;
spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
*uid = private->uid;
spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
return 0;
}
return -EINVAL;
}
static struct dasd_ccw_req *dasd_eckd_build_rcd_lpm(struct dasd_device *device,
......@@ -1088,6 +1106,7 @@ dasd_eckd_check_characteristics(struct dasd_device *device)
{
struct dasd_eckd_private *private;
struct dasd_block *block;
struct dasd_uid temp_uid;
int is_known, rc;
int readonly;
......@@ -1124,13 +1143,13 @@ dasd_eckd_check_characteristics(struct dasd_device *device)
if (rc)
goto out_err1;
/* Generate device unique id and register in devmap */
rc = dasd_eckd_generate_uid(device, &private->uid);
/* Generate device unique id */
rc = dasd_eckd_generate_uid(device);
if (rc)
goto out_err1;
dasd_set_uid(device->cdev, &private->uid);
if (private->uid.type == UA_BASE_DEVICE) {
dasd_eckd_get_uid(device, &temp_uid);
if (temp_uid.type == UA_BASE_DEVICE) {
block = dasd_alloc_block();
if (IS_ERR(block)) {
DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
......@@ -1451,6 +1470,7 @@ static int dasd_eckd_ready_to_online(struct dasd_device *device)
static int dasd_eckd_online_to_ready(struct dasd_device *device)
{
cancel_work_sync(&device->reload_device);
return dasd_alias_remove_device(device);
};
......@@ -1709,10 +1729,27 @@ static void dasd_eckd_handle_unsolicited_interrupt(struct dasd_device *device,
{
char mask;
char *sense = NULL;
struct dasd_eckd_private *private;
private = (struct dasd_eckd_private *) device->private;
/* first of all check for state change pending interrupt */
mask = DEV_STAT_ATTENTION | DEV_STAT_DEV_END | DEV_STAT_UNIT_EXCEP;
if ((scsw_dstat(&irb->scsw) & mask) == mask) {
/* for alias devices only and not in offline processing */
if (!device->block && private->lcu &&
!test_bit(DASD_FLAG_OFFLINE, &device->flags)) {
/*
* the state change could be caused by an alias
* reassignment; remove the device from alias handling
* to prevent new requests from being scheduled on
* the wrong alias device
*/
dasd_alias_remove_device(device);
/* schedule worker to reload device */
dasd_reload_device(device);
}
dasd_generic_handle_state_change(device);
return;
}
......@@ -3259,7 +3296,7 @@ static void dasd_eckd_dump_sense(struct dasd_device *device,
dasd_eckd_dump_sense_ccw(device, req, irb);
}
int dasd_eckd_pm_freeze(struct dasd_device *device)
static int dasd_eckd_pm_freeze(struct dasd_device *device)
{
/*
* the device should be disconnected from our LCU structure
......@@ -3272,7 +3309,7 @@ int dasd_eckd_pm_freeze(struct dasd_device *device)
return 0;
}
int dasd_eckd_restore_device(struct dasd_device *device)
static int dasd_eckd_restore_device(struct dasd_device *device)
{
struct dasd_eckd_private *private;
struct dasd_eckd_characteristics temp_rdc_data;
......@@ -3287,15 +3324,16 @@ int dasd_eckd_restore_device(struct dasd_device *device)
if (rc)
goto out_err;
/* Generate device unique id and register in devmap */
rc = dasd_eckd_generate_uid(device, &private->uid);
dasd_get_uid(device->cdev, &temp_uid);
dasd_eckd_get_uid(device, &temp_uid);
/* Generate device unique id */
rc = dasd_eckd_generate_uid(device);
spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
if (memcmp(&private->uid, &temp_uid, sizeof(struct dasd_uid)) != 0)
dev_err(&device->cdev->dev, "The UID of the DASD has "
"changed\n");
spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
if (rc)
goto out_err;
dasd_set_uid(device->cdev, &private->uid);
/* register lcu with alias handling, enable PAV if this is a new lcu */
is_known = dasd_alias_make_device_known_to_lcu(device);
......@@ -3336,6 +3374,56 @@ int dasd_eckd_restore_device(struct dasd_device *device)
return -1;
}
static int dasd_eckd_reload_device(struct dasd_device *device)
{
struct dasd_eckd_private *private;
int rc, old_base;
char print_uid[60];
struct dasd_uid uid;
unsigned long flags;
private = (struct dasd_eckd_private *) device->private;
spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
old_base = private->uid.base_unit_addr;
spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
/* Read Configuration Data */
rc = dasd_eckd_read_conf(device);
if (rc)
goto out_err;
rc = dasd_eckd_generate_uid(device);
if (rc)
goto out_err;
/*
* update unit address configuration and
* add device to alias management
*/
dasd_alias_update_add_device(device);
dasd_eckd_get_uid(device, &uid);
if (old_base != uid.base_unit_addr) {
if (strlen(uid.vduit) > 0)
snprintf(print_uid, sizeof(print_uid),
"%s.%s.%04x.%02x.%s", uid.vendor, uid.serial,
uid.ssid, uid.base_unit_addr, uid.vduit);
else
snprintf(print_uid, sizeof(print_uid),
"%s.%s.%04x.%02x", uid.vendor, uid.serial,
uid.ssid, uid.base_unit_addr);
dev_info(&device->cdev->dev,
"An Alias device was reassigned to a new base device "
"with UID: %s\n", print_uid);
}
return 0;
out_err:
return -1;
}
static struct ccw_driver dasd_eckd_driver = {
.name = "dasd-eckd",
.owner = THIS_MODULE,
......@@ -3389,6 +3477,8 @@ static struct dasd_discipline dasd_eckd_discipline = {
.ioctl = dasd_eckd_ioctl,
.freeze = dasd_eckd_pm_freeze,
.restore = dasd_eckd_restore_device,
.reload = dasd_eckd_reload_device,
.get_uid = dasd_eckd_get_uid,
};
static int __init
......
......@@ -426,7 +426,6 @@ struct alias_pav_group {
struct dasd_device *next;
};
struct dasd_eckd_private {
struct dasd_eckd_characteristics rdc_data;
u8 *conf_data;
......@@ -463,4 +462,5 @@ void dasd_alias_handle_summary_unit_check(struct dasd_device *, struct irb *);
void dasd_eckd_reset_ccw_to_base_io(struct dasd_ccw_req *);
void dasd_alias_lcu_setup_complete(struct dasd_device *);
void dasd_alias_wait_for_lcu_setup(struct dasd_device *);
int dasd_alias_update_add_device(struct dasd_device *);
#endif /* DASD_ECKD_H */
......@@ -81,6 +81,10 @@ struct dasd_block;
#define DASD_SIM_MSG_TO_OP 0x03
#define DASD_SIM_LOG 0x0C
/* lock class for nested cdev lock */
#define CDEV_NESTED_FIRST 1
#define CDEV_NESTED_SECOND 2
/*
* SECTION: MACROs for klogd and s390 debug feature (dbf)
*/
......@@ -228,6 +232,24 @@ struct dasd_ccw_req {
/* Signature for error recovery functions. */
typedef struct dasd_ccw_req *(*dasd_erp_fn_t) (struct dasd_ccw_req *);
/*
* Unique identifier for dasd device.
*/
#define UA_NOT_CONFIGURED 0x00
#define UA_BASE_DEVICE 0x01
#define UA_BASE_PAV_ALIAS 0x02
#define UA_HYPER_PAV_ALIAS 0x03
struct dasd_uid {
__u8 type;
char vendor[4];
char serial[15];
__u16 ssid;
__u8 real_unit_addr;
__u8 base_unit_addr;
char vduit[33];
};
/*
* the struct dasd_discipline is
* something like a table of virtual functions, if you think of dasd_eckd
......@@ -312,28 +334,15 @@ struct dasd_discipline {
/* suspend/resume functions */
int (*freeze) (struct dasd_device *);
int (*restore) (struct dasd_device *);
};
extern struct dasd_discipline *dasd_diag_discipline_pointer;
/*
* Unique identifier for dasd device.
*/
#define UA_NOT_CONFIGURED 0x00
#define UA_BASE_DEVICE 0x01
#define UA_BASE_PAV_ALIAS 0x02
#define UA_HYPER_PAV_ALIAS 0x03
/* reload device after state change */
int (*reload) (struct dasd_device *);
struct dasd_uid {
__u8 type;
char vendor[4];
char serial[15];
__u16 ssid;
__u8 real_unit_addr;
__u8 base_unit_addr;
char vduit[33];
int (*get_uid) (struct dasd_device *, struct dasd_uid *);
};
extern struct dasd_discipline *dasd_diag_discipline_pointer;
/*
* Notification numbers for extended error reporting notifications:
* The DASD_EER_DISABLE notification is sent before a dasd_device (and its
......@@ -386,6 +395,7 @@ struct dasd_device {
struct tasklet_struct tasklet;
struct work_struct kick_work;
struct work_struct restore_device;
struct work_struct reload_device;
struct timer_list timer;
debug_info_t *debug_area;
......@@ -582,6 +592,7 @@ void dasd_enable_device(struct dasd_device *);
void dasd_set_target_state(struct dasd_device *, int);
void dasd_kick_device(struct dasd_device *);
void dasd_restore_device(struct dasd_device *);
void dasd_reload_device(struct dasd_device *);
void dasd_add_request_head(struct dasd_ccw_req *);
void dasd_add_request_tail(struct dasd_ccw_req *);
......@@ -629,8 +640,6 @@ void dasd_devmap_exit(void);
struct dasd_device *dasd_create_device(struct ccw_device *);
void dasd_delete_device(struct dasd_device *);
int dasd_get_uid(struct ccw_device *, struct dasd_uid *);
int dasd_set_uid(struct ccw_device *, struct dasd_uid *);
int dasd_get_feature(struct ccw_device *, int);
int dasd_set_feature(struct ccw_device *, int, int);
......
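
With dasd_get_uid() and dasd_set_uid() removed from the devmap layer, the UID now lives in the discipline's private data and is read through the new get_uid callback shown above. A minimal sketch of that calling pattern follows; the helper name and error codes are illustrative only and are not part of this series:

/* Hedged sketch: format a device UID via the discipline callback.
 * dasd_sketch_format_uid() is an illustrative helper, not part of the patch. */
static int dasd_sketch_format_uid(struct dasd_device *device,
				  char *buf, size_t len)
{
	struct dasd_uid uid;

	if (!device->discipline || !device->discipline->get_uid)
		return -EOPNOTSUPP;
	if (device->discipline->get_uid(device, &uid))
		return -EINVAL;
	/* vendor.serial.ssid.unit_addr, mirroring the sysfs uid attribute */
	return snprintf(buf, len, "%s.%s.%04x.%02x",
			uid.vendor, uid.serial, uid.ssid, uid.real_unit_addr);
}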
......@@ -148,13 +148,12 @@ config VMLOGRDR
This driver depends on the IUCV support driver.
config VMCP
tristate "Support for the z/VM CP interface (VM only)"
bool "Support for the z/VM CP interface"
depends on S390
help
Select this option if you want to be able to interact with the control
program on z/VM.
config MONREADER
tristate "API for reading z/VM monitor service records"
depends on IUCV
......
......@@ -484,6 +484,7 @@ fs3270_open(struct inode *inode, struct file *filp)
raw3270_del_view(&fp->view);
goto out;
}
nonseekable_open(inode, filp);
filp->private_data = fp;
out:
mutex_unlock(&fs3270_mutex);
......
......@@ -49,7 +49,7 @@ static unsigned char ret_diacr[NR_DEAD] = {
struct kbd_data *
kbd_alloc(void) {
struct kbd_data *kbd;
int i, len;
int i;
kbd = kzalloc(sizeof(struct kbd_data), GFP_KERNEL);
if (!kbd)
......@@ -59,12 +59,11 @@ kbd_alloc(void) {
goto out_kbd;
for (i = 0; i < ARRAY_SIZE(key_maps); i++) {
if (key_maps[i]) {
kbd->key_maps[i] =
kmalloc(sizeof(u_short)*NR_KEYS, GFP_KERNEL);
kbd->key_maps[i] = kmemdup(key_maps[i],
sizeof(u_short) * NR_KEYS,
GFP_KERNEL);
if (!kbd->key_maps[i])
goto out_maps;
memcpy(kbd->key_maps[i], key_maps[i],
sizeof(u_short)*NR_KEYS);
}
}
kbd->func_table = kzalloc(sizeof(func_table), GFP_KERNEL);
......@@ -72,23 +71,21 @@ kbd_alloc(void) {
goto out_maps;
for (i = 0; i < ARRAY_SIZE(func_table); i++) {
if (func_table[i]) {
len = strlen(func_table[i]) + 1;
kbd->func_table[i] = kmalloc(len, GFP_KERNEL);
kbd->func_table[i] = kstrdup(func_table[i],
GFP_KERNEL);
if (!kbd->func_table[i])
goto out_func;
memcpy(kbd->func_table[i], func_table[i], len);
}
}
kbd->fn_handler =
kzalloc(sizeof(fn_handler_fn *) * NR_FN_HANDLER, GFP_KERNEL);
if (!kbd->fn_handler)
goto out_func;
kbd->accent_table =
kmalloc(sizeof(struct kbdiacruc)*MAX_DIACR, GFP_KERNEL);
kbd->accent_table = kmemdup(accent_table,
sizeof(struct kbdiacruc) * MAX_DIACR,
GFP_KERNEL);
if (!kbd->accent_table)
goto out_fn_handler;
memcpy(kbd->accent_table, accent_table,
sizeof(struct kbdiacruc)*MAX_DIACR);
kbd->accent_table_size = accent_table_size;
return kbd;
......
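
The keyboard.c hunks above are a straight conversion of the open-coded allocate-and-copy sequences to kmemdup()/kstrdup(). A hedged, stand-alone illustration of the idiom (the function names here are made up for the example):

#include <linux/slab.h>
#include <linux/string.h>

/* Before: buf = kmalloc(len, GFP_KERNEL); if (buf) memcpy(buf, src, len);
 * After: a single kmemdup() call does the allocation and the copy. */
static unsigned short *copy_example_keymap(const unsigned short *src, int nr_keys)
{
	return kmemdup(src, sizeof(*src) * nr_keys, GFP_KERNEL);
}

/* Likewise kmalloc(strlen(s) + 1, ...) followed by memcpy() collapses
 * into kstrdup(). */
static char *copy_example_string(const char *s)
{
	return kstrdup(s, GFP_KERNEL);
}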
/*
* Copyright IBM Corp. 2004,2007
* Copyright IBM Corp. 2004,2010
* Interface implementation for communication with the z/VM control program
* Author(s): Christian Borntraeger <borntraeger@de.ibm.com>
*
* Author(s): Christian Borntraeger <borntraeger@de.ibm.com>
*
* z/VM's CP offers the possibility to issue commands via the diagnose code 8;
* this driver implements a character device that issues these commands and
* returns the answer of CP.
*
* The idea of this driver is based on cpint from Neale Ferguson and #CP in CMS
*/
#define KMSG_COMPONENT "vmcp"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <asm/compat.h>
#include <asm/cpcmd.h>
......@@ -26,10 +22,6 @@
#include <asm/uaccess.h>
#include "vmcp.h"
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Christian Borntraeger <borntraeger@de.ibm.com>");
MODULE_DESCRIPTION("z/VM CP interface");
static debug_info_t *vmcp_debug;
static int vmcp_open(struct inode *inode, struct file *file)
......@@ -197,11 +189,8 @@ static int __init vmcp_init(void)
{
int ret;
if (!MACHINE_IS_VM) {
pr_warning("The z/VM CP interface device driver cannot be "
"loaded without z/VM\n");
return -ENODEV;
}
if (!MACHINE_IS_VM)
return 0;
vmcp_debug = debug_register("vmcp", 1, 1, 240);
if (!vmcp_debug)
......@@ -214,19 +203,8 @@ static int __init vmcp_init(void)
}
ret = misc_register(&vmcp_dev);
if (ret) {
if (ret)
debug_unregister(vmcp_debug);
return ret;
}
return 0;
}
static void __exit vmcp_exit(void)
{
misc_deregister(&vmcp_dev);
debug_unregister(vmcp_debug);
}
module_init(vmcp_init);
module_exit(vmcp_exit);
device_initcall(vmcp_init);
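
As the header comment above says, vmcp is a character device that forwards a CP command and returns CP's answer. A hedged user-space sketch of that write-then-read usage follows; real callers also size the response buffer via the vmcp ioctls, which are omitted here, and the command string is just an example:

/* Hedged sketch: issue one CP command via /dev/vmcp (write, then read). */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	char response[4096];
	ssize_t n;
	int fd = open("/dev/vmcp", O_RDWR);

	if (fd < 0)
		return 1;
	if (write(fd, "QUERY USERID", strlen("QUERY USERID")) < 0) {
		close(fd);
		return 1;
	}
	n = read(fd, response, sizeof(response) - 1);
	if (n > 0) {
		response[n] = '\0';
		printf("%s\n", response);
	}
	close(fd);
	return 0;
}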
......@@ -445,7 +445,7 @@ static int zcore_memmap_open(struct inode *inode, struct file *filp)
}
kfree(chunk_array);
filp->private_data = buf;
return 0;
return nonseekable_open(inode, filp);
}
static int zcore_memmap_release(struct inode *inode, struct file *filp)
......@@ -473,7 +473,7 @@ static ssize_t zcore_reipl_write(struct file *filp, const char __user *buf,
static int zcore_reipl_open(struct inode *inode, struct file *filp)
{
return 0;
return nonseekable_open(inode, filp);
}
static int zcore_reipl_release(struct inode *inode, struct file *filp)
......
......@@ -803,6 +803,7 @@ static long chsc_ioctl(struct file *filp, unsigned int cmd,
static const struct file_operations chsc_fops = {
.owner = THIS_MODULE,
.open = nonseekable_open,
.unlocked_ioctl = chsc_ioctl,
.compat_ioctl = chsc_ioctl,
};
......
......@@ -616,7 +616,8 @@ void __irq_entry do_IRQ(struct pt_regs *regs)
struct pt_regs *old_regs;
old_regs = set_irq_regs(regs);
s390_idle_check();
s390_idle_check(regs, S390_lowcore.int_clock,
S390_lowcore.async_enter_timer);
irq_enter();
__get_cpu_var(s390_idle).nohz_delay = 1;
if (S390_lowcore.int_clock >= S390_lowcore.clock_comparator)
......
......@@ -648,6 +648,8 @@ static void css_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
static void __init
css_generate_pgid(struct channel_subsystem *css, u32 tod_high)
{
struct cpuid cpu_id;
if (css_general_characteristics.mcss) {
css->global_pgid.pgid_high.ext_cssid.version = 0x80;
css->global_pgid.pgid_high.ext_cssid.cssid = css->cssid;
......@@ -658,8 +660,9 @@ css_generate_pgid(struct channel_subsystem *css, u32 tod_high)
css->global_pgid.pgid_high.cpu_addr = 0;
#endif
}
css->global_pgid.cpu_id = S390_lowcore.cpu_id.ident;
css->global_pgid.cpu_model = S390_lowcore.cpu_id.machine;
get_cpu_id(&cpu_id);
css->global_pgid.cpu_id = cpu_id.ident;
css->global_pgid.cpu_model = cpu_id.machine;
css->global_pgid.tod_high = tod_high;
}
......@@ -1062,6 +1065,7 @@ static ssize_t cio_settle_write(struct file *file, const char __user *buf,
}
static const struct file_operations cio_settle_proc_fops = {
.open = nonseekable_open,
.write = cio_settle_write,
};
......
......@@ -13,8 +13,8 @@
#include <asm/debug.h>
#include "chsc.h"
#define QDIO_BUSY_BIT_PATIENCE 100 /* 100 microseconds */
#define QDIO_INPUT_THRESHOLD 500 /* 500 microseconds */
#define QDIO_BUSY_BIT_PATIENCE (100 << 12) /* 100 microseconds */
#define QDIO_INPUT_THRESHOLD (500 << 12) /* 500 microseconds */
/*
* if an asynchronous HiperSockets queue runs full, the 10 seconds timer wait
......@@ -296,10 +296,8 @@ struct qdio_q {
struct qdio_irq *irq_ptr;
struct sl *sl;
/*
* Warning: Leave this member at the end so it won't be cleared in
* qdio_fill_qs. A page is allocated under this pointer and used for
* slib and sl. slib is 2048 bytes big and sl points to offset
* PAGE_SIZE / 2.
* A page is allocated under this pointer and used for slib and sl.
* slib is 2048 bytes big and sl points to offset PAGE_SIZE / 2.
*/
struct slib *slib;
} __attribute__ ((aligned(256)));
......@@ -372,11 +370,6 @@ static inline int multicast_outbound(struct qdio_q *q)
(q->nr == q->irq_ptr->nr_output_qs - 1);
}
static inline unsigned long long get_usecs(void)
{
return monotonic_clock() >> 12;
}
#define pci_out_supported(q) \
(q->irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED)
#define is_qebsm(q) (q->irq_ptr->sch_token != 0)
......
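
The constant changes and the removal of get_usecs() above implement the "dont convert timestamps to microseconds" commit: timestamps stay in the raw TOD clock format returned by get_clock(), where one microsecond corresponds to a value of 1 << 12 (bit 51 of the TOD clock), so only the thresholds are shifted instead of converting every timestamp. A hedged restatement of that relationship, with an illustrative macro name:

/* Hedged illustration: express a microsecond threshold in TOD clock units.
 * 1 microsecond == 1 << 12 TOD units, so get_clock() values can be compared
 * directly instead of shifting every timestamp down by 12 bits. */
#define SKETCH_USEC_TO_TOD(usec)	((unsigned long long)(usec) << 12)

/* e.g. QDIO_BUSY_BIT_PATIENCE == SKETCH_USEC_TO_TOD(100) == (100 << 12) */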
......@@ -336,10 +336,10 @@ static int qdio_siga_output(struct qdio_q *q, unsigned int *busy_bit)
WARN_ON(queue_type(q) != QDIO_IQDIO_QFMT || cc != 2);
if (!start_time) {
start_time = get_usecs();
start_time = get_clock();
goto again;
}
if ((get_usecs() - start_time) < QDIO_BUSY_BIT_PATIENCE)
if ((get_clock() - start_time) < QDIO_BUSY_BIT_PATIENCE)
goto again;
}
return cc;
......@@ -536,7 +536,7 @@ static int qdio_inbound_q_moved(struct qdio_q *q)
if ((bufnr != q->last_move) || q->qdio_error) {
q->last_move = bufnr;
if (!is_thinint_irq(q->irq_ptr) && MACHINE_IS_LPAR)
q->u.in.timestamp = get_usecs();
q->u.in.timestamp = get_clock();
return 1;
} else
return 0;
......@@ -567,7 +567,7 @@ static inline int qdio_inbound_q_done(struct qdio_q *q)
* At this point we know that inbound first_to_check
* has (probably) not moved (see qdio_inbound_processing).
*/
if (get_usecs() > q->u.in.timestamp + QDIO_INPUT_THRESHOLD) {
if (get_clock() > q->u.in.timestamp + QDIO_INPUT_THRESHOLD) {
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in done:%02x",
q->first_to_check);
return 1;
......@@ -606,7 +606,7 @@ static void qdio_kick_handler(struct qdio_q *q)
static void __qdio_inbound_processing(struct qdio_q *q)
{
qperf_inc(q, tasklet_inbound);
again:
if (!qdio_inbound_q_moved(q))
return;
......@@ -615,7 +615,10 @@ static void __qdio_inbound_processing(struct qdio_q *q)
if (!qdio_inbound_q_done(q)) {
/* means poll time is not yet over */
qperf_inc(q, tasklet_inbound_resched);
goto again;
if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED)) {
tasklet_schedule(&q->tasklet);
return;
}
}
qdio_stop_polling(q);
......@@ -625,7 +628,8 @@ static void __qdio_inbound_processing(struct qdio_q *q)
*/
if (!qdio_inbound_q_done(q)) {
qperf_inc(q, tasklet_inbound_resched2);
goto again;
if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED))
tasklet_schedule(&q->tasklet);
}
}
......@@ -955,6 +959,9 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
return;
}
if (irq_ptr->perf_stat_enabled)
irq_ptr->perf_stat.qdio_int++;
if (IS_ERR(irb)) {
switch (PTR_ERR(irb)) {
case -EIO:
......@@ -1016,30 +1023,6 @@ int qdio_get_ssqd_desc(struct ccw_device *cdev,
}
EXPORT_SYMBOL_GPL(qdio_get_ssqd_desc);
/**
* qdio_cleanup - shutdown queues and free data structures
* @cdev: associated ccw device
* @how: use halt or clear to shutdown
*
* This function calls qdio_shutdown() for @cdev with method @how
* and qdio_free(). The qdio_free() return value is ignored since
* !irq_ptr is already checked.
*/
int qdio_cleanup(struct ccw_device *cdev, int how)
{
struct qdio_irq *irq_ptr = cdev->private->qdio_data;
int rc;
if (!irq_ptr)
return -ENODEV;
rc = qdio_shutdown(cdev, how);
qdio_free(cdev);
return rc;
}
EXPORT_SYMBOL_GPL(qdio_cleanup);
static void qdio_shutdown_queues(struct ccw_device *cdev)
{
struct qdio_irq *irq_ptr = cdev->private->qdio_data;
......@@ -1156,28 +1139,6 @@ int qdio_free(struct ccw_device *cdev)
}
EXPORT_SYMBOL_GPL(qdio_free);
/**
* qdio_initialize - allocate and establish queues for a qdio subchannel
* @init_data: initialization data
*
* This function first allocates queues via qdio_allocate() and on success
* establishes them via qdio_establish().
*/
int qdio_initialize(struct qdio_initialize *init_data)
{
int rc;
rc = qdio_allocate(init_data);
if (rc)
return rc;
rc = qdio_establish(init_data);
if (rc)
qdio_free(init_data->cdev);
return rc;
}
EXPORT_SYMBOL_GPL(qdio_initialize);
/**
* qdio_allocate - allocate qdio queues and associated data
* @init_data: initialization data
......
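
With the qdio_cleanup() and qdio_initialize() wrappers removed, drivers now chain the four primitives themselves, as the qeth hunks further down show. A hedged sketch of the resulting calling pattern (function names and error handling are illustrative):

/* Hedged sketch of the post-wrapper calling pattern. */
static int sketch_qdio_bring_up(struct qdio_initialize *init_data)
{
	int rc;

	rc = qdio_allocate(init_data);
	if (rc)
		return rc;
	rc = qdio_establish(init_data);
	if (rc)
		qdio_free(init_data->cdev);	/* formerly done by qdio_initialize() */
	return rc;
}

static void sketch_qdio_tear_down(struct ccw_device *cdev)
{
	/* formerly a single qdio_cleanup(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR) */
	qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
	qdio_free(cdev);
}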
......@@ -106,10 +106,12 @@ int qdio_allocate_qs(struct qdio_irq *irq_ptr, int nr_input_qs, int nr_output_qs
static void setup_queues_misc(struct qdio_q *q, struct qdio_irq *irq_ptr,
qdio_handler_t *handler, int i)
{
/* must be cleared by every qdio_establish */
memset(q, 0, ((char *)&q->slib) - ((char *)q));
memset(q->slib, 0, PAGE_SIZE);
struct slib *slib = q->slib;
/* queue must be cleared for qdio_establish */
memset(q, 0, sizeof(*q));
memset(slib, 0, PAGE_SIZE);
q->slib = slib;
q->irq_ptr = irq_ptr;
q->mask = 1 << (31 - i);
q->nr = i;
......
......@@ -95,7 +95,7 @@ void tiqdio_add_input_queues(struct qdio_irq *irq_ptr)
for_each_input_queue(irq_ptr, q, i)
list_add_rcu(&q->entry, &tiq_list);
mutex_unlock(&tiq_list_lock);
xchg(irq_ptr->dsci, 1);
xchg(irq_ptr->dsci, 1 << 7);
}
void tiqdio_remove_input_queues(struct qdio_irq *irq_ptr)
......@@ -173,7 +173,7 @@ static void tiqdio_thinint_handler(void *ind, void *drv_data)
/* prevent racing */
if (*tiqdio_alsi)
xchg(&q_indicators[TIQDIO_SHARED_IND].ind, 1);
xchg(&q_indicators[TIQDIO_SHARED_IND].ind, 1 << 7);
}
}
......
......@@ -302,7 +302,7 @@ static ssize_t zcrypt_write(struct file *filp, const char __user *buf,
static int zcrypt_open(struct inode *inode, struct file *filp)
{
atomic_inc(&zcrypt_open_count);
return 0;
return nonseekable_open(inode, filp);
}
/**
......
......@@ -1292,13 +1292,14 @@ int qeth_qdio_clear_card(struct qeth_card *card, int use_halt)
QETH_QDIO_CLEANING)) {
case QETH_QDIO_ESTABLISHED:
if (card->info.type == QETH_CARD_TYPE_IQD)
rc = qdio_cleanup(CARD_DDEV(card),
rc = qdio_shutdown(CARD_DDEV(card),
QDIO_FLAG_CLEANUP_USING_HALT);
else
rc = qdio_cleanup(CARD_DDEV(card),
rc = qdio_shutdown(CARD_DDEV(card),
QDIO_FLAG_CLEANUP_USING_CLEAR);
if (rc)
QETH_DBF_TEXT_(TRACE, 3, "1err%d", rc);
qdio_free(CARD_DDEV(card));
atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
break;
case QETH_QDIO_CLEANING:
......@@ -3810,10 +3811,18 @@ static int qeth_qdio_establish(struct qeth_card *card)
if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ALLOCATED,
QETH_QDIO_ESTABLISHED) == QETH_QDIO_ALLOCATED) {
rc = qdio_initialize(&init_data);
if (rc)
rc = qdio_allocate(&init_data);
if (rc) {
atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
goto out;
}
rc = qdio_establish(&init_data);
if (rc) {
atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
qdio_free(CARD_DDEV(card));
}
}
out:
kfree(out_sbal_ptrs);
kfree(in_sbal_ptrs);
kfree(qib_param_field);
......
......@@ -254,6 +254,7 @@ static long zfcp_cfdc_dev_ioctl(struct file *file, unsigned int command,
}
static const struct file_operations zfcp_cfdc_fops = {
.open = nonseekable_open,
.unlocked_ioctl = zfcp_cfdc_dev_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = zfcp_cfdc_dev_ioctl
......
......@@ -394,6 +394,7 @@ typedef struct elf64_shdr {
#define NT_S390_TODPREG 0x303 /* s390 TOD programmable register */
#define NT_S390_CTRS 0x304 /* s390 control registers */
#define NT_S390_PREFIX 0x305 /* s390 prefix register */
#define NT_S390_LAST_BREAK 0x306 /* s390 breaking event address */
/* Note header in a PT_NOTE section */
......
......@@ -621,7 +621,7 @@ static struct ctl_table kern_table[] = {
#endif
{
.procname = "userprocess_debug",
.data = &sysctl_userprocess_debug,
.data = &show_unhandled_signals,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec,
......@@ -1431,7 +1431,8 @@ static struct ctl_table fs_table[] = {
};
static struct ctl_table debug_table[] = {
#if defined(CONFIG_X86) || defined(CONFIG_PPC) || defined(CONFIG_SPARC)
#if defined(CONFIG_X86) || defined(CONFIG_PPC) || defined(CONFIG_SPARC) || \
defined(CONFIG_S390)
{
.procname = "exception-trace",
.data = &show_unhandled_signals,
......