Commit 72aafdf0 authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux

Pull s390 updates from Martin Schwidefsky:

 - Add the CPU id for the new z13s machine

 - Add a s390 specific XOR template for RAID-5 checksumming based on the
   XC instruction; remove all other alternatives, XC is always faster

 - The merge of our four different stack tracers into a single one

 - Tidy up the code related to page tables, several large inline
   functions are now out-of-line.  Bloat-o-meter reports ~11K text size
   reduction

 - A binary interface for the privileged CLP instruction to retrieve
   the hardware view of the installed PCI functions

 - Improvements for the dasd format code

 - Bug fixes and cleanups

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux: (31 commits)
  s390/pci: enforce fmb page boundary rule
  s390: fix floating pointer register corruption (again)
  s390/cpumf: add missing lpp magic initialization
  s390: Fix misspellings in comments
  s390/mm: split arch/s390/mm/pgtable.c
  s390/mm: uninline pmdp_xxx functions from pgtable.h
  s390/mm: uninline ptep_xxx functions from pgtable.h
  s390/pci: add ioctl interface for CLP
  s390: Use pr_warn instead of pr_warning
  s390/dasd: remove casts to dasd_*_private
  s390/dasd: Refactor dasd format functions
  s390/dasd: Simplify code in format logic
  s390/dasd: Improve dasd format code
  s390/percpu: remove this_cpu_cmpxchg_double_4
  s390/cpumf: Improve guest detection heuristics
  s390/fault: merge report_user_fault implementations
  s390/dis: use correct escape sequence for '%' character
  s390/kvm: simplify set_guest_storage_key
  s390/oprofile: add z13/z13s model numbers
  s390: add z13s model number to z13 elf platform
  ...
parents 1c8e85b1 80c544de
@@ -254,12 +254,12 @@ config MARCH_ZEC12
	  older machines.

 config MARCH_Z13
-	bool "IBM z13"
+	bool "IBM z13s and z13"
 	select HAVE_MARCH_Z13_FEATURES
 	help
-	  Select this to enable optimizations for IBM z13 (2964 series).
-	  The kernel will be slightly faster but will not work on older
-	  machines.
+	  Select this to enable optimizations for IBM z13s and z13 (2965 and
+	  2964 series). The kernel will be slightly faster but will not work on
+	  older machines.

 endchoice
...
@@ -4,14 +4,23 @@
 /* CLP common request & response block size */
 #define CLP_BLK_SIZE		PAGE_SIZE

+#define CLP_LPS_BASE		0
+#define CLP_LPS_PCI		2
+
 struct clp_req_hdr {
	u16 len;
	u16 cmd;
+	u32 fmt		: 4;
+	u32 reserved1	: 28;
+	u64 reserved2;
 } __packed;

 struct clp_rsp_hdr {
	u16 len;
	u16 rsp;
+	u32 fmt		: 4;
+	u32 reserved1	: 28;
+	u64 reserved2;
 } __packed;

 /* CLP Response Codes */
@@ -25,4 +34,22 @@ struct clp_rsp_hdr {
 #define CLP_RC_NODATA		0x0080	/* No data available */
 #define CLP_RC_FC_UNKNOWN	0x0100	/* Function code not recognized */

+/* Store logical-processor characteristics request */
+struct clp_req_slpc {
+	struct clp_req_hdr hdr;
+} __packed;
+
+struct clp_rsp_slpc {
+	struct clp_rsp_hdr hdr;
+	u32 reserved2[4];
+	u32 lpif[8];
+	u32 reserved3[8];
+	u32 lpic[8];
+} __packed;
+
+struct clp_req_rsp_slpc {
+	struct clp_req_slpc request;
+	struct clp_rsp_slpc response;
+} __packed;
+
 #endif
/*
* KVM guest address space mapping code
*
* Copyright IBM Corp. 2007, 2016
* Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
*/
#ifndef _ASM_S390_GMAP_H
#define _ASM_S390_GMAP_H
/**
* struct gmap_struct - guest address space
* @crst_list: list of all crst tables used in the guest address space
* @mm: pointer to the parent mm_struct
* @guest_to_host: radix tree with guest to host address translation
* @host_to_guest: radix tree with pointer to segment table entries
* @guest_table_lock: spinlock to protect all entries in the guest page table
* @table: pointer to the page directory
* @asce: address space control element for gmap page table
* @pfault_enabled: defines if pfaults are applicable for the guest
*/
struct gmap {
	struct list_head list;
	struct list_head crst_list;
	struct mm_struct *mm;
	struct radix_tree_root guest_to_host;
	struct radix_tree_root host_to_guest;
	spinlock_t guest_table_lock;
	unsigned long *table;
	unsigned long asce;
	unsigned long asce_end;
	void *private;
	bool pfault_enabled;
};
/**
* struct gmap_notifier - notify function block for page invalidation
* @notifier_call: address of callback function
*/
struct gmap_notifier {
	struct list_head list;
	void (*notifier_call)(struct gmap *gmap, unsigned long gaddr);
};
struct gmap *gmap_alloc(struct mm_struct *mm, unsigned long limit);
void gmap_free(struct gmap *gmap);
void gmap_enable(struct gmap *gmap);
void gmap_disable(struct gmap *gmap);
int gmap_map_segment(struct gmap *gmap, unsigned long from,
		     unsigned long to, unsigned long len);
int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len);
unsigned long __gmap_translate(struct gmap *, unsigned long gaddr);
unsigned long gmap_translate(struct gmap *, unsigned long gaddr);
int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr);
int gmap_fault(struct gmap *, unsigned long gaddr, unsigned int fault_flags);
void gmap_discard(struct gmap *, unsigned long from, unsigned long to);
void __gmap_zap(struct gmap *, unsigned long gaddr);
void gmap_unlink(struct mm_struct *, unsigned long *table, unsigned long vmaddr);
void gmap_register_ipte_notifier(struct gmap_notifier *);
void gmap_unregister_ipte_notifier(struct gmap_notifier *);
int gmap_ipte_notify(struct gmap *, unsigned long start, unsigned long len);
#endif /* _ASM_S390_GMAP_H */
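
A minimal usage sketch of the gmap API declared above (hypothetical caller, not part of this merge; kernel context with the usual includes assumed, error handling mostly elided):

/* Hypothetical sketch: create a 4 GiB guest address space on top of a
 * parent mm, back guest real memory at 0 with 1 MiB of host memory,
 * translate one guest address, then tear everything down again. */
static int gmap_usage_sketch(struct mm_struct *mm)
{
	struct gmap *gmap;
	unsigned long vmaddr;

	gmap = gmap_alloc(mm, (1UL << 32) - 1);
	if (!gmap)
		return -ENOMEM;
	/* from = host (user space) address, to = guest address, 1 MiB */
	if (gmap_map_segment(gmap, 0x10000000UL, 0x0UL, 0x100000UL)) {
		gmap_free(gmap);
		return -EFAULT;
	}
	vmaddr = gmap_translate(gmap, 0x1000UL);
	if (IS_ERR_VALUE(vmaddr))
		pr_info("guest address not mapped yet\n");
	gmap_free(gmap);
	return 0;
}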
@@ -45,7 +45,7 @@ struct zpci_fmb {
	u64 rpcit_ops;
	u64 dma_rbytes;
	u64 dma_wbytes;
-} __packed __aligned(16);
+} __packed __aligned(64);

 enum zpci_state {
	ZPCI_FN_STATE_RESERVED,
@@ -66,7 +66,6 @@ struct s390_domain;
 /* Private data per function */
 struct zpci_dev {
-	struct pci_dev	*pdev;
	struct pci_bus	*bus;
	struct list_head entry;		/* list of all zpci_devices, needed for hotplug, etc. */
@@ -192,7 +191,7 @@ int zpci_fmb_disable_device(struct zpci_dev *);
 /* Debug */
 int zpci_debug_init(void);
 void zpci_debug_exit(void);
-void zpci_debug_init_device(struct zpci_dev *);
+void zpci_debug_init_device(struct zpci_dev *, const char *);
 void zpci_debug_exit_device(struct zpci_dev *);
 void zpci_debug_info(struct zpci_dev *, struct seq_file *);
...
@@ -49,9 +49,6 @@ struct clp_fh_list_entry {
 /* List PCI functions request */
 struct clp_req_list_pci {
	struct clp_req_hdr hdr;
-	u32 fmt		: 4;	/* cmd request block format */
-	u32		: 28;
-	u64 reserved1;
	u64 resume_token;
	u64 reserved2;
 } __packed;
@@ -59,9 +56,6 @@ struct clp_req_list_pci {
 /* List PCI functions response */
 struct clp_rsp_list_pci {
	struct clp_rsp_hdr hdr;
-	u32 fmt		: 4;	/* cmd request block format */
-	u32		: 28;
-	u64 reserved1;
	u64 resume_token;
	u32 reserved2;
	u16 max_fn;
@@ -73,9 +67,6 @@ struct clp_rsp_list_pci {
 /* Query PCI function request */
 struct clp_req_query_pci {
	struct clp_req_hdr hdr;
-	u32 fmt		: 4;	/* cmd request block format */
-	u32		: 28;
-	u64 reserved1;
	u32 fh;			/* function handle */
	u32 reserved2;
	u64 reserved3;
@@ -84,9 +75,6 @@ struct clp_req_query_pci {
 /* Query PCI function response */
 struct clp_rsp_query_pci {
	struct clp_rsp_hdr hdr;
-	u32 fmt		: 4;	/* cmd request block format */
-	u32		: 28;
-	u64		: 64;
	u16 vfn;		/* virtual fn number */
	u16		: 7;
	u16 util_str_avail : 1;	/* utility string available? */
@@ -108,21 +96,15 @@ struct clp_rsp_query_pci {
 /* Query PCI function group request */
 struct clp_req_query_pci_grp {
	struct clp_req_hdr hdr;
-	u32 fmt		: 4;	/* cmd request block format */
-	u32		: 28;
-	u64 reserved1;
-	u32		: 24;
+	u32 reserved2	: 24;
	u32 pfgid	: 8;	/* function group id */
-	u32 reserved2;
-	u64 reserved3;
+	u32 reserved3;
+	u64 reserved4;
 } __packed;

 /* Query PCI function group response */
 struct clp_rsp_query_pci_grp {
	struct clp_rsp_hdr hdr;
-	u32 fmt		: 4;	/* cmd request block format */
-	u32		: 28;
-	u64 reserved1;
	u16		: 4;
	u16 noi		: 12;	/* number of interrupts */
	u8 version;
@@ -141,9 +123,6 @@ struct clp_rsp_query_pci_grp {
 /* Set PCI function request */
 struct clp_req_set_pci {
	struct clp_req_hdr hdr;
-	u32 fmt		: 4;	/* cmd request block format */
-	u32		: 28;
-	u64 reserved1;
	u32 fh;			/* function handle */
	u16 reserved2;
	u8 oc;			/* operation controls */
@@ -154,9 +133,6 @@ struct clp_req_set_pci {
 /* Set PCI function response */
 struct clp_rsp_set_pci {
	struct clp_rsp_hdr hdr;
-	u32 fmt		: 4;	/* cmd request block format */
-	u32		: 28;
-	u64 reserved1;
	u32 fh;			/* function handle */
	u32 reserved3;
	u64 reserved4;
...
@@ -178,7 +178,6 @@
	ret__;								\
 })

-#define this_cpu_cmpxchg_double_4 arch_this_cpu_cmpxchg_double
 #define this_cpu_cmpxchg_double_8 arch_this_cpu_cmpxchg_double

 #include <asm-generic/percpu.h>
...
@@ -21,7 +21,7 @@
 #define PMU_F_ERR_LSDA			0x0200
 #define PMU_F_ERR_MASK			(PMU_F_ERR_IBE|PMU_F_ERR_LSDA)

-/* Perf defintions for PMU event attributes in sysfs */
+/* Perf definitions for PMU event attributes in sysfs */
 extern __init const struct attribute_group **cpumf_cf_event_group(void);
 extern ssize_t cpumf_events_sysfs_show(struct device *dev,
				       struct device_attribute *attr,
...
@@ -23,10 +23,6 @@ void page_table_free(struct mm_struct *, unsigned long *);
 void page_table_free_rcu(struct mmu_gather *, unsigned long *, unsigned long);
 extern int page_table_allocate_pgste;

-int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
-			  unsigned long key, bool nq);
-unsigned long get_guest_storage_key(struct mm_struct *mm, unsigned long addr);
-
 static inline void clear_table(unsigned long *s, unsigned long val, size_t n)
 {
	typedef struct { char _[n]; } addrtype;
...
This diff is collapsed.
@@ -184,6 +184,10 @@ struct task_struct;
 struct mm_struct;
 struct seq_file;

+typedef int (*dump_trace_func_t)(void *data, unsigned long address);
+void dump_trace(dump_trace_func_t func, void *data,
+		struct task_struct *task, unsigned long sp);
+
 void show_cacheinfo(struct seq_file *m);

 /* Free all resources held by a thread. */
@@ -203,6 +207,14 @@ unsigned long get_wchan(struct task_struct *p);
 /* Has task runtime instrumentation enabled ? */
 #define is_ri_task(tsk) (!!(tsk)->thread.ri_cb)

+static inline unsigned long current_stack_pointer(void)
+{
+	unsigned long sp;
+
+	asm volatile("la %0,0(15)" : "=a" (sp));
+	return sp;
+}
+
 static inline unsigned short stap(void)
 {
	unsigned short cpu_address;
...
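
To illustrate the new callback interface added above, a hypothetical consumer in the same style as the tracers converted later in this series (not part of the merge itself):

/* Hypothetical example: count the frames on the current stack by
 * walking it with the new callback-based dump_trace(). */
static int count_frame(void *data, unsigned long address)
{
	unsigned int *count = data;

	(*count)++;
	return 0;	/* returning non-zero would stop the walk early */
}

static unsigned int current_stack_depth(void)
{
	unsigned int count = 0;

	dump_trace(count_frame, &count, NULL, current_stack_pointer());
	return count;
}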
@@ -31,7 +31,7 @@
 * This should be totally fair - if anything is waiting, a process that wants a
 * lock will go to the back of the queue. When the currently active lock is
 * released, if there's a writer at the front of the queue, then that and only
- * that will be woken up; if there's a bunch of consequtive readers at the
+ * that will be woken up; if there's a bunch of consecutive readers at the
 * front, then they'll all be woken up, but no other readers will be.
 */
...
@@ -101,6 +101,8 @@ extern void pfault_fini(void);
 #define pfault_fini()		do { } while (0)
 #endif /* CONFIG_PFAULT */

+void report_user_fault(struct pt_regs *regs, long signr, int is_mm_fault);
+
 extern void cmma_init(void);
 extern void (*_machine_restart)(char *command);
...
-#include <asm-generic/xor.h>
+/*
+ * Optimized xor routines
+ *
+ * Copyright IBM Corp. 2016
+ * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
+ */
+#ifndef _ASM_S390_XOR_H
+#define _ASM_S390_XOR_H
+
+extern struct xor_block_template xor_block_xc;
+
+#undef XOR_TRY_TEMPLATES
+#define XOR_TRY_TEMPLATES			\
+do {						\
+	xor_speed(&xor_block_xc);		\
+} while (0)
+
+#define XOR_SELECT_TEMPLATE(FASTEST)	(&xor_block_xc)
+
+#endif /* _ASM_S390_XOR_H */
/*
* ioctl interface for /dev/clp
*
* Copyright IBM Corp. 2016
* Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
*/
#ifndef _ASM_CLP_H
#define _ASM_CLP_H
#include <linux/types.h>
#include <linux/ioctl.h>
struct clp_req {
	unsigned int c : 1;
	unsigned int r : 1;
	unsigned int lps : 6;
	unsigned int cmd : 8;
	unsigned int : 16;
	unsigned int reserved;
	__u64 data_p;
};
#define CLP_IOCTL_MAGIC 'c'
#define CLP_SYNC _IOWR(CLP_IOCTL_MAGIC, 0xC1, struct clp_req)
#endif
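
A hypothetical user-space sketch of driving the CLP_SYNC ioctl defined above (not part of this merge): the command code and the lps setting below are placeholders, since the accepted values come from the CLP specification and the kernel's checks, not from this header.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <asm/clp.h>

int main(void)
{
	/* One page-sized (CLP_BLK_SIZE) buffer holding request + response. */
	static unsigned char buf[4096] __attribute__((aligned(4096)));
	struct clp_req req;
	int fd, rc;

	fd = open("/dev/clp", O_RDWR);
	if (fd < 0)
		return 1;
	memset(&req, 0, sizeof(req));
	req.lps = 2;				/* CLP_LPS_PCI, from asm/clp.h */
	req.cmd = 0x01;				/* placeholder command code */
	req.data_p = (__u64)(unsigned long)buf;	/* request/response block */
	rc = ioctl(fd, CLP_SYNC, &req);
	printf("CLP_SYNC returned %d\n", rc);
	close(fd);
	return rc < 0;
}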
@@ -12,6 +12,7 @@
 #include <asm/idle.h>
 #include <asm/vdso.h>
 #include <asm/pgtable.h>
+#include <asm/gmap.h>

 /*
  * Make sure that the compiler is new enough. We want a compiler that
...
@@ -96,8 +96,7 @@ int cpcmd(const char *cmd, char *response, int rlen, int *response_code)
	    (((unsigned long)response + rlen) >> 31)) {
		lowbuf = kmalloc(rlen, GFP_KERNEL | GFP_DMA);
		if (!lowbuf) {
-			pr_warning("The cpcmd kernel function failed to "
-				   "allocate a response buffer\n");
+			pr_warn("The cpcmd kernel function failed to allocate a response buffer\n");
			return -ENOMEM;
		}
		spin_lock_irqsave(&cpcmd_lock, flags);
...
@@ -699,8 +699,7 @@ debug_info_t *debug_register_mode(const char *name, int pages_per_area,
	/* Since debugfs currently does not support uid/gid other than root, */
	/* we do not allow gid/uid != 0 until we get support for that. */
	if ((uid != 0) || (gid != 0))
-		pr_warning("Root becomes the owner of all s390dbf files "
-			   "in sysfs\n");
+		pr_warn("Root becomes the owner of all s390dbf files in sysfs\n");
	BUG_ON(!initialized);
	mutex_lock(&debug_mutex);
@@ -1307,8 +1306,7 @@ debug_input_level_fn(debug_info_t * id, struct debug_view *view,
		new_level = debug_get_uint(str);
	}
	if(new_level < 0) {
-		pr_warning("%s is not a valid level for a debug "
-			   "feature\n", str);
+		pr_warn("%s is not a valid level for a debug feature\n", str);
		rc = -EINVAL;
	} else {
		debug_set_level(id, new_level);
...
@@ -1920,23 +1920,16 @@ static int print_insn(char *buffer, unsigned char *code, unsigned long addr)
		}
		if (separator)
			ptr += sprintf(ptr, "%c", separator);
-		/*
-		 * Use four '%' characters below because of the
-		 * following two conversions:
-		 *
-		 *  1) sprintf: %%%%r -> %%r
-		 *  2) printk : %%r -> %r
-		 */
		if (operand->flags & OPERAND_GPR)
-			ptr += sprintf(ptr, "%%%%r%i", value);
+			ptr += sprintf(ptr, "%%r%i", value);
		else if (operand->flags & OPERAND_FPR)
-			ptr += sprintf(ptr, "%%%%f%i", value);
+			ptr += sprintf(ptr, "%%f%i", value);
		else if (operand->flags & OPERAND_AR)
-			ptr += sprintf(ptr, "%%%%a%i", value);
+			ptr += sprintf(ptr, "%%a%i", value);
		else if (operand->flags & OPERAND_CR)
-			ptr += sprintf(ptr, "%%%%c%i", value);
+			ptr += sprintf(ptr, "%%c%i", value);
		else if (operand->flags & OPERAND_VR)
-			ptr += sprintf(ptr, "%%%%v%i", value);
+			ptr += sprintf(ptr, "%%v%i", value);
		else if (operand->flags & OPERAND_PCREL)
			ptr += sprintf(ptr, "%lx", (signed int) value
				       + addr);
...
@@ -19,28 +19,28 @@
 #include <asm/ipl.h>

 /*
- * For show_trace we have tree different stack to consider:
+ * For dump_trace we have three different stacks to consider:
  *  - the panic stack which is used if the kernel stack has overflown
  *  - the asynchronous interrupt stack (cpu related)
  *  - the synchronous kernel stack (process related)
- * The stack trace can start at any of the three stack and can potentially
+ * The stack trace can start at any of the three stacks and can potentially
  * touch all of them. The order is: panic stack, async stack, sync stack.
  */
 static unsigned long
-__show_trace(unsigned long sp, unsigned long low, unsigned long high)
+__dump_trace(dump_trace_func_t func, void *data, unsigned long sp,
+	     unsigned long low, unsigned long high)
 {
	struct stack_frame *sf;
	struct pt_regs *regs;
-	unsigned long addr;

	while (1) {
		if (sp < low || sp > high - sizeof(*sf))
			return sp;
		sf = (struct stack_frame *) sp;
-		addr = sf->gprs[8];
-		printk("([<%016lx>] %pSR)\n", addr, (void *)addr);
		/* Follow the backchain. */
		while (1) {
+			if (func(data, sf->gprs[8]))
+				return sp;
			low = sp;
			sp = sf->back_chain;
			if (!sp)
@@ -48,46 +48,58 @@ __show_trace(unsigned long sp, unsigned long low, unsigned long high)
			if (sp <= low || sp > high - sizeof(*sf))
				return sp;
			sf = (struct stack_frame *) sp;
-			addr = sf->gprs[8];
-			printk(" [<%016lx>] %pSR\n", addr, (void *)addr);
		}
		/* Zero backchain detected, check for interrupt frame. */
		sp = (unsigned long) (sf + 1);
		if (sp <= low || sp > high - sizeof(*regs))
			return sp;
		regs = (struct pt_regs *) sp;
-		addr = regs->psw.addr;
-		printk(" [<%016lx>] %pSR\n", addr, (void *)addr);
+		if (!user_mode(regs)) {
+			if (func(data, regs->psw.addr))
+				return sp;
+		}
		low = sp;
		sp = regs->gprs[15];
	}
 }

-static void show_trace(struct task_struct *task, unsigned long *stack)
+void dump_trace(dump_trace_func_t func, void *data, struct task_struct *task,
+		unsigned long sp)
 {
-	const unsigned long frame_size =
-		STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
-	register unsigned long __r15 asm ("15");
-	unsigned long sp;
+	unsigned long frame_size;

-	sp = (unsigned long) stack;
-	if (!sp)
-		sp = task ? task->thread.ksp : __r15;
-	printk("Call Trace:\n");
+	frame_size = STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
 #ifdef CONFIG_CHECK_STACK
-	sp = __show_trace(sp,
+	sp = __dump_trace(func, data, sp,
			  S390_lowcore.panic_stack + frame_size - 4096,
			  S390_lowcore.panic_stack + frame_size);
 #endif
-	sp = __show_trace(sp,
+	sp = __dump_trace(func, data, sp,
			  S390_lowcore.async_stack + frame_size - ASYNC_SIZE,
			  S390_lowcore.async_stack + frame_size);
	if (task)
-		__show_trace(sp, (unsigned long) task_stack_page(task),
-			     (unsigned long) task_stack_page(task) + THREAD_SIZE);
+		__dump_trace(func, data, sp,
+			     (unsigned long)task_stack_page(task),
+			     (unsigned long)task_stack_page(task) + THREAD_SIZE);
	else
-		__show_trace(sp, S390_lowcore.thread_info,
+		__dump_trace(func, data, sp,
+			     S390_lowcore.thread_info,
			     S390_lowcore.thread_info + THREAD_SIZE);
+}
+EXPORT_SYMBOL_GPL(dump_trace);
+
+static int show_address(void *data, unsigned long address)
+{
+	printk("([<%016lx>] %pSR)\n", address, (void *)address);
+	return 0;
+}
+
+static void show_trace(struct task_struct *task, unsigned long sp)
+{
+	if (!sp)
+		sp = task ? task->thread.ksp : current_stack_pointer();
+	printk("Call Trace:\n");
+	dump_trace(show_address, NULL, task, sp);
	if (!task)
		task = current;
	debug_show_held_locks(task);
@@ -95,15 +107,16 @@ static void show_trace(struct task_struct *task, unsigned long *stack)

 void show_stack(struct task_struct *task, unsigned long *sp)
 {
-	register unsigned long *__r15 asm ("15");
	unsigned long *stack;
	int i;

-	if (!sp)
-		stack = task ? (unsigned long *) task->thread.ksp : __r15;
-	else
-		stack = sp;
+	stack = sp;
+	if (!stack) {
+		if (!task)
+			stack = (unsigned long *)current_stack_pointer();
+		else
+			stack = (unsigned long *)task->thread.ksp;
+	}

	for (i = 0; i < 20; i++) {
		if (((addr_t) stack & (THREAD_SIZE-1)) == 0)
			break;
@@ -112,7 +125,7 @@ void show_stack(struct task_struct *task, unsigned long *sp)
		printk("%016lx ", *stack++);
	}
	printk("\n");
-	show_trace(task, sp);
+	show_trace(task, (unsigned long)sp);
 }

 static void show_last_breaking_event(struct pt_regs *regs)
@@ -121,13 +134,9 @@ static void show_last_breaking_event(struct pt_regs *regs)
	printk(" [<%016lx>] %pSR\n", regs->args[0], (void *)regs->args[0]);
 }

-static inline int mask_bits(struct pt_regs *regs, unsigned long bits)
-{
-	return (regs->psw.mask & bits) / ((~bits + 1) & bits);
-}
-
 void show_registers(struct pt_regs *regs)
 {
+	struct psw_bits *psw = &psw_bits(regs->psw);
	char *mode;

	mode = user_mode(regs) ? "User" : "Krnl";
@@ -136,13 +145,9 @@ void show_registers(struct pt_regs *regs)
		printk(" (%pSR)", (void *)regs->psw.addr);
	printk("\n");
	printk("      R:%x T:%x IO:%x EX:%x Key:%x M:%x W:%x "
-	       "P:%x AS:%x CC:%x PM:%x", mask_bits(regs, PSW_MASK_PER),
-	       mask_bits(regs, PSW_MASK_DAT), mask_bits(regs, PSW_MASK_IO),
-	       mask_bits(regs, PSW_MASK_EXT), mask_bits(regs, PSW_MASK_KEY),
-	       mask_bits(regs, PSW_MASK_MCHECK), mask_bits(regs, PSW_MASK_WAIT),
-	       mask_bits(regs, PSW_MASK_PSTATE), mask_bits(regs, PSW_MASK_ASC),
-	       mask_bits(regs, PSW_MASK_CC), mask_bits(regs, PSW_MASK_PM));
-	printk(" EA:%x", mask_bits(regs, PSW_MASK_EA | PSW_MASK_BA));
+	       "P:%x AS:%x CC:%x PM:%x", psw->r, psw->t, psw->i, psw->e,
+	       psw->key, psw->m, psw->w, psw->p, psw->as, psw->cc, psw->pm);
+	printk(" RI:%x EA:%x", psw->ri, psw->eaba);
	printk("\n%s GPRS: %016lx %016lx %016lx %016lx\n", mode,
	       regs->gprs[0], regs->gprs[1], regs->gprs[2], regs->gprs[3]);
	printk("      %016lx %016lx %016lx %016lx\n",
@@ -160,7 +165,7 @@ void show_regs(struct pt_regs *regs)
	show_registers(regs);
	/* Show stack backtrace if pt_regs is from kernel mode */
	if (!user_mode(regs))
-		show_trace(NULL, (unsigned long *) regs->gprs[15]);
+		show_trace(NULL, regs->gprs[15]);
	show_last_breaking_event(regs);
 }
...
@@ -186,6 +186,7 @@ ENTRY(__switch_to)
	stg	%r5,__LC_THREAD_INFO		# store thread info of next
	stg	%r15,__LC_KERNEL_STACK		# store end of kernel stack
	lg	%r15,__THREAD_ksp(%r1)		# load kernel stack of next
+	/* c4 is used in guest detection: arch/s390/kernel/perf_cpum_sf.c */
	lctl	%c4,%c4,__TASK_pid(%r3)		# load pid to control reg. 4
	mvc	__LC_CURRENT_PID(4,%r0),__TASK_pid(%r3)	# store pid of next
	lmg	%r6,%r15,__SF_GPRS(%r15)	# load gprs of next task
@@ -1199,114 +1200,12 @@ cleanup_critical:
	.quad	.Lpsw_idle_lpsw

 .Lcleanup_save_fpu_regs:
-	TSTMSK	__LC_CPU_FLAGS,_CIF_FPU
-	bor	%r14
-	clg	%r9,BASED(.Lcleanup_save_fpu_regs_done)
-	jhe	5f
-	clg	%r9,BASED(.Lcleanup_save_fpu_regs_fp)
-	jhe	4f
-	clg	%r9,BASED(.Lcleanup_save_fpu_regs_vx_high)
-	jhe	3f
-	clg	%r9,BASED(.Lcleanup_save_fpu_regs_vx_low)
-	jhe	2f
-	clg	%r9,BASED(.Lcleanup_save_fpu_fpc_end)
-	jhe	1f
-	lg	%r2,__LC_CURRENT
-	aghi	%r2,__TASK_thread
-0:	# Store floating-point controls
-	stfpc	__THREAD_FPU_fpc(%r2)
-1:	# Load register save area and check if VX is active
-	lg	%r3,__THREAD_FPU_regs(%r2)
-	TSTMSK	__LC_MACHINE_FLAGS,MACHINE_FLAG_VX
-	jz	4f			# no VX -> store FP regs
-2:	# Store vector registers (V0-V15)
-	VSTM	%v0,%v15,0,%r3		# vstm 0,15,0(3)
-3:	# Store vector registers (V16-V31)
-	VSTM	%v16,%v31,256,%r3	# vstm 16,31,256(3)
-	j	5f			# -> done, set CIF_FPU flag
-4:	# Store floating-point registers
-	std	0,0(%r3)
-	std	1,8(%r3)
-	std	2,16(%r3)
-	std	3,24(%r3)
-	std	4,32(%r3)
-	std	5,40(%r3)
-	std	6,48(%r3)
-	std	7,56(%r3)
-	std	8,64(%r3)
-	std	9,72(%r3)
-	std	10,80(%r3)
-	std	11,88(%r3)
-	std	12,96(%r3)
-	std	13,104(%r3)
-	std	14,112(%r3)
-	std	15,120(%r3)
-5:	# Set CIF_FPU flag
-	oi	__LC_CPU_FLAGS+7,_CIF_FPU
-	lg	%r9,48(%r11)		# return from save_fpu_regs
+	larl	%r9,save_fpu_regs
	br	%r14
-.Lcleanup_save_fpu_fpc_end:
-	.quad	.Lsave_fpu_regs_fpc_end
-.Lcleanup_save_fpu_regs_vx_low:
-	.quad	.Lsave_fpu_regs_vx_low
-.Lcleanup_save_fpu_regs_vx_high:
-	.quad	.Lsave_fpu_regs_vx_high
-.Lcleanup_save_fpu_regs_fp:
-	.quad	.Lsave_fpu_regs_fp
-.Lcleanup_save_fpu_regs_done:
-	.quad	.Lsave_fpu_regs_done

 .Lcleanup_load_fpu_regs:
-	TSTMSK	__LC_CPU_FLAGS,_CIF_FPU
-	bnor	%r14
-	clg	%r9,BASED(.Lcleanup_load_fpu_regs_done)
-	jhe	1f
-	clg	%r9,BASED(.Lcleanup_load_fpu_regs_fp)
-	jhe	2f
-	clg	%r9,BASED(.Lcleanup_load_fpu_regs_vx_high)
-	jhe	3f
-	clg	%r9,BASED(.Lcleanup_load_fpu_regs_vx)
-	jhe	4f
-	lg	%r4,__LC_CURRENT
-	aghi	%r4,__TASK_thread
-	lfpc	__THREAD_FPU_fpc(%r4)
-	TSTMSK	__LC_MACHINE_FLAGS,MACHINE_FLAG_VX
-	lg	%r4,__THREAD_FPU_regs(%r4)	# %r4 <- reg save area
-	jz	2f				# -> no VX, load FP regs
-4:	# Load V0 ..V15 registers
-	VLM	%v0,%v15,0,%r4
-3:	# Load V16..V31 registers
-	VLM	%v16,%v31,256,%r4
-	j	1f
-2:	# Load floating-point registers
-	ld	0,0(%r4)
-	ld	1,8(%r4)
-	ld	2,16(%r4)
-	ld	3,24(%r4)
-	ld	4,32(%r4)
-	ld	5,40(%r4)
-	ld	6,48(%r4)
-	ld	7,56(%r4)
-	ld	8,64(%r4)
-	ld	9,72(%r4)
-	ld	10,80(%r4)
-	ld	11,88(%r4)
-	ld	12,96(%r4)
-	ld	13,104(%r4)
-	ld	14,112(%r4)
-	ld	15,120(%r4)
-1:	# Clear CIF_FPU bit
-	ni	__LC_CPU_FLAGS+7,255-_CIF_FPU
-	lg	%r9,48(%r11)		# return from load_fpu_regs
+	larl	%r9,load_fpu_regs
	br	%r14
-.Lcleanup_load_fpu_regs_vx:
-	.quad	.Lload_fpu_regs_vx
-.Lcleanup_load_fpu_regs_vx_high:
-	.quad	.Lload_fpu_regs_vx_high
-.Lcleanup_load_fpu_regs_fp:
-	.quad	.Lload_fpu_regs_fp
-.Lcleanup_load_fpu_regs_done:
-	.quad	.Lload_fpu_regs_done

 /*
  * Integer constants
...
@@ -164,8 +164,7 @@ void do_softirq_own_stack(void)
 {
	unsigned long old, new;

-	/* Get current stack pointer. */
-	asm volatile("la %0,0(15)" : "=a" (old));
+	old = current_stack_pointer();
	/* Check against async. stack address range. */
	new = S390_lowcore.async_stack;
	if (((new - old) >> (PAGE_SHIFT + THREAD_ORDER)) != 0) {
...
@@ -383,7 +383,7 @@ static int __hw_perf_event_init(struct perf_event *event)
	/* Validate the counter that is assigned to this event.
	 * Because the counter facility can use numerous counters at the
-	 * same time without constraints, it is not necessary to explicity
+	 * same time without constraints, it is not necessary to explicitly
	 * validate event groups (event->group_leader != event).
	 */
	err = validate_event(hwc);
...
@@ -1022,10 +1022,13 @@ static int perf_push_sample(struct perf_event *event, struct sf_raw_sample *sfr)
	/*
	 * A non-zero guest program parameter indicates a guest
	 * sample.
-	 * Note that some early samples might be misaccounted to
-	 * the host.
+	 * Note that some early samples or samples from guests without
+	 * lpp usage would be misaccounted to the host. We use the asn
+	 * value as a heuristic to detect most of these guest samples.
+	 * If the value differs from the host hpp value, we assume
+	 * it to be a KVM guest.
	 */
-	if (sfr->basic.gpp)
+	if (sfr->basic.gpp || sfr->basic.prim_asn != (u16) sfr->basic.hpp)
		sde_regs->in_guest = 1;

	overflow = 0;
...
@@ -222,67 +222,23 @@ static int __init service_level_perf_register(void)
 }
 arch_initcall(service_level_perf_register);

-/* See also arch/s390/kernel/traps.c */
-static unsigned long __store_trace(struct perf_callchain_entry *entry,
-				   unsigned long sp,
-				   unsigned long low, unsigned long high)
+static int __perf_callchain_kernel(void *data, unsigned long address)
 {
-	struct stack_frame *sf;
-	struct pt_regs *regs;
-
-	while (1) {
-		if (sp < low || sp > high - sizeof(*sf))
-			return sp;
-		sf = (struct stack_frame *) sp;
-		perf_callchain_store(entry, sf->gprs[8]);
-		/* Follow the backchain. */
-		while (1) {
-			low = sp;
-			sp = sf->back_chain;
-			if (!sp)
-				break;
-			if (sp <= low || sp > high - sizeof(*sf))
-				return sp;
-			sf = (struct stack_frame *) sp;
-			perf_callchain_store(entry, sf->gprs[8]);
-		}
-		/* Zero backchain detected, check for interrupt frame. */
-		sp = (unsigned long) (sf + 1);
-		if (sp <= low || sp > high - sizeof(*regs))
-			return sp;
-		regs = (struct pt_regs *) sp;
-		perf_callchain_store(entry, sf->gprs[8]);
-		low = sp;
-		sp = regs->gprs[15];
-	}
+	struct perf_callchain_entry *entry = data;
+
+	perf_callchain_store(entry, address);
+	return 0;
 }

 void perf_callchain_kernel(struct perf_callchain_entry *entry,
			   struct pt_regs *regs)
 {
-	unsigned long head, frame_size;
-	struct stack_frame *head_sf;
-
	if (user_mode(regs))
		return;
-
-	frame_size = STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
-	head = regs->gprs[15];
-	head_sf = (struct stack_frame *) head;
-
-	if (!head_sf || !head_sf->back_chain)
-		return;
-
-	head = head_sf->back_chain;
-	head = __store_trace(entry, head,
-			     S390_lowcore.async_stack + frame_size - ASYNC_SIZE,
-			     S390_lowcore.async_stack + frame_size);
-
-	__store_trace(entry, head, S390_lowcore.thread_info,
-		      S390_lowcore.thread_info + THREAD_SIZE);
+	dump_trace(__perf_callchain_kernel, entry, NULL, regs->gprs[15]);
 }

-/* Perf defintions for PMU event attributes in sysfs */
+/* Perf definitions for PMU event attributes in sysfs */
 ssize_t cpumf_events_sysfs_show(struct device *dev,
				struct device_attribute *attr, char *page)
 {
...
@@ -327,6 +327,7 @@ static void __init setup_lowcore(void)
		+ PAGE_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
	lc->current_task = (unsigned long) init_thread_union.thread_info.task;
	lc->thread_info = (unsigned long) &init_thread_union;
+	lc->lpp = LPP_MAGIC;
	lc->machine_flags = S390_lowcore.machine_flags;
	lc->stfl_fac_list = S390_lowcore.stfl_fac_list;
	memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list,
@@ -779,6 +780,7 @@ static int __init setup_hwcaps(void)
		strcpy(elf_platform, "zEC12");
		break;
	case 0x2964:
+	case 0x2965:
		strcpy(elf_platform, "z13");
		break;
	}
...
@@ -10,78 +10,39 @@
 #include <linux/kallsyms.h>
 #include <linux/module.h>

-static unsigned long save_context_stack(struct stack_trace *trace,
-					unsigned long sp,
-					unsigned long low,
-					unsigned long high,
-					int savesched)
+static int __save_address(void *data, unsigned long address, int nosched)
 {
-	struct stack_frame *sf;
-	struct pt_regs *regs;
-	unsigned long addr;
+	struct stack_trace *trace = data;

-	while(1) {
-		if (sp < low || sp > high)
-			return sp;
-		sf = (struct stack_frame *)sp;
-		while(1) {
-			addr = sf->gprs[8];
-			if (!trace->skip)
-				trace->entries[trace->nr_entries++] = addr;
-			else
-				trace->skip--;
-			if (trace->nr_entries >= trace->max_entries)
-				return sp;
-			low = sp;
-			sp = sf->back_chain;
-			if (!sp)
-				break;
-			if (sp <= low || sp > high - sizeof(*sf))
-				return sp;
-			sf = (struct stack_frame *)sp;
-		}
-		/* Zero backchain detected, check for interrupt frame. */
-		sp = (unsigned long)(sf + 1);
-		if (sp <= low || sp > high - sizeof(*regs))
-			return sp;
-		regs = (struct pt_regs *)sp;
-		addr = regs->psw.addr;
-		if (savesched || !in_sched_functions(addr)) {
-			if (!trace->skip)
-				trace->entries[trace->nr_entries++] = addr;
-			else
-				trace->skip--;
-		}
-		if (trace->nr_entries >= trace->max_entries)
-			return sp;
-		low = sp;
-		sp = regs->gprs[15];
+	if (nosched && in_sched_functions(address))
+		return 0;
+	if (trace->skip > 0) {
+		trace->skip--;
+		return 0;
	}
+	if (trace->nr_entries < trace->max_entries) {
+		trace->entries[trace->nr_entries++] = address;
+		return 0;
+	}
+	return 1;
 }

-static void __save_stack_trace(struct stack_trace *trace, unsigned long sp)
+static int save_address(void *data, unsigned long address)
 {
-	unsigned long new_sp, frame_size;
+	return __save_address(data, address, 0);
+}

-	frame_size = STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
-	new_sp = save_context_stack(trace, sp,
-			S390_lowcore.panic_stack + frame_size - PAGE_SIZE,
-			S390_lowcore.panic_stack + frame_size, 1);
-	new_sp = save_context_stack(trace, new_sp,
-			S390_lowcore.async_stack + frame_size - ASYNC_SIZE,
-			S390_lowcore.async_stack + frame_size, 1);
-	save_context_stack(trace, new_sp,
-			   S390_lowcore.thread_info,
-			   S390_lowcore.thread_info + THREAD_SIZE, 1);
+static int save_address_nosched(void *data, unsigned long address)
+{
+	return __save_address(data, address, 1);
 }

 void save_stack_trace(struct stack_trace *trace)
 {
-	register unsigned long r15 asm ("15");
	unsigned long sp;

-	sp = r15;
-	__save_stack_trace(trace, sp);
+	sp = current_stack_pointer();
+	dump_trace(save_address, trace, NULL, sp);
	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = ULONG_MAX;
 }
@@ -89,16 +50,12 @@ EXPORT_SYMBOL_GPL(save_stack_trace);

 void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
 {
-	unsigned long sp, low, high;
+	unsigned long sp;

	sp = tsk->thread.ksp;
-	if (tsk == current) {
-		/* Get current stack pointer. */
-		asm volatile("la %0,0(15)" : "=a" (sp));
-	}
-	low = (unsigned long) task_stack_page(tsk);
-	high = (unsigned long) task_pt_regs(tsk);
-	save_context_stack(trace, sp, low, high, 0);
+	if (tsk == current)
+		sp = current_stack_pointer();
+	dump_trace(save_address_nosched, trace, tsk, sp);
	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = ULONG_MAX;
 }
@@ -109,7 +66,7 @@ void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
	unsigned long sp;

	sp = kernel_stack_pointer(regs);
-	__save_stack_trace(trace, sp);
+	dump_trace(save_address, trace, NULL, sp);
	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = ULONG_MAX;
 }
...
@@ -499,8 +499,7 @@ static void etr_reset(void)
		if (etr_port0_online && etr_port1_online)
			set_bit(CLOCK_SYNC_ETR, &clock_sync_flags);
	} else if (etr_port0_online || etr_port1_online) {
-		pr_warning("The real or virtual hardware system does "
-			   "not provide an ETR interface\n");
+		pr_warn("The real or virtual hardware system does not provide an ETR interface\n");
		etr_port0_online = etr_port1_online = 0;
	}
 }
@@ -1464,8 +1463,7 @@ static void __init stp_reset(void)
	if (rc == 0)
		set_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags);
	else if (stp_online) {
-		pr_warning("The real or virtual hardware system does "
-			   "not provide an STP interface\n");
+		pr_warn("The real or virtual hardware system does not provide an STP interface\n");
		free_page((unsigned long) stp_page);
		stp_page = NULL;
		stp_online = 0;
...
@@ -22,8 +22,6 @@
 #include <asm/fpu/api.h>
 #include "entry.h"

-int show_unhandled_signals = 1;
-
 static inline void __user *get_trap_ip(struct pt_regs *regs)
 {
	unsigned long address;
@@ -35,21 +33,6 @@ static inline void __user *get_trap_ip(struct pt_regs *regs)
	return (void __user *) (address - (regs->int_code >> 16));
 }

-static inline void report_user_fault(struct pt_regs *regs, int signr)
-{
-	if ((task_pid_nr(current) > 1) && !show_unhandled_signals)
-		return;
-	if (!unhandled_signal(current, signr))
-		return;
-	if (!printk_ratelimit())
-		return;
-	printk("User process fault: interruption code %04x ilc:%d ",
-	       regs->int_code & 0xffff, regs->int_code >> 17);
-	print_vma_addr("in ", regs->psw.addr);
-	printk("\n");
-	show_regs(regs);
-}
-
 int is_valid_bugaddr(unsigned long addr)
 {
	return 1;
@@ -65,7 +48,7 @@ void do_report_trap(struct pt_regs *regs, int si_signo, int si_code, char *str)
		info.si_code = si_code;
		info.si_addr = get_trap_ip(regs);
		force_sig_info(si_signo, &info, current);
-		report_user_fault(regs, si_signo);
+		report_user_fault(regs, si_signo, 0);
	} else {
		const struct exception_table_entry *fixup;
		fixup = search_exception_tables(regs->psw.addr);
@@ -111,7 +94,7 @@ NOKPROBE_SYMBOL(do_per_trap);
 void default_trap_handler(struct pt_regs *regs)
 {
	if (user_mode(regs)) {
-		report_user_fault(regs, SIGSEGV);
+		report_user_fault(regs, SIGSEGV, 0);
		do_exit(SIGSEGV);
	} else
		die(regs, "Unknown program exception");
...
@@ -14,6 +14,7 @@
 #include <linux/kvm.h>
 #include <linux/kvm_host.h>
 #include <asm/pgalloc.h>
+#include <asm/gmap.h>
 #include <asm/virtio-ccw.h>
 #include "kvm-s390.h"
 #include "trace.h"
...
@@ -17,7 +17,7 @@
 /*
  * Extends the address range given by *start and *stop to include the address
  * range starting with estart and the length len. Takes care of overflowing
- * intervals and tries to minimize the overall intervall size.
+ * intervals and tries to minimize the overall interval size.
  */
 static void extend_address_range(u64 *start, u64 *stop, u64 estart, int len)
 {
@@ -72,7 +72,7 @@ static void enable_all_hw_bp(struct kvm_vcpu *vcpu)
		return;

	/*
-	 * If the guest is not interrested in branching events, we can savely
+	 * If the guest is not interested in branching events, we can safely
	 * limit them to the PER address range.
	 */
	if (!(*cr9 & PER_EVENT_BRANCH))
...
@@ -23,6 +23,7 @@
 #include <asm/uaccess.h>
 #include <asm/sclp.h>
 #include <asm/isc.h>
+#include <asm/gmap.h>
 #include "kvm-s390.h"
 #include "gaccess.h"
 #include "trace-s390.h"
...
@@ -30,6 +30,7 @@
 #include <asm/lowcore.h>
 #include <asm/etr.h>
 #include <asm/pgtable.h>
+#include <asm/gmap.h>
 #include <asm/nmi.h>
 #include <asm/switch_to.h>
 #include <asm/isc.h>
@@ -281,7 +282,7 @@ static void kvm_s390_sync_dirty_log(struct kvm *kvm,
	for (cur_gfn = memslot->base_gfn; cur_gfn <= last_gfn; cur_gfn++) {
		address = gfn_to_hva_memslot(memslot, cur_gfn);

-		if (gmap_test_and_clear_dirty(address, gmap))
+		if (test_and_clear_guest_dirty(gmap->mm, address))
			mark_page_dirty(kvm, cur_gfn);
		if (fatal_signal_pending(current))
			return;
...
@@ -23,6 +23,7 @@
 #include <asm/sysinfo.h>
 #include <asm/pgtable.h>
 #include <asm/pgalloc.h>
+#include <asm/gmap.h>
 #include <asm/io.h>
 #include <asm/ptrace.h>
 #include <asm/compat.h>
...
@@ -3,7 +3,7 @@
 #

 lib-y += delay.o string.o uaccess.o find.o
-obj-y += mem.o
+obj-y += mem.o xor.o
 lib-$(CONFIG_SMP) += spinlock.o
 lib-$(CONFIG_KPROBES) += probes.o
 lib-$(CONFIG_UPROBES) += probes.o
/*
* Optimized xor_block operation for RAID4/5
*
* Copyright IBM Corp. 2016
* Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
*/
#include <linux/types.h>
#include <linux/module.h>
#include <linux/raid/xor.h>
static void xor_xc_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
{
	asm volatile(
		"	larl	1,2f\n"
		"	aghi	%0,-1\n"
		"	jm	3f\n"
		"	srlg	0,%0,8\n"
		"	ltgr	0,0\n"
		"	jz	1f\n"
		"0:	xc	0(256,%1),0(%2)\n"
		"	la	%1,256(%1)\n"
		"	la	%2,256(%2)\n"
		"	brctg	0,0b\n"
		"1:	ex	%0,0(1)\n"
		"	j	3f\n"
		"2:	xc	0(1,%1),0(%2)\n"
		"3:\n"
		: : "d" (bytes), "a" (p1), "a" (p2)
		: "0", "1", "cc", "memory");
}

static void xor_xc_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
		     unsigned long *p3)
{
	asm volatile(
		"	larl	1,2f\n"
		"	aghi	%0,-1\n"
		"	jm	3f\n"
		"	srlg	0,%0,8\n"
		"	ltgr	0,0\n"
		"	jz	1f\n"
		"0:	xc	0(256,%1),0(%2)\n"
		"	xc	0(256,%1),0(%3)\n"
		"	la	%1,256(%1)\n"
		"	la	%2,256(%2)\n"
		"	la	%3,256(%3)\n"
		"	brctg	0,0b\n"
		"1:	ex	%0,0(1)\n"
		"	ex	%0,6(1)\n"
		"	j	3f\n"
		"2:	xc	0(1,%1),0(%2)\n"
		"	xc	0(1,%1),0(%3)\n"
		"3:\n"
		: "+d" (bytes), "+a" (p1), "+a" (p2), "+a" (p3)
		: : "0", "1", "cc", "memory");
}

static void xor_xc_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
		     unsigned long *p3, unsigned long *p4)
{
	asm volatile(
		"	larl	1,2f\n"
		"	aghi	%0,-1\n"
		"	jm	3f\n"
		"	srlg	0,%0,8\n"
		"	ltgr	0,0\n"
		"	jz	1f\n"
		"0:	xc	0(256,%1),0(%2)\n"
		"	xc	0(256,%1),0(%3)\n"
		"	xc	0(256,%1),0(%4)\n"
		"	la	%1,256(%1)\n"
		"	la	%2,256(%2)\n"
		"	la	%3,256(%3)\n"
		"	la	%4,256(%4)\n"
		"	brctg	0,0b\n"
		"1:	ex	%0,0(1)\n"
		"	ex	%0,6(1)\n"
		"	ex	%0,12(1)\n"
		"	j	3f\n"
		"2:	xc	0(1,%1),0(%2)\n"
		"	xc	0(1,%1),0(%3)\n"
		"	xc	0(1,%1),0(%4)\n"
		"3:\n"
		: "+d" (bytes), "+a" (p1), "+a" (p2), "+a" (p3), "+a" (p4)
		: : "0", "1", "cc", "memory");
}

static void xor_xc_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
		     unsigned long *p3, unsigned long *p4, unsigned long *p5)
{
	/* Get around a gcc oddity */
	register unsigned long *reg7 asm ("7") = p5;

	asm volatile(
		"	larl	1,2f\n"
		"	aghi	%0,-1\n"
		"	jm	3f\n"
		"	srlg	0,%0,8\n"
		"	ltgr	0,0\n"
		"	jz	1f\n"
		"0:	xc	0(256,%1),0(%2)\n"
		"	xc	0(256,%1),0(%3)\n"
		"	xc	0(256,%1),0(%4)\n"
		"	xc	0(256,%1),0(%5)\n"
		"	la	%1,256(%1)\n"
		"	la	%2,256(%2)\n"
		"	la	%3,256(%3)\n"
		"	la	%4,256(%4)\n"
		"	la	%5,256(%5)\n"
		"	brctg	0,0b\n"
		"1:	ex	%0,0(1)\n"
		"	ex	%0,6(1)\n"
		"	ex	%0,12(1)\n"
		"	ex	%0,18(1)\n"
		"	j	3f\n"
		"2:	xc	0(1,%1),0(%2)\n"
		"	xc	0(1,%1),0(%3)\n"
		"	xc	0(1,%1),0(%4)\n"
		"	xc	0(1,%1),0(%5)\n"
		"3:\n"
		: "+d" (bytes), "+a" (p1), "+a" (p2), "+a" (p3), "+a" (p4),
		  "+a" (reg7)
		: : "0", "1", "cc", "memory");
}

struct xor_block_template xor_block_xc = {
	.name = "xc",
	.do_2 = xor_xc_2,
	.do_3 = xor_xc_3,
	.do_4 = xor_xc_4,
	.do_5 = xor_xc_5,
};
EXPORT_SYMBOL(xor_block_xc);
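
For illustration, a direct call into the template (hypothetical test-module code, not part of this merge; the RAID code normally reaches these routines through xor_blocks() once XOR_SELECT_TEMPLATE has picked xor_block_xc):

/* Hypothetical sketch: XOR two 4 KiB buffers in place, a ^= b,
 * using the XC-based routine registered above. */
static void xor_xc_sketch(void)
{
	static unsigned long a[512], b[512];

	xor_block_xc.do_2(sizeof(a), a, b);
}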
@@ -2,9 +2,11 @@
 # Makefile for the linux s390-specific parts of the memory manager.
 #

-obj-y		:= init.o fault.o extmem.o mmap.o vmem.o pgtable.o maccess.o
+obj-y		:= init.o fault.o extmem.o mmap.o vmem.o maccess.o
 obj-y		+= page-states.o gup.o extable.o pageattr.o mem_detect.o
+obj-y		+= pgtable.o pgalloc.o

 obj-$(CONFIG_CMM)		+= cmm.o
 obj-$(CONFIG_HUGETLB_PAGE)	+= hugetlbpage.o
 obj-$(CONFIG_S390_PTDUMP)	+= dump_pagetables.o
+obj-$(CONFIG_PGSTE)		+= gmap.o
@@ -265,7 +265,7 @@ query_segment_type (struct dcss_segment *seg)
		goto out_free;
	}
	if (diag_cc > 1) {
-		pr_warning("Querying a DCSS type failed with rc=%ld\n", vmrc);
+		pr_warn("Querying a DCSS type failed with rc=%ld\n", vmrc);
		rc = dcss_diag_translate_rc (vmrc);
		goto out_free;
	}
@@ -457,8 +457,7 @@ __segment_load (char *name, int do_nonshared, unsigned long *addr, unsigned long
		goto out_resource;
	}
	if (diag_cc > 1) {
-		pr_warning("Loading DCSS %s failed with rc=%ld\n", name,
-			   end_addr);
+		pr_warn("Loading DCSS %s failed with rc=%ld\n", name, end_addr);
		rc = dcss_diag_translate_rc(end_addr);
		dcss_diag(&purgeseg_scode, seg->dcss_name,
				&dummy, &dummy);
@@ -574,8 +573,7 @@ segment_modify_shared (char *name, int do_nonshared)
		goto out_unlock;
	}
	if (atomic_read (&seg->ref_count) != 1) {
-		pr_warning("DCSS %s is in use and cannot be reloaded\n",
-			   name);
+		pr_warn("DCSS %s is in use and cannot be reloaded\n", name);
		rc = -EAGAIN;
		goto out_unlock;
	}
@@ -588,8 +586,8 @@ segment_modify_shared (char *name, int do_nonshared)
	seg->res->flags |= IORESOURCE_READONLY;
	if (request_resource(&iomem_resource, seg->res)) {
-		pr_warning("DCSS %s overlaps with used memory resources "
-			   "and cannot be reloaded\n", name);
+		pr_warn("DCSS %s overlaps with used memory resources and cannot be reloaded\n",
+			name);
		rc = -EBUSY;
		kfree(seg->res);
		goto out_del_mem;
@@ -607,8 +605,8 @@ segment_modify_shared (char *name, int do_nonshared)
		goto out_del_res;
	}
	if (diag_cc > 1) {
-		pr_warning("Reloading DCSS %s failed with rc=%ld\n", name,
-			   end_addr);
+		pr_warn("Reloading DCSS %s failed with rc=%ld\n",
+			name, end_addr);
		rc = dcss_diag_translate_rc(end_addr);
		goto out_del_res;
	}
...
@@ -32,6 +32,7 @@
 #include <asm/asm-offsets.h>
 #include <asm/diag.h>
 #include <asm/pgtable.h>
+#include <asm/gmap.h>
 #include <asm/irq.h>
 #include <asm/mmu_context.h>
 #include <asm/facility.h>
@@ -183,6 +184,8 @@ static void dump_fault_info(struct pt_regs *regs)
 {
	unsigned long asce;

+	pr_alert("Failing address: %016lx TEID: %016lx\n",
+		 regs->int_parm_long & __FAIL_ADDR_MASK, regs->int_parm_long);
	pr_alert("Fault in ");
	switch (regs->int_parm_long & 3) {
	case 3:
@@ -218,7 +221,9 @@ static void dump_fault_info(struct pt_regs *regs)
	dump_pagetable(asce, regs->int_parm_long & __FAIL_ADDR_MASK);
 }

-static inline void report_user_fault(struct pt_regs *regs, long signr)
+int show_unhandled_signals = 1;
+
+void report_user_fault(struct pt_regs *regs, long signr, int is_mm_fault)
 {
	if ((task_pid_nr(current) > 1) && !show_unhandled_signals)
		return;
@@ -230,8 +235,7 @@ static inline void report_user_fault(struct pt_regs *regs, long signr)
	       regs->int_code & 0xffff, regs->int_code >> 17);
	print_vma_addr(KERN_CONT "in ", regs->psw.addr);
	printk(KERN_CONT "\n");
-	printk(KERN_ALERT "failing address: %016lx TEID: %016lx\n",
-	       regs->int_parm_long & __FAIL_ADDR_MASK, regs->int_parm_long);
+	if (is_mm_fault)
		dump_fault_info(regs);
	show_regs(regs);
 }
@@ -244,7 +248,7 @@ static noinline void do_sigsegv(struct pt_regs *regs, int si_code)
 {
	struct siginfo si;

-	report_user_fault(regs, SIGSEGV);
+	report_user_fault(regs, SIGSEGV, 1);
	si.si_signo = SIGSEGV;
	si.si_code = si_code;
	si.si_addr = (void __user *)(regs->int_parm_long & __FAIL_ADDR_MASK);
@@ -272,8 +276,6 @@ static noinline void do_no_context(struct pt_regs *regs)
	else
		printk(KERN_ALERT "Unable to handle kernel paging request"
		       " in virtual user address space\n");
-	printk(KERN_ALERT "failing address: %016lx TEID: %016lx\n",
-	       regs->int_parm_long & __FAIL_ADDR_MASK, regs->int_parm_long);
	dump_fault_info(regs);
	die(regs, "Oops");
	do_exit(SIGKILL);
...
...@@ -105,11 +105,10 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm, ...@@ -105,11 +105,10 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
unsigned long addr, pte_t *ptep) unsigned long addr, pte_t *ptep)
{ {
pmd_t *pmdp = (pmd_t *) ptep; pmd_t *pmdp = (pmd_t *) ptep;
pte_t pte = huge_ptep_get(ptep); pmd_t old;
pmdp_flush_direct(mm, addr, pmdp); old = pmdp_xchg_direct(mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY; return __pmd_to_pte(old);
return pte;
} }
pte_t *huge_pte_alloc(struct mm_struct *mm, pte_t *huge_pte_alloc(struct mm_struct *mm,
......
...@@ -65,19 +65,17 @@ static pte_t *walk_page_table(unsigned long addr) ...@@ -65,19 +65,17 @@ static pte_t *walk_page_table(unsigned long addr)
static void change_page_attr(unsigned long addr, int numpages, static void change_page_attr(unsigned long addr, int numpages,
pte_t (*set) (pte_t)) pte_t (*set) (pte_t))
{ {
pte_t *ptep, pte; pte_t *ptep;
int i; int i;
for (i = 0; i < numpages; i++) { for (i = 0; i < numpages; i++) {
ptep = walk_page_table(addr); ptep = walk_page_table(addr);
if (WARN_ON_ONCE(!ptep)) if (WARN_ON_ONCE(!ptep))
break; break;
pte = *ptep; *ptep = set(*ptep);
pte = set(pte);
__ptep_ipte(addr, ptep);
*ptep = pte;
addr += PAGE_SIZE; addr += PAGE_SIZE;
} }
__tlb_flush_kernel();
} }
int set_memory_ro(unsigned long addr, int numpages) int set_memory_ro(unsigned long addr, int numpages)
......
/*
* Page table allocation functions
*
* Copyright IBM Corp. 2016
* Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
*/
#include <linux/mm.h>
#include <linux/sysctl.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/gmap.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
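/*
 * With CONFIG_PGSTE the vm.allocate_pgste sysctl below selects whether
 * new page tables are allocated as full 4K pages carrying PGSTEs, as
 * required to run KVM guests, instead of the default 2K fragments.
 */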
#ifdef CONFIG_PGSTE
static int page_table_allocate_pgste_min = 0;
static int page_table_allocate_pgste_max = 1;
int page_table_allocate_pgste = 0;
EXPORT_SYMBOL(page_table_allocate_pgste);
static struct ctl_table page_table_sysctl[] = {
{
.procname = "allocate_pgste",
.data = &page_table_allocate_pgste,
.maxlen = sizeof(int),
.mode = S_IRUGO | S_IWUSR,
.proc_handler = proc_dointvec,
.extra1 = &page_table_allocate_pgste_min,
.extra2 = &page_table_allocate_pgste_max,
},
{ }
};
static struct ctl_table page_table_sysctl_dir[] = {
{
.procname = "vm",
.maxlen = 0,
.mode = 0555,
.child = page_table_sysctl,
},
{ }
};
static int __init page_table_register_sysctl(void)
{
return register_sysctl_table(page_table_sysctl_dir) ? 0 : -ENOMEM;
}
__initcall(page_table_register_sysctl);
#endif /* CONFIG_PGSTE */
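/*
 * Region and segment (crst) tables are 16K in size: an order-2 page
 * allocation holding 2048 eight-byte entries.
 */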
unsigned long *crst_table_alloc(struct mm_struct *mm)
{
struct page *page = alloc_pages(GFP_KERNEL, 2);
if (!page)
return NULL;
return (unsigned long *) page_to_phys(page);
}
void crst_table_free(struct mm_struct *mm, unsigned long *table)
{
free_pages((unsigned long) table, 2);
}
static void __crst_table_upgrade(void *arg)
{
struct mm_struct *mm = arg;
if (current->active_mm == mm) {
clear_user_asce();
set_user_asce(mm);
}
__tlb_flush_local();
}
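/*
 * Grow the page table hierarchy by one or more levels: a 2G address
 * space (segment table) is upgraded to 4T (region-third table), a 4T
 * space to 8P (region-second table). Concurrent upgrades may race,
 * hence the retry loop and the re-check under mm->page_table_lock.
 */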
int crst_table_upgrade(struct mm_struct *mm, unsigned long limit)
{
unsigned long *table, *pgd;
unsigned long entry;
int flush;
BUG_ON(limit > TASK_MAX_SIZE);
flush = 0;
repeat:
table = crst_table_alloc(mm);
if (!table)
return -ENOMEM;
spin_lock_bh(&mm->page_table_lock);
if (mm->context.asce_limit < limit) {
pgd = (unsigned long *) mm->pgd;
if (mm->context.asce_limit <= (1UL << 31)) {
entry = _REGION3_ENTRY_EMPTY;
mm->context.asce_limit = 1UL << 42;
mm->context.asce_bits = _ASCE_TABLE_LENGTH |
_ASCE_USER_BITS |
_ASCE_TYPE_REGION3;
} else {
entry = _REGION2_ENTRY_EMPTY;
mm->context.asce_limit = 1UL << 53;
mm->context.asce_bits = _ASCE_TABLE_LENGTH |
_ASCE_USER_BITS |
_ASCE_TYPE_REGION2;
}
crst_table_init(table, entry);
pgd_populate(mm, (pgd_t *) table, (pud_t *) pgd);
mm->pgd = (pgd_t *) table;
mm->task_size = mm->context.asce_limit;
table = NULL;
flush = 1;
}
spin_unlock_bh(&mm->page_table_lock);
if (table)
crst_table_free(mm, table);
if (mm->context.asce_limit < limit)
goto repeat;
if (flush)
on_each_cpu(__crst_table_upgrade, mm, 0);
return 0;
}
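/*
 * Strip top-level region tables again, typically when a 31-bit compat
 * binary is executed; each detached top-level table is freed once the
 * asce no longer points to it.
 */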
void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
{
pgd_t *pgd;
if (current->active_mm == mm) {
clear_user_asce();
__tlb_flush_mm(mm);
}
while (mm->context.asce_limit > limit) {
pgd = mm->pgd;
switch (pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) {
case _REGION_ENTRY_TYPE_R2:
mm->context.asce_limit = 1UL << 42;
mm->context.asce_bits = _ASCE_TABLE_LENGTH |
_ASCE_USER_BITS |
_ASCE_TYPE_REGION3;
break;
case _REGION_ENTRY_TYPE_R3:
mm->context.asce_limit = 1UL << 31;
mm->context.asce_bits = _ASCE_TABLE_LENGTH |
_ASCE_USER_BITS |
_ASCE_TYPE_SEGMENT;
break;
default:
BUG();
}
mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN);
mm->task_size = mm->context.asce_limit;
crst_table_free(mm, (unsigned long *) pgd);
}
if (current->active_mm == mm)
set_user_asce(mm);
}
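/*
 * Atomically toggle @bits in @v with a compare-and-swap loop and
 * return the new value; used below to flip the in-use and
 * pending-free markers in page->_mapcount.
 */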
static inline unsigned int atomic_xor_bits(atomic_t *v, unsigned int bits)
{
unsigned int old, new;
do {
old = atomic_read(v);
new = old ^ bits;
} while (atomic_cmpxchg(v, old, new) != old);
return new;
}
/*
* page table entry allocation/free routines.
*/
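/*
 * A 4K page can carry two 2K page tables. page->_mapcount tracks the
 * state of both halves: bits 0-1 mark a fragment as allocated, bits
 * 4-5 mark a fragment as freed but still waiting for the RCU grace
 * period (see page_table_free_rcu). A 4K table with PGSTEs occupies
 * the whole page and sets both low bits at once.
 */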
unsigned long *page_table_alloc(struct mm_struct *mm)
{
unsigned long *table;
struct page *page;
unsigned int mask, bit;
/* Try to get a fragment of a 4K page as a 2K page table */
if (!mm_alloc_pgste(mm)) {
table = NULL;
spin_lock_bh(&mm->context.list_lock);
if (!list_empty(&mm->context.pgtable_list)) {
page = list_first_entry(&mm->context.pgtable_list,
struct page, lru);
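/*
 * Fold the pending-free bits into the in-use bits: a 2K
 * fragment may only be handed out if both are clear.
 */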
mask = atomic_read(&page->_mapcount);
mask = (mask | (mask >> 4)) & 3;
if (mask != 3) {
table = (unsigned long *) page_to_phys(page);
bit = mask & 1; /* =1 -> second 2K */
if (bit)
table += PTRS_PER_PTE;
atomic_xor_bits(&page->_mapcount, 1U << bit);
list_del(&page->lru);
}
}
spin_unlock_bh(&mm->context.list_lock);
if (table)
return table;
}
/* Allocate a fresh page */
page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
if (!page)
return NULL;
if (!pgtable_page_ctor(page)) {
__free_page(page);
return NULL;
}
/* Initialize page table */
table = (unsigned long *) page_to_phys(page);
if (mm_alloc_pgste(mm)) {
/* Return 4K page table with PGSTEs */
atomic_set(&page->_mapcount, 3);
clear_table(table, _PAGE_INVALID, PAGE_SIZE/2);
clear_table(table + PTRS_PER_PTE, 0, PAGE_SIZE/2);
} else {
/* Return the first 2K fragment of the page */
atomic_set(&page->_mapcount, 1);
clear_table(table, _PAGE_INVALID, PAGE_SIZE);
spin_lock_bh(&mm->context.list_lock);
list_add(&page->lru, &mm->context.pgtable_list);
spin_unlock_bh(&mm->context.list_lock);
}
return table;
}
void page_table_free(struct mm_struct *mm, unsigned long *table)
{
struct page *page;
unsigned int bit, mask;
page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
if (!mm_alloc_pgste(mm)) {
/* Free 2K page table fragment of a 4K page */
bit = (__pa(table) & ~PAGE_MASK)/(PTRS_PER_PTE*sizeof(pte_t));
spin_lock_bh(&mm->context.list_lock);
mask = atomic_xor_bits(&page->_mapcount, 1U << bit);
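/*
 * If the sibling fragment is still in use, keep the page on the
 * list so its now-free half can be reused; otherwise take the page
 * off the list and, once no bits remain set, release it below.
 */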
if (mask & 3)
list_add(&page->lru, &mm->context.pgtable_list);
else
list_del(&page->lru);
spin_unlock_bh(&mm->context.list_lock);
if (mask != 0)
return;
}
pgtable_page_dtor(page);
atomic_set(&page->_mapcount, -1);
__free_page(page);
}
void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table,
unsigned long vmaddr)
{
struct mm_struct *mm;
struct page *page;
unsigned int bit, mask;
mm = tlb->mm;
page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
if (mm_alloc_pgste(mm)) {
gmap_unlink(mm, table, vmaddr);
table = (unsigned long *) (__pa(table) | 3);
tlb_remove_table(tlb, table);
return;
}
bit = (__pa(table) & ~PAGE_MASK) / (PTRS_PER_PTE*sizeof(pte_t));
spin_lock_bh(&mm->context.list_lock);
mask = atomic_xor_bits(&page->_mapcount, 0x11U << bit);
if (mask & 3)
list_add_tail(&page->lru, &mm->context.pgtable_list);
else
list_del(&page->lru);
spin_unlock_bh(&mm->context.list_lock);
table = (unsigned long *) (__pa(table) | (1U << bit));
tlb_remove_table(tlb, table);
}
static void __tlb_remove_table(void *_table)
{
unsigned int mask = (unsigned long) _table & 3;
void *table = (void *)((unsigned long) _table ^ mask);
struct page *page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
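/*
 * The two low bits of the tagged pointer encode what is being freed:
 * 0 - a 16K crst (pmd/pud) table, 1/2 - the lower/upper 2K fragment
 * of a page table page, 3 - a 4K page table with PGSTEs.
 */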
switch (mask) {
case 0: /* pmd or pud */
free_pages((unsigned long) table, 2);
break;
case 1: /* lower 2K of a 4K page table */
case 2: /* higher 2K of a 4K page table */
if (atomic_xor_bits(&page->_mapcount, mask << 4) != 0)
break;
/* fallthrough */
case 3: /* 4K page table with pgstes */
pgtable_page_dtor(page);
atomic_set(&page->_mapcount, -1);
__free_page(page);
break;
}
}
static void tlb_remove_table_smp_sync(void *arg)
{
/* Simply deliver the interrupt */
}
static void tlb_remove_table_one(void *table)
{
/*
* This isn't an RCU grace period and hence the page-tables cannot be
* assumed to be actually RCU-freed.
*
* It is however sufficient for software page-table walkers that rely
* on IRQ disabling. See the comment near struct mmu_table_batch.
*/
smp_call_function(tlb_remove_table_smp_sync, NULL, 1);
__tlb_remove_table(table);
}
static void tlb_remove_table_rcu(struct rcu_head *head)
{
struct mmu_table_batch *batch;
int i;
batch = container_of(head, struct mmu_table_batch, rcu);
for (i = 0; i < batch->nr; i++)
__tlb_remove_table(batch->tables[i]);
free_page((unsigned long)batch);
}
void tlb_table_flush(struct mmu_gather *tlb)
{
struct mmu_table_batch **batch = &tlb->batch;
if (*batch) {
call_rcu_sched(&(*batch)->rcu, tlb_remove_table_rcu);
*batch = NULL;
}
}
void tlb_remove_table(struct mmu_gather *tlb, void *table)
{
struct mmu_table_batch **batch = &tlb->batch;
tlb->mm->context.flush_mm = 1;
if (*batch == NULL) {
*batch = (struct mmu_table_batch *)
__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
if (*batch == NULL) {
__tlb_flush_mm_lazy(tlb->mm);
tlb_remove_table_one(table);
return;
}
(*batch)->nr = 0;
}
(*batch)->tables[(*batch)->nr++] = table;
if ((*batch)->nr == MAX_TABLE_BATCH)
tlb_flush_mmu(tlb);
}
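As a quick illustration of how the batching above is meant to be driven (a hypothetical sketch, not part of this patch: the function name example_free_pte_table is invented, while page_table_free_rcu and tlb_flush_mmu are the interfaces used in this file and in the s390 mmu_gather code):

static void example_free_pte_table(struct mmu_gather *tlb,
				   unsigned long *table, unsigned long vmaddr)
{
	/* Tag the table pointer and queue it; the actual free is
	 * deferred to an RCU grace period via tlb_table_flush(). */
	page_table_free_rcu(tlb, table, vmaddr);
	/* Flushing the gather also pushes out the pending table batch. */
	tlb_flush_mmu(tlb);
}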
...@@ -6,5 +6,5 @@ DRIVER_OBJS = $(addprefix ../../../drivers/oprofile/, \ ...@@ -6,5 +6,5 @@ DRIVER_OBJS = $(addprefix ../../../drivers/oprofile/, \
oprofilefs.o oprofile_stats.o \ oprofilefs.o oprofile_stats.o \
timer_int.o ) timer_int.o )
oprofile-y := $(DRIVER_OBJS) init.o backtrace.o oprofile-y := $(DRIVER_OBJS) init.o
oprofile-y += hwsampler.o oprofile-y += hwsampler.o
...@@ -20,8 +20,6 @@ ...@@ -20,8 +20,6 @@
#include "../../../drivers/oprofile/oprof.h" #include "../../../drivers/oprofile/oprof.h"
extern void s390_backtrace(struct pt_regs * const regs, unsigned int depth);
#include "hwsampler.h" #include "hwsampler.h"
#include "op_counter.h" #include "op_counter.h"
...@@ -456,6 +454,7 @@ static int oprofile_hwsampler_init(struct oprofile_operations *ops) ...@@ -456,6 +454,7 @@ static int oprofile_hwsampler_init(struct oprofile_operations *ops)
case 0x2097: case 0x2098: ops->cpu_type = "s390/z10"; break; case 0x2097: case 0x2098: ops->cpu_type = "s390/z10"; break;
case 0x2817: case 0x2818: ops->cpu_type = "s390/z196"; break; case 0x2817: case 0x2818: ops->cpu_type = "s390/z196"; break;
case 0x2827: case 0x2828: ops->cpu_type = "s390/zEC12"; break; case 0x2827: case 0x2828: ops->cpu_type = "s390/zEC12"; break;
case 0x2964: case 0x2965: ops->cpu_type = "s390/z13"; break;
default: return -ENODEV; default: return -ENODEV;
} }
} }
...@@ -494,6 +493,24 @@ static void oprofile_hwsampler_exit(void) ...@@ -494,6 +493,24 @@ static void oprofile_hwsampler_exit(void)
hwsampler_shutdown(); hwsampler_shutdown();
} }
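/*
 * dump_trace() invokes this callback for every frame on the kernel
 * stack; returning non-zero stops the walk once the requested depth
 * has been consumed.
 */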
static int __s390_backtrace(void *data, unsigned long address)
{
unsigned int *depth = data;
if (*depth == 0)
return 1;
(*depth)--;
oprofile_add_trace(address);
return 0;
}
static void s390_backtrace(struct pt_regs *regs, unsigned int depth)
{
if (user_mode(regs))
return;
dump_trace(__s390_backtrace, &depth, NULL, regs->gprs[15]);
}
int __init oprofile_arch_init(struct oprofile_operations *ops) int __init oprofile_arch_init(struct oprofile_operations *ops)
{ {
ops->backtrace = s390_backtrace; ops->backtrace = s390_backtrace;
......
...@@ -128,10 +128,9 @@ static const struct file_operations debugfs_pci_perf_fops = { ...@@ -128,10 +128,9 @@ static const struct file_operations debugfs_pci_perf_fops = {
.release = single_release, .release = single_release,
}; };
void zpci_debug_init_device(struct zpci_dev *zdev) void zpci_debug_init_device(struct zpci_dev *zdev, const char *name)
{ {
zdev->debugfs_dev = debugfs_create_dir(dev_name(&zdev->pdev->dev), zdev->debugfs_dev = debugfs_create_dir(name, debugfs_root);
debugfs_root);
if (IS_ERR(zdev->debugfs_dev)) if (IS_ERR(zdev->debugfs_dev))
zdev->debugfs_dev = NULL; zdev->debugfs_dev = NULL;
......
...@@ -178,8 +178,8 @@ int dasd_gendisk_init(void) ...@@ -178,8 +178,8 @@ int dasd_gendisk_init(void)
/* Register to static dasd major 94 */ /* Register to static dasd major 94 */
rc = register_blkdev(DASD_MAJOR, "dasd"); rc = register_blkdev(DASD_MAJOR, "dasd");
if (rc != 0) { if (rc != 0) {
pr_warning("Registering the device driver with major number " pr_warn("Registering the device driver with major number %d failed\n",
"%d failed\n", DASD_MAJOR); DASD_MAJOR);
return rc; return rc;
} }
return 0; return 0;
......