Commit 8b646bd7 authored by Martin Schwidefsky

[S390] rework smp code

Define struct pcpu and merge some of the NR_CPUS arrays into it, including
__cpu_logical_map, current_set and smp_cpu_state. Split the smp functions
into those operating on a physical cpu and those operating on a logical
cpu number, and make the functions for physical cpus take a pointer to a
struct pcpu. This confines the knowledge about physical cpu addresses to
smp.c, entry[64].S and swsusp_asm64.S, which allows the sigp.h header to
be removed.
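
The consolidation can be sketched in C as follows; this is a minimal
illustration of the idea, not the exact structure from the patch (only the
address and state fields follow directly from the description above, the
remaining field names are assumptions):

    struct pcpu {
        struct _lowcore *lowcore;   /* lowcore page(s) of the cpu */
        unsigned long async_stack;  /* async stack of the cpu */
        int state;                  /* physical cpu state (was smp_cpu_state) */
        u16 address;                /* physical cpu address (was __cpu_logical_map) */
    };

    static struct pcpu pcpu_devices[NR_CPUS];

    /* Functions on logical cpus resolve the cpu number once and then
     * hand a struct pcpu pointer to the physical-cpu functions. */
    static inline struct pcpu *pcpu_of(int cpu)
    {
        return pcpu_devices + cpu;
    }

Instead of indexing several parallel NR_CPUS arrays with a logical cpu
number, all per-cpu state is reached through a single structure.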

The PSW restart mechanism is used to start secondary cpus, to call a
function on an online cpu, to call a function on the ipl cpu, and for
the nmi signal. Replace the different assembler functions with a single
entry point, restart_int_handler. The new entry point calls a function
whose pointer is stored in the lowcore of the target cpu, and it can
wait for the source cpu to stop. This covers all existing use cases.
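
On the C side the mechanism reduces to filling a fn/data/source triple in
the target cpu's lowcore and raising a restart interrupt. A hedged sketch
with illustrative names (delegate_to_cpu and sigp_retry are assumed
helpers, not necessarily those used in the patch):

    static void delegate_to_cpu(struct _lowcore *lc, u16 target, long source,
                                void (*func)(void *), void *data)
    {
        lc->restart_fn = (unsigned long) func;
        lc->restart_data = (unsigned long) data;
        /* A negative restart_source tells restart_int_handler not to
         * wait for a source cpu to enter the stopped state. */
        lc->restart_source = (unsigned long) source;
        /* Raise the restart interrupt on the target cpu; the handler
         * loads fn, data and source from __LC_RESTART_FN, optionally
         * sigp-senses the source cpu until it has stopped, and then
         * calls func(data) on the restart stack. */
        sigp_retry(target, 6 /* sigp restart */);
    }

This mirrors what the new restart_int_handler in entry.S and entry64.S
does after switching to the restart stack.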

Overall the code is now simpler and ~380 lines shorter.
Reviewed-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
parent 7e180bd8
/* /*
* Copyright IBM Corp. 1999,2010 * Copyright IBM Corp. 1999,2012
* Author(s): Hartmut Penner <hp@de.ibm.com>, * Author(s): Hartmut Penner <hp@de.ibm.com>,
* Martin Schwidefsky <schwidefsky@de.ibm.com>, * Martin Schwidefsky <schwidefsky@de.ibm.com>,
* Denis Joseph Barrow, * Denis Joseph Barrow,
...@@ -12,14 +12,6 @@ ...@@ -12,14 +12,6 @@
#include <asm/ptrace.h> #include <asm/ptrace.h>
#include <asm/cpu.h> #include <asm/cpu.h>
void restart_int_handler(void);
void ext_int_handler(void);
void system_call(void);
void pgm_check_handler(void);
void mcck_int_handler(void);
void io_int_handler(void);
void psw_restart_int_handler(void);
#ifdef CONFIG_32BIT #ifdef CONFIG_32BIT
#define LC_ORDER 0 #define LC_ORDER 0
...@@ -117,32 +109,37 @@ struct _lowcore { ...@@ -117,32 +109,37 @@ struct _lowcore {
__u64 steal_timer; /* 0x0288 */ __u64 steal_timer; /* 0x0288 */
__u64 last_update_timer; /* 0x0290 */ __u64 last_update_timer; /* 0x0290 */
__u64 last_update_clock; /* 0x0298 */ __u64 last_update_clock; /* 0x0298 */
__u64 int_clock; /* 0x02a0 */
__u64 mcck_clock; /* 0x02a8 */
__u64 clock_comparator; /* 0x02b0 */
/* Current process. */ /* Current process. */
__u32 current_task; /* 0x02a0 */ __u32 current_task; /* 0x02b8 */
__u32 thread_info; /* 0x02a4 */ __u32 thread_info; /* 0x02bc */
__u32 kernel_stack; /* 0x02a8 */ __u32 kernel_stack; /* 0x02c0 */
/* Interrupt, panic and restart stack. */
__u32 async_stack; /* 0x02c4 */
__u32 panic_stack; /* 0x02c8 */
__u32 restart_stack; /* 0x02cc */
/* Interrupt and panic stack. */ /* Restart function and parameter. */
__u32 async_stack; /* 0x02ac */ __u32 restart_fn; /* 0x02d0 */
__u32 panic_stack; /* 0x02b0 */ __u32 restart_data; /* 0x02d4 */
__u32 restart_source; /* 0x02d8 */
/* Address space pointer. */ /* Address space pointer. */
__u32 kernel_asce; /* 0x02b4 */ __u32 kernel_asce; /* 0x02dc */
__u32 user_asce; /* 0x02b8 */ __u32 user_asce; /* 0x02e0 */
__u32 current_pid; /* 0x02bc */ __u32 current_pid; /* 0x02e4 */
/* SMP info area */ /* SMP info area */
__u32 cpu_nr; /* 0x02c0 */ __u32 cpu_nr; /* 0x02e8 */
__u32 softirq_pending; /* 0x02c4 */ __u32 softirq_pending; /* 0x02ec */
__u32 percpu_offset; /* 0x02c8 */ __u32 percpu_offset; /* 0x02f0 */
__u32 ext_call_fast; /* 0x02cc */ __u32 machine_flags; /* 0x02f4 */
__u64 int_clock; /* 0x02d0 */ __u32 ftrace_func; /* 0x02f8 */
__u64 mcck_clock; /* 0x02d8 */ __u8 pad_0x02fc[0x0300-0x02fc]; /* 0x02fc */
__u64 clock_comparator; /* 0x02e0 */
__u32 machine_flags; /* 0x02e8 */
__u32 ftrace_func; /* 0x02ec */
__u8 pad_0x02f8[0x0300-0x02f0]; /* 0x02f0 */
/* Interrupt response block */ /* Interrupt response block */
__u8 irb[64]; /* 0x0300 */ __u8 irb[64]; /* 0x0300 */
...@@ -254,34 +251,39 @@ struct _lowcore { ...@@ -254,34 +251,39 @@ struct _lowcore {
__u64 steal_timer; /* 0x02e0 */ __u64 steal_timer; /* 0x02e0 */
__u64 last_update_timer; /* 0x02e8 */ __u64 last_update_timer; /* 0x02e8 */
__u64 last_update_clock; /* 0x02f0 */ __u64 last_update_clock; /* 0x02f0 */
__u64 int_clock; /* 0x02f8 */
__u64 mcck_clock; /* 0x0300 */
__u64 clock_comparator; /* 0x0308 */
/* Current process. */ /* Current process. */
__u64 current_task; /* 0x02f8 */ __u64 current_task; /* 0x0310 */
__u64 thread_info; /* 0x0300 */ __u64 thread_info; /* 0x0318 */
__u64 kernel_stack; /* 0x0308 */ __u64 kernel_stack; /* 0x0320 */
/* Interrupt, panic and restart stack. */
__u64 async_stack; /* 0x0328 */
__u64 panic_stack; /* 0x0330 */
__u64 restart_stack; /* 0x0338 */
/* Interrupt and panic stack. */ /* Restart function and parameter. */
__u64 async_stack; /* 0x0310 */ __u64 restart_fn; /* 0x0340 */
__u64 panic_stack; /* 0x0318 */ __u64 restart_data; /* 0x0348 */
__u64 restart_source; /* 0x0350 */
/* Address space pointer. */ /* Address space pointer. */
__u64 kernel_asce; /* 0x0320 */ __u64 kernel_asce; /* 0x0358 */
__u64 user_asce; /* 0x0328 */ __u64 user_asce; /* 0x0360 */
__u64 current_pid; /* 0x0330 */ __u64 current_pid; /* 0x0368 */
/* SMP info area */ /* SMP info area */
__u32 cpu_nr; /* 0x0338 */ __u32 cpu_nr; /* 0x0370 */
__u32 softirq_pending; /* 0x033c */ __u32 softirq_pending; /* 0x0374 */
__u64 percpu_offset; /* 0x0340 */ __u64 percpu_offset; /* 0x0378 */
__u64 ext_call_fast; /* 0x0348 */ __u64 vdso_per_cpu_data; /* 0x0380 */
__u64 int_clock; /* 0x0350 */ __u64 machine_flags; /* 0x0388 */
__u64 mcck_clock; /* 0x0358 */ __u64 ftrace_func; /* 0x0390 */
__u64 clock_comparator; /* 0x0360 */ __u64 gmap; /* 0x0398 */
__u64 vdso_per_cpu_data; /* 0x0368 */ __u8 pad_0x03a0[0x0400-0x03a0]; /* 0x03a0 */
__u64 machine_flags; /* 0x0370 */
__u64 ftrace_func; /* 0x0378 */
__u64 gmap; /* 0x0380 */
__u8 pad_0x0388[0x0400-0x0388]; /* 0x0388 */
/* Interrupt response block. */ /* Interrupt response block. */
__u8 irb[64]; /* 0x0400 */ __u8 irb[64]; /* 0x0400 */
......
/*
* Routines and structures for signalling other processors.
*
* Copyright IBM Corp. 1999,2010
* Author(s): Denis Joseph Barrow,
* Martin Schwidefsky <schwidefsky@de.ibm.com>,
* Heiko Carstens <heiko.carstens@de.ibm.com>,
*/
#ifndef __ASM_SIGP_H
#define __ASM_SIGP_H
#include <asm/system.h>
/* Get real cpu address from logical cpu number. */
extern unsigned short __cpu_logical_map[];
static inline int cpu_logical_map(int cpu)
{
#ifdef CONFIG_SMP
return __cpu_logical_map[cpu];
#else
return stap();
#endif
}
enum {
sigp_sense = 1,
sigp_external_call = 2,
sigp_emergency_signal = 3,
sigp_start = 4,
sigp_stop = 5,
sigp_restart = 6,
sigp_stop_and_store_status = 9,
sigp_initial_cpu_reset = 11,
sigp_cpu_reset = 12,
sigp_set_prefix = 13,
sigp_store_status_at_address = 14,
sigp_store_extended_status_at_address = 15,
sigp_set_architecture = 18,
sigp_conditional_emergency_signal = 19,
sigp_sense_running = 21,
};
enum {
sigp_order_code_accepted = 0,
sigp_status_stored = 1,
sigp_busy = 2,
sigp_not_operational = 3,
};
/*
* Definitions for external call.
*/
enum {
ec_schedule = 0,
ec_call_function,
ec_call_function_single,
ec_stop_cpu,
};
/*
* Signal processor.
*/
static inline int raw_sigp(u16 cpu, int order)
{
register unsigned long reg1 asm ("1") = 0;
int ccode;
asm volatile(
" sigp %1,%2,0(%3)\n"
" ipm %0\n"
" srl %0,28\n"
: "=d" (ccode)
: "d" (reg1), "d" (cpu),
"a" (order) : "cc" , "memory");
return ccode;
}
/*
* Signal processor with parameter.
*/
static inline int raw_sigp_p(u32 parameter, u16 cpu, int order)
{
register unsigned int reg1 asm ("1") = parameter;
int ccode;
asm volatile(
" sigp %1,%2,0(%3)\n"
" ipm %0\n"
" srl %0,28\n"
: "=d" (ccode)
: "d" (reg1), "d" (cpu),
"a" (order) : "cc" , "memory");
return ccode;
}
/*
* Signal processor with parameter and return status.
*/
static inline int raw_sigp_ps(u32 *status, u32 parm, u16 cpu, int order)
{
register unsigned int reg1 asm ("1") = parm;
int ccode;
asm volatile(
" sigp %1,%2,0(%3)\n"
" ipm %0\n"
" srl %0,28\n"
: "=d" (ccode), "+d" (reg1)
: "d" (cpu), "a" (order)
: "cc" , "memory");
*status = reg1;
return ccode;
}
static inline int sigp(int cpu, int order)
{
return raw_sigp(cpu_logical_map(cpu), order);
}
static inline int sigp_p(u32 parameter, int cpu, int order)
{
return raw_sigp_p(parameter, cpu_logical_map(cpu), order);
}
static inline int sigp_ps(u32 *status, u32 parm, int cpu, int order)
{
return raw_sigp_ps(status, parm, cpu_logical_map(cpu), order);
}
#endif /* __ASM_SIGP_H */
/* /*
* Copyright IBM Corp. 1999,2009 * Copyright IBM Corp. 1999,2012
* Author(s): Denis Joseph Barrow, * Author(s): Denis Joseph Barrow,
* Martin Schwidefsky <schwidefsky@de.ibm.com>, * Martin Schwidefsky <schwidefsky@de.ibm.com>,
* Heiko Carstens <heiko.carstens@de.ibm.com>, * Heiko Carstens <heiko.carstens@de.ibm.com>,
...@@ -10,71 +10,52 @@ ...@@ -10,71 +10,52 @@
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
#include <asm/system.h> #include <asm/system.h>
#include <asm/sigp.h>
extern void machine_restart_smp(char *);
extern void machine_halt_smp(void);
extern void machine_power_off_smp(void);
#define raw_smp_processor_id() (S390_lowcore.cpu_nr) #define raw_smp_processor_id() (S390_lowcore.cpu_nr)
extern int __cpu_disable (void);
extern void __cpu_die (unsigned int cpu);
extern int __cpu_up (unsigned int cpu);
extern struct mutex smp_cpu_state_mutex; extern struct mutex smp_cpu_state_mutex;
extern struct save_area *zfcpdump_save_areas[NR_CPUS + 1];
extern int __cpu_up(unsigned int cpu);
extern void arch_send_call_function_single_ipi(int cpu); extern void arch_send_call_function_single_ipi(int cpu);
extern void arch_send_call_function_ipi_mask(const struct cpumask *mask); extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
extern struct save_area *zfcpdump_save_areas[NR_CPUS + 1]; extern void smp_call_online_cpu(void (*func)(void *), void *);
extern void smp_call_ipl_cpu(void (*func)(void *), void *);
extern void smp_switch_to_ipl_cpu(void (*func)(void *), void *);
extern void smp_switch_to_cpu(void (*)(void *), void *, unsigned long sp,
int from, int to);
extern void smp_restart_with_online_cpu(void);
extern void smp_restart_cpu(void);
/* extern int smp_find_processor_id(u16 address);
* returns 1 if (virtual) cpu is scheduled extern int smp_store_status(int cpu);
* returns 0 otherwise extern int smp_vcpu_scheduled(int cpu);
*/ extern void smp_yield_cpu(int cpu);
static inline int smp_vcpu_scheduled(int cpu) extern void smp_yield(void);
{ extern void smp_stop_cpu(void);
u32 status;
switch (sigp_ps(&status, 0, cpu, sigp_sense_running)) {
case sigp_status_stored:
/* Check for running status */
if (status & 0x400)
return 0;
break;
case sigp_not_operational:
return 0;
default:
break;
}
return 1;
}
#else /* CONFIG_SMP */ #else /* CONFIG_SMP */
static inline void smp_switch_to_ipl_cpu(void (*func)(void *), void *data) static inline void smp_call_ipl_cpu(void (*func)(void *), void *data)
{ {
func(data); func(data);
} }
static inline void smp_restart_with_online_cpu(void) static inline void smp_call_online_cpu(void (*func)(void *), void *data)
{ {
func(data);
} }
#define smp_vcpu_scheduled (1) static inline int smp_find_processor_id(int address) { return 0; }
static inline int smp_vcpu_scheduled(int cpu) { return 1; }
static inline void smp_yield_cpu(int cpu) { }
static inline void smp_yield(void) { }
static inline void smp_stop_cpu(void) { }
#endif /* CONFIG_SMP */ #endif /* CONFIG_SMP */
#ifdef CONFIG_HOTPLUG_CPU #ifdef CONFIG_HOTPLUG_CPU
extern int smp_rescan_cpus(void); extern int smp_rescan_cpus(void);
extern void __noreturn cpu_die(void); extern void __noreturn cpu_die(void);
extern void __cpu_die(unsigned int cpu);
extern int __cpu_disable(void);
#else #else
static inline int smp_rescan_cpus(void) { return 0; } static inline int smp_rescan_cpus(void) { return 0; }
static inline void cpu_die(void) { } static inline void cpu_die(void) { }
......
...@@ -40,8 +40,8 @@ struct vdso_per_cpu_data { ...@@ -40,8 +40,8 @@ struct vdso_per_cpu_data {
extern struct vdso_data *vdso_data; extern struct vdso_data *vdso_data;
#ifdef CONFIG_64BIT #ifdef CONFIG_64BIT
int vdso_alloc_per_cpu(int cpu, struct _lowcore *lowcore); int vdso_alloc_per_cpu(struct _lowcore *lowcore);
void vdso_free_per_cpu(int cpu, struct _lowcore *lowcore); void vdso_free_per_cpu(struct _lowcore *lowcore);
#endif #endif
#endif /* __ASSEMBLY__ */ #endif /* __ASSEMBLY__ */
......
...@@ -34,8 +34,6 @@ extra-y += $(if $(CONFIG_64BIT),head64.o,head31.o) ...@@ -34,8 +34,6 @@ extra-y += $(if $(CONFIG_64BIT),head64.o,head31.o)
obj-$(CONFIG_MODULES) += s390_ksyms.o module.o obj-$(CONFIG_MODULES) += s390_ksyms.o module.o
obj-$(CONFIG_SMP) += smp.o obj-$(CONFIG_SMP) += smp.o
obj-$(CONFIG_SCHED_BOOK) += topology.o obj-$(CONFIG_SCHED_BOOK) += topology.o
obj-$(CONFIG_SMP) += $(if $(CONFIG_64BIT),switch_cpu64.o, \
switch_cpu.o)
obj-$(CONFIG_HIBERNATION) += suspend.o swsusp_asm64.o obj-$(CONFIG_HIBERNATION) += suspend.o swsusp_asm64.o
obj-$(CONFIG_AUDIT) += audit.o obj-$(CONFIG_AUDIT) += audit.o
compat-obj-$(CONFIG_AUDIT) += compat_audit.o compat-obj-$(CONFIG_AUDIT) += compat_audit.o
......
...@@ -9,8 +9,8 @@ ...@@ -9,8 +9,8 @@
#include <linux/kbuild.h> #include <linux/kbuild.h>
#include <linux/sched.h> #include <linux/sched.h>
#include <asm/vdso.h> #include <asm/vdso.h>
#include <asm/sigp.h>
#include <asm/pgtable.h> #include <asm/pgtable.h>
#include <asm/system.h>
/* /*
* Make sure that the compiler is new enough. We want a compiler that * Make sure that the compiler is new enough. We want a compiler that
...@@ -70,12 +70,6 @@ int main(void) ...@@ -70,12 +70,6 @@ int main(void)
DEFINE(__CLOCK_MONOTONIC, CLOCK_MONOTONIC); DEFINE(__CLOCK_MONOTONIC, CLOCK_MONOTONIC);
DEFINE(__CLOCK_REALTIME_RES, MONOTONIC_RES_NSEC); DEFINE(__CLOCK_REALTIME_RES, MONOTONIC_RES_NSEC);
BLANK(); BLANK();
/* constants for SIGP */
DEFINE(__SIGP_STOP, sigp_stop);
DEFINE(__SIGP_RESTART, sigp_restart);
DEFINE(__SIGP_SENSE, sigp_sense);
DEFINE(__SIGP_INITIAL_CPU_RESET, sigp_initial_cpu_reset);
BLANK();
/* lowcore offsets */ /* lowcore offsets */
DEFINE(__LC_EXT_PARAMS, offsetof(struct _lowcore, ext_params)); DEFINE(__LC_EXT_PARAMS, offsetof(struct _lowcore, ext_params));
DEFINE(__LC_EXT_CPU_ADDR, offsetof(struct _lowcore, ext_cpu_addr)); DEFINE(__LC_EXT_CPU_ADDR, offsetof(struct _lowcore, ext_cpu_addr));
...@@ -95,20 +89,19 @@ int main(void) ...@@ -95,20 +89,19 @@ int main(void)
DEFINE(__LC_IO_INT_WORD, offsetof(struct _lowcore, io_int_word)); DEFINE(__LC_IO_INT_WORD, offsetof(struct _lowcore, io_int_word));
DEFINE(__LC_STFL_FAC_LIST, offsetof(struct _lowcore, stfl_fac_list)); DEFINE(__LC_STFL_FAC_LIST, offsetof(struct _lowcore, stfl_fac_list));
DEFINE(__LC_MCCK_CODE, offsetof(struct _lowcore, mcck_interruption_code)); DEFINE(__LC_MCCK_CODE, offsetof(struct _lowcore, mcck_interruption_code));
DEFINE(__LC_DUMP_REIPL, offsetof(struct _lowcore, ipib));
BLANK();
DEFINE(__LC_RST_NEW_PSW, offsetof(struct _lowcore, restart_psw));
DEFINE(__LC_RST_OLD_PSW, offsetof(struct _lowcore, restart_old_psw)); DEFINE(__LC_RST_OLD_PSW, offsetof(struct _lowcore, restart_old_psw));
DEFINE(__LC_EXT_OLD_PSW, offsetof(struct _lowcore, external_old_psw)); DEFINE(__LC_EXT_OLD_PSW, offsetof(struct _lowcore, external_old_psw));
DEFINE(__LC_SVC_OLD_PSW, offsetof(struct _lowcore, svc_old_psw)); DEFINE(__LC_SVC_OLD_PSW, offsetof(struct _lowcore, svc_old_psw));
DEFINE(__LC_PGM_OLD_PSW, offsetof(struct _lowcore, program_old_psw)); DEFINE(__LC_PGM_OLD_PSW, offsetof(struct _lowcore, program_old_psw));
DEFINE(__LC_MCK_OLD_PSW, offsetof(struct _lowcore, mcck_old_psw)); DEFINE(__LC_MCK_OLD_PSW, offsetof(struct _lowcore, mcck_old_psw));
DEFINE(__LC_IO_OLD_PSW, offsetof(struct _lowcore, io_old_psw)); DEFINE(__LC_IO_OLD_PSW, offsetof(struct _lowcore, io_old_psw));
DEFINE(__LC_RST_NEW_PSW, offsetof(struct _lowcore, restart_psw));
DEFINE(__LC_EXT_NEW_PSW, offsetof(struct _lowcore, external_new_psw)); DEFINE(__LC_EXT_NEW_PSW, offsetof(struct _lowcore, external_new_psw));
DEFINE(__LC_SVC_NEW_PSW, offsetof(struct _lowcore, svc_new_psw)); DEFINE(__LC_SVC_NEW_PSW, offsetof(struct _lowcore, svc_new_psw));
DEFINE(__LC_PGM_NEW_PSW, offsetof(struct _lowcore, program_new_psw)); DEFINE(__LC_PGM_NEW_PSW, offsetof(struct _lowcore, program_new_psw));
DEFINE(__LC_MCK_NEW_PSW, offsetof(struct _lowcore, mcck_new_psw)); DEFINE(__LC_MCK_NEW_PSW, offsetof(struct _lowcore, mcck_new_psw));
DEFINE(__LC_IO_NEW_PSW, offsetof(struct _lowcore, io_new_psw)); DEFINE(__LC_IO_NEW_PSW, offsetof(struct _lowcore, io_new_psw));
BLANK();
DEFINE(__LC_SAVE_AREA_SYNC, offsetof(struct _lowcore, save_area_sync)); DEFINE(__LC_SAVE_AREA_SYNC, offsetof(struct _lowcore, save_area_sync));
DEFINE(__LC_SAVE_AREA_ASYNC, offsetof(struct _lowcore, save_area_async)); DEFINE(__LC_SAVE_AREA_ASYNC, offsetof(struct _lowcore, save_area_async));
DEFINE(__LC_SAVE_AREA_RESTART, offsetof(struct _lowcore, save_area_restart)); DEFINE(__LC_SAVE_AREA_RESTART, offsetof(struct _lowcore, save_area_restart));
...@@ -129,12 +122,16 @@ int main(void) ...@@ -129,12 +122,16 @@ int main(void)
DEFINE(__LC_KERNEL_STACK, offsetof(struct _lowcore, kernel_stack)); DEFINE(__LC_KERNEL_STACK, offsetof(struct _lowcore, kernel_stack));
DEFINE(__LC_ASYNC_STACK, offsetof(struct _lowcore, async_stack)); DEFINE(__LC_ASYNC_STACK, offsetof(struct _lowcore, async_stack));
DEFINE(__LC_PANIC_STACK, offsetof(struct _lowcore, panic_stack)); DEFINE(__LC_PANIC_STACK, offsetof(struct _lowcore, panic_stack));
DEFINE(__LC_RESTART_STACK, offsetof(struct _lowcore, restart_stack));
DEFINE(__LC_RESTART_FN, offsetof(struct _lowcore, restart_fn));
DEFINE(__LC_USER_ASCE, offsetof(struct _lowcore, user_asce)); DEFINE(__LC_USER_ASCE, offsetof(struct _lowcore, user_asce));
DEFINE(__LC_INT_CLOCK, offsetof(struct _lowcore, int_clock)); DEFINE(__LC_INT_CLOCK, offsetof(struct _lowcore, int_clock));
DEFINE(__LC_MCCK_CLOCK, offsetof(struct _lowcore, mcck_clock)); DEFINE(__LC_MCCK_CLOCK, offsetof(struct _lowcore, mcck_clock));
DEFINE(__LC_MACHINE_FLAGS, offsetof(struct _lowcore, machine_flags)); DEFINE(__LC_MACHINE_FLAGS, offsetof(struct _lowcore, machine_flags));
DEFINE(__LC_FTRACE_FUNC, offsetof(struct _lowcore, ftrace_func)); DEFINE(__LC_FTRACE_FUNC, offsetof(struct _lowcore, ftrace_func));
DEFINE(__LC_IRB, offsetof(struct _lowcore, irb)); DEFINE(__LC_IRB, offsetof(struct _lowcore, irb));
DEFINE(__LC_DUMP_REIPL, offsetof(struct _lowcore, ipib));
BLANK();
DEFINE(__LC_CPU_TIMER_SAVE_AREA, offsetof(struct _lowcore, cpu_timer_save_area)); DEFINE(__LC_CPU_TIMER_SAVE_AREA, offsetof(struct _lowcore, cpu_timer_save_area));
DEFINE(__LC_CLOCK_COMP_SAVE_AREA, offsetof(struct _lowcore, clock_comp_save_area)); DEFINE(__LC_CLOCK_COMP_SAVE_AREA, offsetof(struct _lowcore, clock_comp_save_area));
DEFINE(__LC_PSW_SAVE_AREA, offsetof(struct _lowcore, psw_save_area)); DEFINE(__LC_PSW_SAVE_AREA, offsetof(struct _lowcore, psw_save_area));
......
...@@ -2,7 +2,7 @@ ...@@ -2,7 +2,7 @@
* arch/s390/kernel/entry.S * arch/s390/kernel/entry.S
* S390 low-level entry points. * S390 low-level entry points.
* *
* Copyright (C) IBM Corp. 1999,2006 * Copyright (C) IBM Corp. 1999,2012
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com), * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
* Hartmut Penner (hp@de.ibm.com), * Hartmut Penner (hp@de.ibm.com),
* Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com), * Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
...@@ -691,77 +691,30 @@ mcck_panic: ...@@ -691,77 +691,30 @@ mcck_panic:
0: ahi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) 0: ahi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
j mcck_skip j mcck_skip
/*
* Restart interruption handler, kick starter for additional CPUs
*/
#ifdef CONFIG_SMP
__CPUINIT
ENTRY(restart_int_handler)
basr %r1,0
restart_base:
spt restart_vtime-restart_base(%r1)
stck __LC_LAST_UPDATE_CLOCK
mvc __LC_LAST_UPDATE_TIMER(8),restart_vtime-restart_base(%r1)
mvc __LC_EXIT_TIMER(8),restart_vtime-restart_base(%r1)
l %r15,__LC_GPREGS_SAVE_AREA+60 # load ksp
lctl %c0,%c15,__LC_CREGS_SAVE_AREA # get new ctl regs
lam %a0,%a15,__LC_AREGS_SAVE_AREA
lm %r6,%r15,__SF_GPRS(%r15)# load registers from clone
l %r1,__LC_THREAD_INFO
mvc __LC_USER_TIMER(8),__TI_user_timer(%r1)
mvc __LC_SYSTEM_TIMER(8),__TI_system_timer(%r1)
xc __LC_STEAL_TIMER(8),__LC_STEAL_TIMER
ssm __LC_PGM_NEW_PSW # turn dat on, keep irqs off
basr %r14,0
l %r14,restart_addr-.(%r14)
basr %r14,%r14 # call start_secondary
restart_addr:
.long start_secondary
.align 8
restart_vtime:
.long 0x7fffffff,0xffffffff
.previous
#else
/*
* If we do not run with SMP enabled, let the new CPU crash ...
*/
ENTRY(restart_int_handler)
basr %r1,0
restart_base:
lpsw restart_crash-restart_base(%r1)
.align 8
restart_crash:
.long 0x000a0000,0x00000000
restart_go:
#endif
# #
# PSW restart interrupt handler # PSW restart interrupt handler
# #
ENTRY(psw_restart_int_handler) ENTRY(restart_int_handler)
st %r15,__LC_SAVE_AREA_RESTART st %r15,__LC_SAVE_AREA_RESTART
basr %r15,0 l %r15,__LC_RESTART_STACK
0: l %r15,.Lrestart_stack-0b(%r15) # load restart stack
l %r15,0(%r15)
ahi %r15,-__PT_SIZE # create pt_regs on stack ahi %r15,-__PT_SIZE # create pt_regs on stack
xc 0(__PT_SIZE,%r15),0(%r15)
stm %r0,%r14,__PT_R0(%r15) stm %r0,%r14,__PT_R0(%r15)
mvc __PT_R15(4,%r15),__LC_SAVE_AREA_RESTART mvc __PT_R15(4,%r15),__LC_SAVE_AREA_RESTART
mvc __PT_PSW(8,%r15),__LC_RST_OLD_PSW # store restart old psw mvc __PT_PSW(8,%r15),__LC_RST_OLD_PSW # store restart old psw
ahi %r15,-STACK_FRAME_OVERHEAD ahi %r15,-STACK_FRAME_OVERHEAD # create stack frame on stack
xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) xc 0(STACK_FRAME_OVERHEAD,%r15),0(%r15)
basr %r14,0 lm %r1,%r3,__LC_RESTART_FN # load fn, parm & source cpu
1: l %r14,.Ldo_restart-1b(%r14) ltr %r3,%r3 # test source cpu address
basr %r14,%r14 jm 1f # negative -> skip source stop
basr %r14,0 # load disabled wait PSW if 0: sigp %r4,%r3,1 # sigp sense to source cpu
2: lpsw restart_psw_crash-2b(%r14) # do_restart returns brc 10,0b # wait for status stored
.align 4 1: basr %r14,%r1 # call function
.Ldo_restart: stap __SF_EMPTY(%r15) # store cpu address
.long do_restart lh %r3,__SF_EMPTY(%r15)
.Lrestart_stack: 2: sigp %r4,%r3,5 # sigp stop to current cpu
.long restart_stack brc 2,2b
.align 8 3: j 3b
restart_psw_crash:
.long 0x000a0000,0x00000000 + restart_psw_crash
.section .kprobes.text, "ax" .section .kprobes.text, "ax"
......
...@@ -9,6 +9,14 @@ ...@@ -9,6 +9,14 @@
extern void (*pgm_check_table[128])(struct pt_regs *); extern void (*pgm_check_table[128])(struct pt_regs *);
extern void *restart_stack; extern void *restart_stack;
void system_call(void);
void pgm_check_handler(void);
void ext_int_handler(void);
void io_int_handler(void);
void mcck_int_handler(void);
void restart_int_handler(void);
void restart_call_handler(void);
asmlinkage long do_syscall_trace_enter(struct pt_regs *regs); asmlinkage long do_syscall_trace_enter(struct pt_regs *regs);
asmlinkage void do_syscall_trace_exit(struct pt_regs *regs); asmlinkage void do_syscall_trace_exit(struct pt_regs *regs);
...@@ -26,7 +34,6 @@ void do_notify_resume(struct pt_regs *regs); ...@@ -26,7 +34,6 @@ void do_notify_resume(struct pt_regs *regs);
void do_extint(struct pt_regs *regs, unsigned int, unsigned int, unsigned long); void do_extint(struct pt_regs *regs, unsigned int, unsigned int, unsigned long);
void do_restart(void); void do_restart(void);
int __cpuinit start_secondary(void *cpuvoid);
void __init startup_init(void); void __init startup_init(void);
void die(struct pt_regs *regs, const char *str); void die(struct pt_regs *regs, const char *str);
......
...@@ -2,7 +2,7 @@ ...@@ -2,7 +2,7 @@
* arch/s390/kernel/entry64.S * arch/s390/kernel/entry64.S
* S390 low-level entry points. * S390 low-level entry points.
* *
* Copyright (C) IBM Corp. 1999,2010 * Copyright (C) IBM Corp. 1999,2012
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com), * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
* Hartmut Penner (hp@de.ibm.com), * Hartmut Penner (hp@de.ibm.com),
* Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com), * Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
...@@ -713,68 +713,30 @@ mcck_panic: ...@@ -713,68 +713,30 @@ mcck_panic:
0: aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) 0: aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
j mcck_skip j mcck_skip
/*
* Restart interruption handler, kick starter for additional CPUs
*/
#ifdef CONFIG_SMP
__CPUINIT
ENTRY(restart_int_handler)
basr %r1,0
restart_base:
spt restart_vtime-restart_base(%r1)
stck __LC_LAST_UPDATE_CLOCK
mvc __LC_LAST_UPDATE_TIMER(8),restart_vtime-restart_base(%r1)
mvc __LC_EXIT_TIMER(8),restart_vtime-restart_base(%r1)
lghi %r10,__LC_GPREGS_SAVE_AREA
lg %r15,120(%r10) # load ksp
lghi %r10,__LC_CREGS_SAVE_AREA
lctlg %c0,%c15,0(%r10) # get new ctl regs
lghi %r10,__LC_AREGS_SAVE_AREA
lam %a0,%a15,0(%r10)
lmg %r6,%r15,__SF_GPRS(%r15)# load registers from clone
lg %r1,__LC_THREAD_INFO
mvc __LC_USER_TIMER(8),__TI_user_timer(%r1)
mvc __LC_SYSTEM_TIMER(8),__TI_system_timer(%r1)
xc __LC_STEAL_TIMER(8),__LC_STEAL_TIMER
ssm __LC_PGM_NEW_PSW # turn dat on, keep irqs off
brasl %r14,start_secondary
.align 8
restart_vtime:
.long 0x7fffffff,0xffffffff
.previous
#else
/*
* If we do not run with SMP enabled, let the new CPU crash ...
*/
ENTRY(restart_int_handler)
basr %r1,0
restart_base:
lpswe restart_crash-restart_base(%r1)
.align 8
restart_crash:
.long 0x000a0000,0x00000000,0x00000000,0x00000000
restart_go:
#endif
# #
# PSW restart interrupt handler # PSW restart interrupt handler
# #
ENTRY(psw_restart_int_handler) ENTRY(restart_int_handler)
stg %r15,__LC_SAVE_AREA_RESTART stg %r15,__LC_SAVE_AREA_RESTART
larl %r15,restart_stack # load restart stack lg %r15,__LC_RESTART_STACK
lg %r15,0(%r15)
aghi %r15,-__PT_SIZE # create pt_regs on stack aghi %r15,-__PT_SIZE # create pt_regs on stack
xc 0(__PT_SIZE,%r15),0(%r15)
stmg %r0,%r14,__PT_R0(%r15) stmg %r0,%r14,__PT_R0(%r15)
mvc __PT_R15(8,%r15),__LC_SAVE_AREA_RESTART mvc __PT_R15(8,%r15),__LC_SAVE_AREA_RESTART
mvc __PT_PSW(16,%r15),__LC_RST_OLD_PSW # store restart old psw mvc __PT_PSW(16,%r15),__LC_RST_OLD_PSW # store restart old psw
aghi %r15,-STACK_FRAME_OVERHEAD aghi %r15,-STACK_FRAME_OVERHEAD # create stack frame on stack
xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) xc 0(STACK_FRAME_OVERHEAD,%r15),0(%r15)
brasl %r14,do_restart lmg %r1,%r3,__LC_RESTART_FN # load fn, parm & source cpu
larl %r14,restart_psw_crash # load disabled wait PSW if ltgr %r3,%r3 # test source cpu address
lpswe 0(%r14) # do_restart returns jm 1f # negative -> skip source stop
.align 8 0: sigp %r4,%r3,1 # sigp sense to source cpu
restart_psw_crash: brc 10,0b # wait for status stored
.quad 0x0002000080000000,0x0000000000000000 + restart_psw_crash 1: basr %r14,%r1 # call function
stap __SF_EMPTY(%r15) # store cpu address
llgh %r3,__SF_EMPTY(%r15)
2: sigp %r4,%r3,5 # sigp stop to current cpu
brc 2,2b
3: j 3b
.section .kprobes.text, "ax" .section .kprobes.text, "ax"
......
...@@ -2,7 +2,7 @@ ...@@ -2,7 +2,7 @@
* arch/s390/kernel/ipl.c * arch/s390/kernel/ipl.c
* ipl/reipl/dump support for Linux on s390. * ipl/reipl/dump support for Linux on s390.
* *
* Copyright IBM Corp. 2005,2007 * Copyright IBM Corp. 2005,2012
* Author(s): Michael Holzheu <holzheu@de.ibm.com> * Author(s): Michael Holzheu <holzheu@de.ibm.com>
* Heiko Carstens <heiko.carstens@de.ibm.com> * Heiko Carstens <heiko.carstens@de.ibm.com>
* Volker Sameske <sameske@de.ibm.com> * Volker Sameske <sameske@de.ibm.com>
...@@ -25,7 +25,6 @@ ...@@ -25,7 +25,6 @@
#include <asm/ebcdic.h> #include <asm/ebcdic.h>
#include <asm/reset.h> #include <asm/reset.h>
#include <asm/sclp.h> #include <asm/sclp.h>
#include <asm/sigp.h>
#include <asm/checksum.h> #include <asm/checksum.h>
#include "entry.h" #include "entry.h"
...@@ -571,7 +570,7 @@ static void __ipl_run(void *unused) ...@@ -571,7 +570,7 @@ static void __ipl_run(void *unused)
static void ipl_run(struct shutdown_trigger *trigger) static void ipl_run(struct shutdown_trigger *trigger)
{ {
smp_switch_to_ipl_cpu(__ipl_run, NULL); smp_call_ipl_cpu(__ipl_run, NULL);
} }
static int __init ipl_init(void) static int __init ipl_init(void)
...@@ -1101,7 +1100,7 @@ static void __reipl_run(void *unused) ...@@ -1101,7 +1100,7 @@ static void __reipl_run(void *unused)
static void reipl_run(struct shutdown_trigger *trigger) static void reipl_run(struct shutdown_trigger *trigger)
{ {
smp_switch_to_ipl_cpu(__reipl_run, NULL); smp_call_ipl_cpu(__reipl_run, NULL);
} }
static void reipl_block_ccw_init(struct ipl_parameter_block *ipb) static void reipl_block_ccw_init(struct ipl_parameter_block *ipb)
...@@ -1421,7 +1420,7 @@ static void dump_run(struct shutdown_trigger *trigger) ...@@ -1421,7 +1420,7 @@ static void dump_run(struct shutdown_trigger *trigger)
if (dump_method == DUMP_METHOD_NONE) if (dump_method == DUMP_METHOD_NONE)
return; return;
smp_send_stop(); smp_send_stop();
smp_switch_to_ipl_cpu(__dump_run, NULL); smp_call_ipl_cpu(__dump_run, NULL);
} }
static int __init dump_ccw_init(void) static int __init dump_ccw_init(void)
...@@ -1623,9 +1622,7 @@ static void stop_run(struct shutdown_trigger *trigger) ...@@ -1623,9 +1622,7 @@ static void stop_run(struct shutdown_trigger *trigger)
if (strcmp(trigger->name, ON_PANIC_STR) == 0 || if (strcmp(trigger->name, ON_PANIC_STR) == 0 ||
strcmp(trigger->name, ON_RESTART_STR) == 0) strcmp(trigger->name, ON_RESTART_STR) == 0)
disabled_wait((unsigned long) __builtin_return_address(0)); disabled_wait((unsigned long) __builtin_return_address(0));
while (sigp(smp_processor_id(), sigp_stop) == sigp_busy) smp_stop_cpu();
cpu_relax();
for (;;);
} }
static struct shutdown_action stop_action = {SHUTDOWN_ACTION_STOP_STR, static struct shutdown_action stop_action = {SHUTDOWN_ACTION_STOP_STR,
...@@ -1738,9 +1735,8 @@ static ssize_t on_restart_store(struct kobject *kobj, ...@@ -1738,9 +1735,8 @@ static ssize_t on_restart_store(struct kobject *kobj,
static struct kobj_attribute on_restart_attr = static struct kobj_attribute on_restart_attr =
__ATTR(on_restart, 0644, on_restart_show, on_restart_store); __ATTR(on_restart, 0644, on_restart_show, on_restart_store);
void do_restart(void) static void __do_restart(void *ignore)
{ {
smp_restart_with_online_cpu();
smp_send_stop(); smp_send_stop();
#ifdef CONFIG_CRASH_DUMP #ifdef CONFIG_CRASH_DUMP
crash_kexec(NULL); crash_kexec(NULL);
...@@ -1749,6 +1745,11 @@ void do_restart(void) ...@@ -1749,6 +1745,11 @@ void do_restart(void)
stop_run(&on_restart_trigger); stop_run(&on_restart_trigger);
} }
void do_restart(void)
{
smp_call_online_cpu(__do_restart, NULL);
}
/* on halt */ /* on halt */
static struct shutdown_trigger on_halt_trigger = {ON_HALT_STR, &stop_action}; static struct shutdown_trigger on_halt_trigger = {ON_HALT_STR, &stop_action};
......
...@@ -48,51 +48,22 @@ static void add_elf_notes(int cpu) ...@@ -48,51 +48,22 @@ static void add_elf_notes(int cpu)
memset(ptr, 0, sizeof(struct elf_note)); memset(ptr, 0, sizeof(struct elf_note));
} }
/*
* Store status of next available physical CPU
*/
static int store_status_next(int start_cpu, int this_cpu)
{
struct save_area *sa = (void *) 4608 + store_prefix();
int cpu, rc;
for (cpu = start_cpu; cpu < 65536; cpu++) {
if (cpu == this_cpu)
continue;
do {
rc = raw_sigp(cpu, sigp_stop_and_store_status);
} while (rc == sigp_busy);
if (rc != sigp_order_code_accepted)
continue;
if (sa->pref_reg)
return cpu;
}
return -1;
}
/* /*
* Initialize CPU ELF notes * Initialize CPU ELF notes
*/ */
void setup_regs(void) void setup_regs(void)
{ {
unsigned long sa = S390_lowcore.prefixreg_save_area + SAVE_AREA_BASE; unsigned long sa = S390_lowcore.prefixreg_save_area + SAVE_AREA_BASE;
int cpu, this_cpu, phys_cpu = 0, first = 1; int cpu, this_cpu;
this_cpu = stap();
if (!S390_lowcore.prefixreg_save_area) this_cpu = smp_find_processor_id(stap());
first = 0; add_elf_notes(this_cpu);
for_each_online_cpu(cpu) { for_each_online_cpu(cpu) {
if (first) { if (cpu == this_cpu)
add_elf_notes(cpu); continue;
first = 0; if (smp_store_status(cpu))
continue; continue;
}
phys_cpu = store_status_next(phys_cpu, this_cpu);
if (phys_cpu == -1)
break;
add_elf_notes(cpu); add_elf_notes(cpu);
phys_cpu++;
} }
/* Copy dump CPU store status info to absolute zero */ /* Copy dump CPU store status info to absolute zero */
memcpy((void *) SAVE_AREA_BASE, (void *) sa, sizeof(struct save_area)); memcpy((void *) SAVE_AREA_BASE, (void *) sa, sizeof(struct save_area));
...@@ -255,5 +226,5 @@ void machine_kexec(struct kimage *image) ...@@ -255,5 +226,5 @@ void machine_kexec(struct kimage *image)
return; return;
tracer_disable(); tracer_disable();
smp_send_stop(); smp_send_stop();
smp_switch_to_ipl_cpu(__machine_kexec, image); smp_call_ipl_cpu(__machine_kexec, image);
} }
...@@ -2,7 +2,7 @@ ...@@ -2,7 +2,7 @@
* arch/s390/kernel/setup.c * arch/s390/kernel/setup.c
* *
* S390 version * S390 version
* Copyright (C) IBM Corp. 1999,2010 * Copyright (C) IBM Corp. 1999,2012
* Author(s): Hartmut Penner (hp@de.ibm.com), * Author(s): Hartmut Penner (hp@de.ibm.com),
* Martin Schwidefsky (schwidefsky@de.ibm.com) * Martin Schwidefsky (schwidefsky@de.ibm.com)
* *
...@@ -62,6 +62,7 @@ ...@@ -62,6 +62,7 @@
#include <asm/ebcdic.h> #include <asm/ebcdic.h>
#include <asm/kvm_virtio.h> #include <asm/kvm_virtio.h>
#include <asm/diag.h> #include <asm/diag.h>
#include "entry.h"
long psw_kernel_bits = PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_ASC_PRIMARY | long psw_kernel_bits = PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_ASC_PRIMARY |
PSW_MASK_EA | PSW_MASK_BA; PSW_MASK_EA | PSW_MASK_BA;
...@@ -351,8 +352,9 @@ static void setup_addressing_mode(void) ...@@ -351,8 +352,9 @@ static void setup_addressing_mode(void)
} }
} }
static void __init void *restart_stack __attribute__((__section__(".data")));
setup_lowcore(void)
static void __init setup_lowcore(void)
{ {
struct _lowcore *lc; struct _lowcore *lc;
...@@ -363,7 +365,7 @@ setup_lowcore(void) ...@@ -363,7 +365,7 @@ setup_lowcore(void)
lc = __alloc_bootmem_low(LC_PAGES * PAGE_SIZE, LC_PAGES * PAGE_SIZE, 0); lc = __alloc_bootmem_low(LC_PAGES * PAGE_SIZE, LC_PAGES * PAGE_SIZE, 0);
lc->restart_psw.mask = psw_kernel_bits; lc->restart_psw.mask = psw_kernel_bits;
lc->restart_psw.addr = lc->restart_psw.addr =
PSW_ADDR_AMODE | (unsigned long) psw_restart_int_handler; PSW_ADDR_AMODE | (unsigned long) restart_int_handler;
lc->external_new_psw.mask = psw_kernel_bits | lc->external_new_psw.mask = psw_kernel_bits |
PSW_MASK_DAT | PSW_MASK_MCHECK; PSW_MASK_DAT | PSW_MASK_MCHECK;
lc->external_new_psw.addr = lc->external_new_psw.addr =
...@@ -412,6 +414,24 @@ setup_lowcore(void) ...@@ -412,6 +414,24 @@ setup_lowcore(void)
lc->last_update_timer = S390_lowcore.last_update_timer; lc->last_update_timer = S390_lowcore.last_update_timer;
lc->last_update_clock = S390_lowcore.last_update_clock; lc->last_update_clock = S390_lowcore.last_update_clock;
lc->ftrace_func = S390_lowcore.ftrace_func; lc->ftrace_func = S390_lowcore.ftrace_func;
restart_stack = __alloc_bootmem(ASYNC_SIZE, ASYNC_SIZE, 0);
restart_stack += ASYNC_SIZE;
/*
* Set up PSW restart to call ipl.c:do_restart(). Copy the relevant
* restart data to the absolute zero lowcore. This is necessary if
* PSW restart is done on an offline CPU that has lowcore zero.
*/
lc->restart_stack = (unsigned long) restart_stack;
lc->restart_fn = (unsigned long) do_restart;
lc->restart_data = 0;
lc->restart_source = -1UL;
memcpy(&S390_lowcore.restart_stack, &lc->restart_stack,
4*sizeof(unsigned long));
copy_to_absolute_zero(&S390_lowcore.restart_psw,
&lc->restart_psw, sizeof(psw_t));
set_prefix((u32)(unsigned long) lc); set_prefix((u32)(unsigned long) lc);
lowcore_ptr[0] = lc; lowcore_ptr[0] = lc;
} }
...@@ -572,27 +592,6 @@ static void __init setup_memory_end(void) ...@@ -572,27 +592,6 @@ static void __init setup_memory_end(void)
} }
} }
void *restart_stack __attribute__((__section__(".data")));
/*
* Setup new PSW and allocate stack for PSW restart interrupt
*/
static void __init setup_restart_psw(void)
{
psw_t psw;
restart_stack = __alloc_bootmem(ASYNC_SIZE, ASYNC_SIZE, 0);
restart_stack += ASYNC_SIZE;
/*
* Setup restart PSW for absolute zero lowcore. This is necessary
* if PSW restart is done on an offline CPU that has lowcore zero
*/
psw.mask = PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_MASK_EA | PSW_MASK_BA;
psw.addr = PSW_ADDR_AMODE | (unsigned long) psw_restart_int_handler;
copy_to_absolute_zero(&S390_lowcore.restart_psw, &psw, sizeof(psw));
}
static void __init setup_vmcoreinfo(void) static void __init setup_vmcoreinfo(void)
{ {
#ifdef CONFIG_KEXEC #ifdef CONFIG_KEXEC
...@@ -782,8 +781,7 @@ static void __init reserve_crashkernel(void) ...@@ -782,8 +781,7 @@ static void __init reserve_crashkernel(void)
#endif #endif
} }
static void __init static void __init setup_memory(void)
setup_memory(void)
{ {
unsigned long bootmap_size; unsigned long bootmap_size;
unsigned long start_pfn, end_pfn; unsigned long start_pfn, end_pfn;
...@@ -1014,8 +1012,7 @@ static void __init setup_hwcaps(void) ...@@ -1014,8 +1012,7 @@ static void __init setup_hwcaps(void)
* was printed. * was printed.
*/ */
void __init void __init setup_arch(char **cmdline_p)
setup_arch(char **cmdline_p)
{ {
/* /*
* print what head.S has found out about the machine * print what head.S has found out about the machine
...@@ -1068,7 +1065,6 @@ setup_arch(char **cmdline_p) ...@@ -1068,7 +1065,6 @@ setup_arch(char **cmdline_p)
setup_memory(); setup_memory();
setup_resources(); setup_resources();
setup_vmcoreinfo(); setup_vmcoreinfo();
setup_restart_psw();
setup_lowcore(); setup_lowcore();
cpu_init(); cpu_init();
......
/*
* 31-bit switch cpu code
*
* Copyright IBM Corp. 2009
*
*/
#include <linux/linkage.h>
#include <asm/asm-offsets.h>
#include <asm/ptrace.h>
# smp_switch_to_cpu switches to destination cpu and executes the passed function
# Parameter: %r2 - function to call
# %r3 - function parameter
# %r4 - stack pointer
# %r5 - current cpu
# %r6 - destination cpu
.section .text
ENTRY(smp_switch_to_cpu)
stm %r6,%r15,__SF_GPRS(%r15)
lr %r1,%r15
ahi %r15,-STACK_FRAME_OVERHEAD
st %r1,__SF_BACKCHAIN(%r15)
basr %r13,0
0: la %r1,.gprregs_addr-0b(%r13)
l %r1,0(%r1)
stm %r0,%r15,0(%r1)
1: sigp %r0,%r6,__SIGP_RESTART /* start destination CPU */
brc 2,1b /* busy, try again */
2: sigp %r0,%r5,__SIGP_STOP /* stop current CPU */
brc 2,2b /* busy, try again */
3: j 3b
ENTRY(smp_restart_cpu)
basr %r13,0
0: la %r1,.gprregs_addr-0b(%r13)
l %r1,0(%r1)
lm %r0,%r15,0(%r1)
1: sigp %r0,%r5,__SIGP_SENSE /* Wait for calling CPU */
brc 10,1b /* busy, accepted (status 0), running */
tmll %r0,0x40 /* Test if calling CPU is stopped */
jz 1b
ltr %r4,%r4 /* New stack ? */
jz 1f
lr %r15,%r4
1: lr %r14,%r2 /* r14: Function to call */
lr %r2,%r3 /* r2 : Parameter for function*/
basr %r14,%r14 /* Call function */
.gprregs_addr:
.long .gprregs
.section .data,"aw",@progbits
.gprregs:
.rept 16
.long 0
.endr
/*
* 64-bit switch cpu code
*
* Copyright IBM Corp. 2009
*
*/
#include <linux/linkage.h>
#include <asm/asm-offsets.h>
#include <asm/ptrace.h>
# smp_switch_to_cpu switches to destination cpu and executes the passed function
# Parameter: %r2 - function to call
# %r3 - function parameter
# %r4 - stack pointer
# %r5 - current cpu
# %r6 - destination cpu
.section .text
ENTRY(smp_switch_to_cpu)
stmg %r6,%r15,__SF_GPRS(%r15)
lgr %r1,%r15
aghi %r15,-STACK_FRAME_OVERHEAD
stg %r1,__SF_BACKCHAIN(%r15)
larl %r1,.gprregs
stmg %r0,%r15,0(%r1)
1: sigp %r0,%r6,__SIGP_RESTART /* start destination CPU */
brc 2,1b /* busy, try again */
2: sigp %r0,%r5,__SIGP_STOP /* stop current CPU */
brc 2,2b /* busy, try again */
3: j 3b
ENTRY(smp_restart_cpu)
larl %r1,.gprregs
lmg %r0,%r15,0(%r1)
1: sigp %r0,%r5,__SIGP_SENSE /* Wait for calling CPU */
brc 10,1b /* busy, accepted (status 0), running */
tmll %r0,0x40 /* Test if calling CPU is stopped */
jz 1b
ltgr %r4,%r4 /* New stack ? */
jz 1f
lgr %r15,%r4
1: lgr %r14,%r2 /* r14: Function to call */
lgr %r2,%r3 /* r2 : Parameter for function*/
basr %r14,%r14 /* Call function */
.section .data,"aw",@progbits
.gprregs:
.rept 16
.quad 0
.endr
...@@ -179,7 +179,7 @@ pgm_check_entry: ...@@ -179,7 +179,7 @@ pgm_check_entry:
larl %r4,.Lrestart_suspend_psw /* Set new restart PSW */ larl %r4,.Lrestart_suspend_psw /* Set new restart PSW */
mvc __LC_RST_NEW_PSW(16,%r0),0(%r4) mvc __LC_RST_NEW_PSW(16,%r0),0(%r4)
3: 3:
sigp %r9,%r1,__SIGP_INITIAL_CPU_RESET sigp %r9,%r1,11 /* sigp initial cpu reset */
brc 8,4f /* accepted */ brc 8,4f /* accepted */
brc 2,3b /* busy, try again */ brc 2,3b /* busy, try again */
...@@ -196,10 +196,10 @@ pgm_check_entry: ...@@ -196,10 +196,10 @@ pgm_check_entry:
lpsw 0(%r3) lpsw 0(%r3)
4: 4:
/* Switch to suspend CPU */ /* Switch to suspend CPU */
sigp %r9,%r1,__SIGP_RESTART /* start suspend CPU */ sigp %r9,%r1,6 /* sigp restart to suspend CPU */
brc 2,4b /* busy, try again */ brc 2,4b /* busy, try again */
5: 5:
sigp %r9,%r2,__SIGP_STOP /* stop resume (current) CPU */ sigp %r9,%r2,5 /* sigp stop to current resume CPU */
brc 2,5b /* busy, try again */ brc 2,5b /* busy, try again */
6: j 6b 6: j 6b
...@@ -207,7 +207,7 @@ restart_suspend: ...@@ -207,7 +207,7 @@ restart_suspend:
larl %r1,.Lresume_cpu larl %r1,.Lresume_cpu
llgh %r2,0(%r1) llgh %r2,0(%r1)
7: 7:
sigp %r9,%r2,__SIGP_SENSE /* Wait for resume CPU */ sigp %r9,%r2,1 /* sigp sense, wait for resume CPU */
brc 8,7b /* accepted, status 0, still running */ brc 8,7b /* accepted, status 0, still running */
brc 2,7b /* busy, try again */ brc 2,7b /* busy, try again */
tmll %r9,0x40 /* Test if resume CPU is stopped */ tmll %r9,0x40 /* Test if resume CPU is stopped */
......
...@@ -79,12 +79,12 @@ static struct mask_info *add_cpus_to_mask(struct topology_cpu *tl_cpu, ...@@ -79,12 +79,12 @@ static struct mask_info *add_cpus_to_mask(struct topology_cpu *tl_cpu,
cpu < TOPOLOGY_CPU_BITS; cpu < TOPOLOGY_CPU_BITS;
cpu = find_next_bit(&tl_cpu->mask[0], TOPOLOGY_CPU_BITS, cpu + 1)) cpu = find_next_bit(&tl_cpu->mask[0], TOPOLOGY_CPU_BITS, cpu + 1))
{ {
unsigned int rcpu, lcpu; unsigned int rcpu;
int lcpu;
rcpu = TOPOLOGY_CPU_BITS - 1 - cpu + tl_cpu->origin; rcpu = TOPOLOGY_CPU_BITS - 1 - cpu + tl_cpu->origin;
for_each_present_cpu(lcpu) { lcpu = smp_find_processor_id(rcpu);
if (cpu_logical_map(lcpu) != rcpu) if (lcpu >= 0) {
continue;
cpumask_set_cpu(lcpu, &book->mask); cpumask_set_cpu(lcpu, &book->mask);
cpu_book_id[lcpu] = book->id; cpu_book_id[lcpu] = book->id;
cpumask_set_cpu(lcpu, &core->mask); cpumask_set_cpu(lcpu, &core->mask);
......
...@@ -88,19 +88,12 @@ static void vdso_init_data(struct vdso_data *vd) ...@@ -88,19 +88,12 @@ static void vdso_init_data(struct vdso_data *vd)
} }
#ifdef CONFIG_64BIT #ifdef CONFIG_64BIT
/*
* Setup per cpu vdso data page.
*/
static void vdso_init_per_cpu_data(int cpu, struct vdso_per_cpu_data *vpcd)
{
}
/* /*
* Allocate/free per cpu vdso data. * Allocate/free per cpu vdso data.
*/ */
#define SEGMENT_ORDER 2 #define SEGMENT_ORDER 2
int vdso_alloc_per_cpu(int cpu, struct _lowcore *lowcore) int vdso_alloc_per_cpu(struct _lowcore *lowcore)
{ {
unsigned long segment_table, page_table, page_frame; unsigned long segment_table, page_table, page_frame;
u32 *psal, *aste; u32 *psal, *aste;
...@@ -139,7 +132,6 @@ int vdso_alloc_per_cpu(int cpu, struct _lowcore *lowcore) ...@@ -139,7 +132,6 @@ int vdso_alloc_per_cpu(int cpu, struct _lowcore *lowcore)
aste[4] = (u32)(addr_t) psal; aste[4] = (u32)(addr_t) psal;
lowcore->vdso_per_cpu_data = page_frame; lowcore->vdso_per_cpu_data = page_frame;
vdso_init_per_cpu_data(cpu, (struct vdso_per_cpu_data *) page_frame);
return 0; return 0;
out: out:
...@@ -149,7 +141,7 @@ int vdso_alloc_per_cpu(int cpu, struct _lowcore *lowcore) ...@@ -149,7 +141,7 @@ int vdso_alloc_per_cpu(int cpu, struct _lowcore *lowcore)
return -ENOMEM; return -ENOMEM;
} }
void vdso_free_per_cpu(int cpu, struct _lowcore *lowcore) void vdso_free_per_cpu(struct _lowcore *lowcore)
{ {
unsigned long segment_table, page_table, page_frame; unsigned long segment_table, page_table, page_frame;
u32 *psal, *aste; u32 *psal, *aste;
...@@ -168,19 +160,15 @@ void vdso_free_per_cpu(int cpu, struct _lowcore *lowcore) ...@@ -168,19 +160,15 @@ void vdso_free_per_cpu(int cpu, struct _lowcore *lowcore)
free_pages(segment_table, SEGMENT_ORDER); free_pages(segment_table, SEGMENT_ORDER);
} }
static void __vdso_init_cr5(void *dummy) static void vdso_init_cr5(void)
{ {
unsigned long cr5; unsigned long cr5;
if (user_mode == HOME_SPACE_MODE || !vdso_enabled)
return;
cr5 = offsetof(struct _lowcore, paste); cr5 = offsetof(struct _lowcore, paste);
__ctl_load(cr5, 5, 5); __ctl_load(cr5, 5, 5);
} }
static void vdso_init_cr5(void)
{
if (user_mode != HOME_SPACE_MODE && vdso_enabled)
on_each_cpu(__vdso_init_cr5, NULL, 1);
}
#endif /* CONFIG_64BIT */ #endif /* CONFIG_64BIT */
/* /*
...@@ -322,10 +310,8 @@ static int __init vdso_init(void) ...@@ -322,10 +310,8 @@ static int __init vdso_init(void)
} }
vdso64_pagelist[vdso64_pages - 1] = virt_to_page(vdso_data); vdso64_pagelist[vdso64_pages - 1] = virt_to_page(vdso_data);
vdso64_pagelist[vdso64_pages] = NULL; vdso64_pagelist[vdso64_pages] = NULL;
#ifndef CONFIG_SMP if (vdso_alloc_per_cpu(&S390_lowcore))
if (vdso_alloc_per_cpu(0, &S390_lowcore))
BUG(); BUG();
#endif
vdso_init_cr5(); vdso_init_cr5();
#endif /* CONFIG_64BIT */ #endif /* CONFIG_64BIT */
...@@ -335,7 +321,7 @@ static int __init vdso_init(void) ...@@ -335,7 +321,7 @@ static int __init vdso_init(void)
return 0; return 0;
} }
arch_initcall(vdso_init); early_initcall(vdso_init);
int in_gate_area_no_mm(unsigned long addr) int in_gate_area_no_mm(unsigned long addr)
{ {
......
...@@ -570,6 +570,9 @@ void init_cpu_vtimer(void) ...@@ -570,6 +570,9 @@ void init_cpu_vtimer(void)
/* enable cpu timer interrupts */ /* enable cpu timer interrupts */
__ctl_set_bit(0,10); __ctl_set_bit(0,10);
/* set initial cpu timer */
set_vtimer(0x7fffffffffffffffULL);
} }
static int __cpuinit s390_nohz_notify(struct notifier_block *self, static int __cpuinit s390_nohz_notify(struct notifier_block *self,
......
...@@ -10,6 +10,7 @@ ...@@ -10,6 +10,7 @@
#include <linux/module.h> #include <linux/module.h>
#include <linux/spinlock.h> #include <linux/spinlock.h>
#include <linux/init.h> #include <linux/init.h>
#include <linux/smp.h>
#include <asm/io.h> #include <asm/io.h>
int spin_retry = 1000; int spin_retry = 1000;
...@@ -24,21 +25,6 @@ static int __init spin_retry_setup(char *str) ...@@ -24,21 +25,6 @@ static int __init spin_retry_setup(char *str)
} }
__setup("spin_retry=", spin_retry_setup); __setup("spin_retry=", spin_retry_setup);
static inline void _raw_yield(void)
{
if (MACHINE_HAS_DIAG44)
asm volatile("diag 0,0,0x44");
}
static inline void _raw_yield_cpu(int cpu)
{
if (MACHINE_HAS_DIAG9C)
asm volatile("diag %0,0,0x9c"
: : "d" (cpu_logical_map(cpu)));
else
_raw_yield();
}
void arch_spin_lock_wait(arch_spinlock_t *lp) void arch_spin_lock_wait(arch_spinlock_t *lp)
{ {
int count = spin_retry; int count = spin_retry;
...@@ -60,7 +46,7 @@ void arch_spin_lock_wait(arch_spinlock_t *lp) ...@@ -60,7 +46,7 @@ void arch_spin_lock_wait(arch_spinlock_t *lp)
} }
owner = lp->owner_cpu; owner = lp->owner_cpu;
if (owner) if (owner)
_raw_yield_cpu(~owner); smp_yield_cpu(~owner);
if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0) if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0)
return; return;
} }
...@@ -91,7 +77,7 @@ void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags) ...@@ -91,7 +77,7 @@ void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
} }
owner = lp->owner_cpu; owner = lp->owner_cpu;
if (owner) if (owner)
_raw_yield_cpu(~owner); smp_yield_cpu(~owner);
local_irq_disable(); local_irq_disable();
if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0) if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0)
return; return;
...@@ -121,7 +107,7 @@ void arch_spin_relax(arch_spinlock_t *lock) ...@@ -121,7 +107,7 @@ void arch_spin_relax(arch_spinlock_t *lock)
if (cpu != 0) { if (cpu != 0) {
if (MACHINE_IS_VM || MACHINE_IS_KVM || if (MACHINE_IS_VM || MACHINE_IS_KVM ||
!smp_vcpu_scheduled(~cpu)) !smp_vcpu_scheduled(~cpu))
_raw_yield_cpu(~cpu); smp_yield_cpu(~cpu);
} }
} }
EXPORT_SYMBOL(arch_spin_relax); EXPORT_SYMBOL(arch_spin_relax);
...@@ -133,7 +119,7 @@ void _raw_read_lock_wait(arch_rwlock_t *rw) ...@@ -133,7 +119,7 @@ void _raw_read_lock_wait(arch_rwlock_t *rw)
while (1) { while (1) {
if (count-- <= 0) { if (count-- <= 0) {
_raw_yield(); smp_yield();
count = spin_retry; count = spin_retry;
} }
if (!arch_read_can_lock(rw)) if (!arch_read_can_lock(rw))
...@@ -153,7 +139,7 @@ void _raw_read_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags) ...@@ -153,7 +139,7 @@ void _raw_read_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags)
local_irq_restore(flags); local_irq_restore(flags);
while (1) { while (1) {
if (count-- <= 0) { if (count-- <= 0) {
_raw_yield(); smp_yield();
count = spin_retry; count = spin_retry;
} }
if (!arch_read_can_lock(rw)) if (!arch_read_can_lock(rw))
...@@ -188,7 +174,7 @@ void _raw_write_lock_wait(arch_rwlock_t *rw) ...@@ -188,7 +174,7 @@ void _raw_write_lock_wait(arch_rwlock_t *rw)
while (1) { while (1) {
if (count-- <= 0) { if (count-- <= 0) {
_raw_yield(); smp_yield();
count = spin_retry; count = spin_retry;
} }
if (!arch_write_can_lock(rw)) if (!arch_write_can_lock(rw))
...@@ -206,7 +192,7 @@ void _raw_write_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags) ...@@ -206,7 +192,7 @@ void _raw_write_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags)
local_irq_restore(flags); local_irq_restore(flags);
while (1) { while (1) {
if (count-- <= 0) { if (count-- <= 0) {
_raw_yield(); smp_yield();
count = spin_retry; count = spin_retry;
} }
if (!arch_write_can_lock(rw)) if (!arch_write_can_lock(rw))
......
...@@ -15,7 +15,6 @@ ...@@ -15,7 +15,6 @@
#include <linux/reboot.h> #include <linux/reboot.h>
#include <linux/atomic.h> #include <linux/atomic.h>
#include <asm/ptrace.h> #include <asm/ptrace.h>
#include <asm/sigp.h>
#include <asm/smp.h> #include <asm/smp.h>
#include "sclp.h" #include "sclp.h"
......
...@@ -21,7 +21,6 @@ ...@@ -21,7 +21,6 @@
#include <asm/ipl.h> #include <asm/ipl.h>
#include <asm/sclp.h> #include <asm/sclp.h>
#include <asm/setup.h> #include <asm/setup.h>
#include <asm/sigp.h>
#include <asm/uaccess.h> #include <asm/uaccess.h>
#include <asm/debug.h> #include <asm/debug.h>
#include <asm/processor.h> #include <asm/processor.h>
......