Commit 67626fad authored by Heiko Carstens

s390: enforce CONFIG_SMP

There have never been distributions that shipped with CONFIG_SMP=n for
s390. In addition, the kernel currently doesn't even compile with
CONFIG_SMP=n for s390. Most likely it wouldn't work even if the compile
error were fixed, since nobody tests it and there is no use case I can
think of.
Therefore simply enforce CONFIG_SMP and get rid of some more or less
unused code.
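For illustration, "enforcing" the option just means the Kconfig symbol stays an always-on def_bool with no user-visible prompt; after this patch the entry reduces to the sketch below (taken from the first Kconfig hunk of the diff):

# CONFIG_SMP is now forced on: no prompt and no help text, so it can no longer be set to n
config SMP
	def_bool y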
Reviewed-by: Christian Borntraeger <borntraeger@de.ibm.com>
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
parent 753469a2
@@ -30,7 +30,7 @@ config GENERIC_BUG_RELATIVE_POINTERS
 	def_bool y
 
 config GENERIC_LOCKBREAK
-	def_bool y if SMP && PREEMPT
+	def_bool y if PREEMPT
 
 config PGSTE
 	def_bool y if KVM
@@ -113,7 +113,6 @@ config S390
 	select DYNAMIC_FTRACE if FUNCTION_TRACER
 	select GENERIC_CLOCKEVENTS
 	select GENERIC_CPU_AUTOPROBE
-	select GENERIC_CPU_DEVICES if !SMP
 	select GENERIC_CPU_VULNERABILITIES
 	select GENERIC_FIND_FIRST_BIT
 	select GENERIC_SMP_IDLE_THREAD
@@ -399,27 +398,10 @@ config SYSVIPC_COMPAT
 
 config SMP
 	def_bool y
-	prompt "Symmetric multi-processing support"
-	---help---
-	  This enables support for systems with more than one CPU. If you have
-	  a system with only one CPU, like most personal computers, say N. If
-	  you have a system with more than one CPU, say Y.
-
-	  If you say N here, the kernel will run on uni- and multiprocessor
-	  machines, but will use only one CPU of a multiprocessor machine. If
-	  you say Y here, the kernel will run on many, but not all,
-	  uniprocessor machines. On a uniprocessor machine, the kernel
-	  will run faster if you say N here.
-
-	  See also the SMP-HOWTO available at
-	  <http://www.tldp.org/docs.html#howto>.
-
-	  Even if you don't know what to do here, say Y.
 
 config NR_CPUS
 	int "Maximum number of CPUs (2-512)"
 	range 2 512
-	depends on SMP
 	default "64"
 	help
 	  This allows you to specify the maximum number of CPUs which this
@@ -432,7 +414,6 @@ config NR_CPUS
 config HOTPLUG_CPU
 	def_bool y
 	prompt "Support for hot-pluggable CPUs"
-	depends on SMP
 	help
 	  Say Y here to be able to turn CPUs off and on. CPUs
 	  can be controlled through /sys/devices/system/cpu/cpu#.
@@ -448,7 +429,7 @@ config NODES_SPAN_OTHER_NODES
 
 config NUMA
 	bool "NUMA support"
-	depends on SMP && SCHED_TOPOLOGY
+	depends on SCHED_TOPOLOGY
 	default n
 	help
 	  Enable NUMA support
@@ -523,7 +504,6 @@ config SCHED_DRAWER
 config SCHED_TOPOLOGY
 	def_bool y
 	prompt "Topology scheduler support"
-	depends on SMP
 	select SCHED_SMT
 	select SCHED_MC
 	select SCHED_BOOK
@@ -829,7 +809,6 @@ menu "Dump support"
 
 config CRASH_DUMP
 	bool "kernel crash dumps"
-	depends on SMP
 	select KEXEC
 	help
 	  Generate crash dump after being started by kexec.
......
@@ -112,13 +112,8 @@ union ctlreg2 {
 	};
 };
 
-#ifdef CONFIG_SMP
-# define ctl_set_bit(cr, bit) smp_ctl_set_bit(cr, bit)
-# define ctl_clear_bit(cr, bit) smp_ctl_clear_bit(cr, bit)
-#else
-# define ctl_set_bit(cr, bit) __ctl_set_bit(cr, bit)
-# define ctl_clear_bit(cr, bit) __ctl_clear_bit(cr, bit)
-#endif
+#define ctl_set_bit(cr, bit) smp_ctl_set_bit(cr, bit)
+#define ctl_clear_bit(cr, bit) smp_ctl_clear_bit(cr, bit)
 
 #endif /* __ASSEMBLY__ */
 #endif /* __ASM_CTL_REG_H */
@@ -16,7 +16,7 @@
  * per cpu area, use weak definitions to force the compiler to
  * generate external references.
  */
-#if defined(CONFIG_SMP) && defined(MODULE)
+#if defined(MODULE)
 #define ARCH_NEEDS_WEAK_PER_CPU
 #endif
 
......
@@ -9,9 +9,6 @@
 #define __ASM_SMP_H
 
 #include <asm/sigp.h>
-
-#ifdef CONFIG_SMP
-
 #include <asm/lowcore.h>
 
 #define raw_smp_processor_id()	(S390_lowcore.cpu_nr)
@@ -40,33 +37,6 @@ extern int smp_cpu_get_polarization(int cpu);
 extern void smp_fill_possible_mask(void);
 extern void smp_detect_cpus(void);
 
-#else /* CONFIG_SMP */
-
-#define smp_cpu_mtid	0
-
-static inline void smp_call_ipl_cpu(void (*func)(void *), void *data)
-{
-	func(data);
-}
-
-static inline void smp_call_online_cpu(void (*func)(void *), void *data)
-{
-	func(data);
-}
-
-static inline void smp_emergency_stop(void)
-{
-}
-
-static inline int smp_find_processor_id(u16 address) { return 0; }
-static inline int smp_store_status(int cpu) { return 0; }
-static inline int smp_vcpu_scheduled(int cpu) { return 1; }
-static inline void smp_yield_cpu(int cpu) { }
-static inline void smp_fill_possible_mask(void) { }
-static inline void smp_detect_cpus(void) { }
-
-#endif /* CONFIG_SMP */
-
 static inline void smp_stop_cpu(void)
 {
 	u16 pcpu = stap();
......
@@ -20,11 +20,7 @@
 
 extern int spin_retry;
 
-#ifndef CONFIG_SMP
-static inline bool arch_vcpu_is_preempted(int cpu) { return false; }
-#else
 bool arch_vcpu_is_preempted(int cpu);
-#endif
 
 #define vcpu_is_preempted arch_vcpu_is_preempted
 
......
@@ -32,7 +32,6 @@ static inline void __tlb_flush_idte(unsigned long asce)
 		: : "a" (opt), "a" (asce) : "cc");
 }
 
-#ifdef CONFIG_SMP
 void smp_ptlb_all(void);
 
 /*
@@ -83,22 +82,6 @@ static inline void __tlb_flush_kernel(void)
 	else
 		__tlb_flush_global();
 }
-#else
-#define __tlb_flush_global()	__tlb_flush_local()
-
-/*
- * Flush TLB entries for a specific ASCE on all CPUs.
- */
-static inline void __tlb_flush_mm(struct mm_struct *mm)
-{
-	__tlb_flush_local();
-}
-
-static inline void __tlb_flush_kernel(void)
-{
-	__tlb_flush_local();
-}
-#endif
 
 static inline void __tlb_flush_mm_lazy(struct mm_struct * mm)
 {
......
@@ -53,6 +53,7 @@ obj-y	+= sysinfo.o lgr.o os_info.o machine_kexec.o pgm_check.o
 obj-y	+= runtime_instr.o cache.o fpu.o dumpstack.o guarded_storage.o sthyi.o
 obj-y	+= entry.o reipl.o relocate_kernel.o kdebugfs.o alternative.o
 obj-y	+= nospec-branch.o ipl_vmparm.o machine_kexec_reloc.o unwind_bc.o
+obj-y	+= smp.o
 
 extra-y	+= head64.o vmlinux.lds
 
@@ -60,7 +61,6 @@ obj-$(CONFIG_SYSFS)	+= nospec-sysfs.o
 CFLAGS_REMOVE_nospec-branch.o	+= $(CC_FLAGS_EXPOLINE)
 
 obj-$(CONFIG_MODULES)		+= module.o
-obj-$(CONFIG_SMP)		+= smp.o
 obj-$(CONFIG_SCHED_TOPOLOGY)	+= topology.o
 obj-$(CONFIG_HIBERNATION)	+= suspend.o swsusp.o
 obj-$(CONFIG_AUDIT)		+= audit.o
......
@@ -199,9 +199,7 @@ void die(struct pt_regs *regs, const char *str)
 #ifdef CONFIG_PREEMPT
 	pr_cont("PREEMPT ");
 #endif
-#ifdef CONFIG_SMP
 	pr_cont("SMP ");
-#endif
 	if (debug_pagealloc_enabled())
 		pr_cont("DEBUG_PAGEALLOC");
 	pr_cont("\n");
......
@@ -986,14 +986,12 @@ ENTRY(psw_idle)
 	stg	%r3,__SF_EMPTY(%r15)
 	larl	%r1,.Lpsw_idle_lpsw+4
 	stg	%r1,__SF_EMPTY+8(%r15)
-#ifdef CONFIG_SMP
 	larl	%r1,smp_cpu_mtid
 	llgf	%r1,0(%r1)
 	ltgr	%r1,%r1
 	jz	.Lpsw_idle_stcctm
 	.insn	rsy,0xeb0000000017,%r1,5,__SF_EMPTY+16(%r15)
 .Lpsw_idle_stcctm:
-#endif
 	oi	__LC_CPU_FLAGS+7,_CIF_ENABLED_WAIT
 	BPON
 	STCK	__CLOCK_IDLE_ENTER(%r2)
@@ -1468,7 +1466,6 @@ ENDPROC(cleanup_critical)
 	mvc	__CLOCK_IDLE_ENTER(8,%r2),__CLOCK_IDLE_EXIT(%r2)
 	mvc	__TIMER_IDLE_ENTER(8,%r2),__TIMER_IDLE_EXIT(%r2)
 1:	# calculate idle cycles
-#ifdef CONFIG_SMP
 	clg	%r9,BASED(.Lcleanup_idle_insn)
 	jl	3f
 	larl	%r1,smp_cpu_mtid
@@ -1486,7 +1483,6 @@ ENDPROC(cleanup_critical)
 	la	%r3,8(%r3)
 	la	%r4,8(%r4)
 	brct	%r1,2b
-#endif
 3:	# account system time going idle
 	lg	%r9,__LC_STEAL_TIMER
 	alg	%r9,__CLOCK_IDLE_ENTER(%r2)
......
@@ -461,11 +461,9 @@ static void __init setup_lowcore_dat_off(void)
 	mem_assign_absolute(S390_lowcore.restart_source, lc->restart_source);
 	mem_assign_absolute(S390_lowcore.restart_psw, lc->restart_psw);
 
-#ifdef CONFIG_SMP
 	lc->spinlock_lockval = arch_spin_lockval(0);
 	lc->spinlock_index = 0;
 	arch_spin_lock_setup(0);
-#endif
 	lc->br_r1_trampoline = 0x07f1;	/* br %r1 */
 
 	set_prefix((u32)(unsigned long) lc);
......
@@ -162,7 +162,6 @@ ENTRY(swsusp_arch_resume)
 	larl	%r1,__swsusp_reset_dma
 	lg	%r1,0(%r1)
 	BASR_EX	%r14,%r1
-#ifdef CONFIG_SMP
 	larl	%r1,smp_cpu_mt_shift
 	icm	%r1,15,0(%r1)
 	jz	smt_done
@@ -172,7 +171,6 @@ smt_loop:
 	brc	8,smt_done			/* accepted */
 	brc	2,smt_loop			/* busy, try again */
 smt_done:
-#endif
 	larl	%r1,.Lnew_pgm_check_psw
 	lpswe	0(%r1)
 pgm_check_entry:
......
@@ -3,9 +3,8 @@
 # Makefile for s390-specific library files..
 #
 
-lib-y += delay.o string.o uaccess.o find.o
+lib-y += delay.o string.o uaccess.o find.o spinlock.o
 obj-y += mem.o xor.o
-lib-$(CONFIG_SMP) += spinlock.o
 lib-$(CONFIG_KPROBES) += probes.o
 lib-$(CONFIG_UPROBES) += probes.o
 
......