Commit 3e5095d1 authored by Ingo Molnar

x86: replace CONFIG_X86_SMP with CONFIG_SMP

The x86/Voyager subarch used to have this distinction between
 'x86 SMP support' and 'Voyager SMP support':

 config X86_SMP
	bool
	depends on SMP && ((X86_32 && !X86_VOYAGER) || X86_64)

This is a pointless distinction - Voyager can (and already does) use
smp_ops to implement various SMP quirks it has - and it can be extended
more to cover all the specialities of Voyager.

So remove this complication in the Kconfig space.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent f2fc0e30
...@@ -173,11 +173,6 @@ config GENERIC_PENDING_IRQ ...@@ -173,11 +173,6 @@ config GENERIC_PENDING_IRQ
depends on GENERIC_HARDIRQS && SMP depends on GENERIC_HARDIRQS && SMP
default y default y
config X86_SMP
bool
depends on SMP && ((X86_32 && !X86_VOYAGER) || X86_64)
default y
config USE_GENERIC_SMP_HELPERS config USE_GENERIC_SMP_HELPERS
def_bool y def_bool y
depends on SMP depends on SMP
...@@ -203,7 +198,7 @@ config X86_BIOS_REBOOT ...@@ -203,7 +198,7 @@ config X86_BIOS_REBOOT
config X86_TRAMPOLINE config X86_TRAMPOLINE
bool bool
depends on X86_SMP || (X86_VOYAGER && SMP) || (64BIT && ACPI_SLEEP) depends on SMP || (64BIT && ACPI_SLEEP)
default y default y
config KTIME_SCALAR config KTIME_SCALAR
......
...@@ -83,7 +83,7 @@ config DEBUG_PAGEALLOC ...@@ -83,7 +83,7 @@ config DEBUG_PAGEALLOC
config DEBUG_PER_CPU_MAPS config DEBUG_PER_CPU_MAPS
bool "Debug access to per_cpu maps" bool "Debug access to per_cpu maps"
depends on DEBUG_KERNEL depends on DEBUG_KERNEL
depends on X86_SMP depends on SMP
default n default n
help help
Say Y to verify that the per_cpu map being accessed has Say Y to verify that the per_cpu map being accessed has
......
...@@ -9,7 +9,7 @@ ...@@ -9,7 +9,7 @@
* is no hardware IRQ pin equivalent for them, they are triggered * is no hardware IRQ pin equivalent for them, they are triggered
* through the ICC by us (IPIs) * through the ICC by us (IPIs)
*/ */
#ifdef CONFIG_X86_SMP #ifdef CONFIG_SMP
BUILD_INTERRUPT(reschedule_interrupt,RESCHEDULE_VECTOR) BUILD_INTERRUPT(reschedule_interrupt,RESCHEDULE_VECTOR)
BUILD_INTERRUPT(call_function_interrupt,CALL_FUNCTION_VECTOR) BUILD_INTERRUPT(call_function_interrupt,CALL_FUNCTION_VECTOR)
BUILD_INTERRUPT(call_function_single_interrupt,CALL_FUNCTION_SINGLE_VECTOR) BUILD_INTERRUPT(call_function_single_interrupt,CALL_FUNCTION_SINGLE_VECTOR)
......
...@@ -98,7 +98,7 @@ extern asmlinkage void qic_call_function_interrupt(void); ...@@ -98,7 +98,7 @@ extern asmlinkage void qic_call_function_interrupt(void);
extern void smp_apic_timer_interrupt(struct pt_regs *); extern void smp_apic_timer_interrupt(struct pt_regs *);
extern void smp_spurious_interrupt(struct pt_regs *); extern void smp_spurious_interrupt(struct pt_regs *);
extern void smp_error_interrupt(struct pt_regs *); extern void smp_error_interrupt(struct pt_regs *);
#ifdef CONFIG_X86_SMP #ifdef CONFIG_SMP
extern void smp_reschedule_interrupt(struct pt_regs *); extern void smp_reschedule_interrupt(struct pt_regs *);
extern void smp_call_function_interrupt(struct pt_regs *); extern void smp_call_function_interrupt(struct pt_regs *);
extern void smp_call_function_single_interrupt(struct pt_regs *); extern void smp_call_function_single_interrupt(struct pt_regs *);
......
...@@ -57,8 +57,8 @@ obj-$(CONFIG_X86_CPUID) += cpuid.o ...@@ -57,8 +57,8 @@ obj-$(CONFIG_X86_CPUID) += cpuid.o
obj-$(CONFIG_PCI) += early-quirks.o obj-$(CONFIG_PCI) += early-quirks.o
apm-y := apm_32.o apm-y := apm_32.o
obj-$(CONFIG_APM) += apm.o obj-$(CONFIG_APM) += apm.o
obj-$(CONFIG_X86_SMP) += smp.o obj-$(CONFIG_SMP) += smp.o
obj-$(CONFIG_X86_SMP) += smpboot.o tsc_sync.o ipi.o obj-$(CONFIG_SMP) += smpboot.o tsc_sync.o ipi.o
obj-$(CONFIG_SMP) += setup_percpu.o obj-$(CONFIG_SMP) += setup_percpu.o
obj-$(CONFIG_X86_64_SMP) += tsc_sync.o obj-$(CONFIG_X86_64_SMP) += tsc_sync.o
obj-$(CONFIG_X86_TRAMPOLINE) += trampoline_$(BITS).o obj-$(CONFIG_X86_TRAMPOLINE) += trampoline_$(BITS).o
......
...@@ -1900,7 +1900,7 @@ void __cpuinit generic_processor_info(int apicid, int version) ...@@ -1900,7 +1900,7 @@ void __cpuinit generic_processor_info(int apicid, int version)
} }
#endif #endif
#if defined(CONFIG_X86_SMP) || defined(CONFIG_X86_64) #if defined(CONFIG_SMP) || defined(CONFIG_X86_64)
early_per_cpu(x86_cpu_to_apicid, cpu) = apicid; early_per_cpu(x86_cpu_to_apicid, cpu) = apicid;
early_per_cpu(x86_bios_cpu_apicid, cpu) = apicid; early_per_cpu(x86_bios_cpu_apicid, cpu) = apicid;
#endif #endif
......
...@@ -69,7 +69,7 @@ void __cpuinit init_scattered_cpuid_features(struct cpuinfo_x86 *c) ...@@ -69,7 +69,7 @@ void __cpuinit init_scattered_cpuid_features(struct cpuinfo_x86 *c)
*/ */
void __cpuinit detect_extended_topology(struct cpuinfo_x86 *c) void __cpuinit detect_extended_topology(struct cpuinfo_x86 *c)
{ {
#ifdef CONFIG_X86_SMP #ifdef CONFIG_SMP
unsigned int eax, ebx, ecx, edx, sub_index; unsigned int eax, ebx, ecx, edx, sub_index;
unsigned int ht_mask_width, core_plus_mask_width; unsigned int ht_mask_width, core_plus_mask_width;
unsigned int core_select_mask, core_level_siblings; unsigned int core_select_mask, core_level_siblings;
......
...@@ -344,7 +344,7 @@ static void c1e_idle(void) ...@@ -344,7 +344,7 @@ static void c1e_idle(void)
void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c) void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
{ {
#ifdef CONFIG_X86_SMP #ifdef CONFIG_SMP
if (pm_idle == poll_idle && smp_num_siblings > 1) { if (pm_idle == poll_idle && smp_num_siblings > 1) {
printk(KERN_WARNING "WARNING: polling idle and HT enabled," printk(KERN_WARNING "WARNING: polling idle and HT enabled,"
" performance may degrade.\n"); " performance may degrade.\n");
......
...@@ -588,7 +588,7 @@ early_param("elfcorehdr", setup_elfcorehdr); ...@@ -588,7 +588,7 @@ early_param("elfcorehdr", setup_elfcorehdr);
static int __init default_update_genapic(void) static int __init default_update_genapic(void)
{ {
#ifdef CONFIG_X86_SMP #ifdef CONFIG_SMP
if (!apic->wakeup_cpu) if (!apic->wakeup_cpu)
apic->wakeup_cpu = wakeup_secondary_cpu_via_init; apic->wakeup_cpu = wakeup_secondary_cpu_via_init;
#endif #endif
......
...@@ -773,7 +773,7 @@ __cpuinit int unsynchronized_tsc(void) ...@@ -773,7 +773,7 @@ __cpuinit int unsynchronized_tsc(void)
if (!cpu_has_tsc || tsc_unstable) if (!cpu_has_tsc || tsc_unstable)
return 1; return 1;
#ifdef CONFIG_X86_SMP #ifdef CONFIG_SMP
if (apic_is_clustered_box()) if (apic_is_clustered_box())
return 1; return 1;
#endif #endif
......
...@@ -256,7 +256,7 @@ void __devinit vmi_time_bsp_init(void) ...@@ -256,7 +256,7 @@ void __devinit vmi_time_bsp_init(void)
*/ */
clockevents_notify(CLOCK_EVT_NOTIFY_SUSPEND, NULL); clockevents_notify(CLOCK_EVT_NOTIFY_SUSPEND, NULL);
local_irq_disable(); local_irq_disable();
#ifdef CONFIG_X86_SMP #ifdef CONFIG_SMP
/* /*
* XXX handle_percpu_irq only defined for SMP; we need to switch over * XXX handle_percpu_irq only defined for SMP; we need to switch over
* to using it, since this is a local interrupt, which each CPU must * to using it, since this is a local interrupt, which each CPU must
......
obj-y := init_$(BITS).o fault.o ioremap.o extable.o pageattr.o mmap.o \ obj-y := init_$(BITS).o fault.o ioremap.o extable.o pageattr.o mmap.o \
pat.o pgtable.o gup.o pat.o pgtable.o gup.o
obj-$(CONFIG_X86_SMP) += tlb.o obj-$(CONFIG_SMP) += tlb.o
obj-$(CONFIG_X86_32) += pgtable_32.o iomap_32.o obj-$(CONFIG_X86_32) += pgtable_32.o iomap_32.o
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment