Commit 6e1b97d8 authored by Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/x86/linux-2.6-x86

* git://git.kernel.org/pub/scm/linux/kernel/git/x86/linux-2.6-x86:
  x86: Dump filtering supports x86_64 sparsemem
  x86: fix compiler warnings in arch/x86/kernel/early-quirks.c
  x86: fix !SMP compiler warning in arch/x86/kernel/acpi/processor.c
  x86: Fix boot protocol KEEP_SEGMENTS check.
  x86: voyager: fix bogus conversion to per_cpu for boot_cpu_info
  x86: export smp_ops to allow modular build of KVM
  Revert "i386: export i386 smp_call_function_mask() to modules"
parents 36ea96a4 69243f91
arch/x86/boot/compressed/head_32.S

@@ -33,24 +33,20 @@
 .globl startup_32
 startup_32:
-	/* check to see if KEEP_SEGMENTS flag is meaningful */
-	cmpw $0x207, BP_version(%esi)
-	jb 1f
-
+	cld
 	/* test KEEP_SEGMENTS flag to see if the bootloader is asking
 	 * us to not reload segments */
 	testb $(1<<6), BP_loadflags(%esi)
-	jnz 2f
+	jnz 1f
 
-1:	cli
+	cli
 	movl $(__BOOT_DS),%eax
 	movl %eax,%ds
 	movl %eax,%es
 	movl %eax,%fs
 	movl %eax,%gs
 	movl %eax,%ss
-
-2:	cld
+1:
 
 	/* Calculate the delta between where we were compiled to run
 	 * at and where we were actually loaded at. This can only be done
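For context: bit 6 of hdr.loadflags is the boot protocol's KEEP_SEGMENTS flag, defined since protocol 2.07, and the hunk drops the version guard while moving cld to the top so it always runs. A minimal loader-side sketch of setting the flag; the struct fragment and function name here are hypothetical illustrations, not kernel code:

/* Hypothetical loader-side sketch: ask the kernel not to reload
 * segment registers.  The struct fragment is illustrative only;
 * KEEP_SEGMENTS is the same bit head_32.S tests above. */
#include <stdint.h>

#define KEEP_SEGMENTS (1 << 6)

struct setup_header_fragment {
	uint16_t version;	/* boot protocol version, e.g. 0x0207 */
	uint8_t  loadflags;
};

void request_keep_segments(struct setup_header_fragment *hdr)
{
	if (hdr->version >= 0x0207)	/* flag only defined since 2.07 */
		hdr->loadflags |= KEEP_SEGMENTS;
}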
arch/x86/boot/compressed/head_64.S

@@ -29,6 +29,7 @@
 #include <asm/pgtable.h>
 #include <asm/page.h>
 #include <asm/msr.h>
+#include <asm/asm-offsets.h>
 
 .section ".text.head"
 	.code32
@@ -36,11 +37,17 @@
 startup_32:
 	cld
+	/* test KEEP_SEGMENTS flag to see if the bootloader is asking
+	 * us to not reload segments */
+	testb $(1<<6), BP_loadflags(%esi)
+	jnz 1f
+
 	cli
 	movl $(__KERNEL_DS), %eax
 	movl %eax, %ds
 	movl %eax, %es
 	movl %eax, %ss
+1:
 
 	/* Calculate the delta between where we were compiled to run
 	 * at and where we were actually loaded at. This can only be done
arch/x86/kernel/acpi/processor.c

@@ -62,8 +62,7 @@ static void init_intel_pdc(struct acpi_processor *pr, struct cpuinfo_x86 *c)
 /* Initialize _PDC data based on the CPU vendor */
 void arch_acpi_processor_init_pdc(struct acpi_processor *pr)
 {
-	unsigned int cpu = pr->id;
-	struct cpuinfo_x86 *c = &cpu_data(cpu);
+	struct cpuinfo_x86 *c = &cpu_data(pr->id);
 
 	pr->pdc = NULL;
 	if (c->x86_vendor == X86_VENDOR_INTEL)
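The !SMP warning this hunk fixes comes from cpu_data() discarding its argument on uniprocessor builds, which left the local variable cpu unused. A sketch of the macro shapes involved, assuming the era's <asm/processor.h>:

/* Sketch, assuming the era's definitions in <asm/processor.h>: */
#ifdef CONFIG_SMP
#define cpu_data(cpu)	per_cpu(cpu_info, cpu)
#else
#define cpu_data(cpu)	boot_cpu_data	/* 'cpu' never expands here */
#endif
/* ...so "unsigned int cpu = pr->id;" triggered an unused-variable
 * warning on !SMP; folding pr->id into the call removes it. */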
arch/x86/kernel/asm-offsets_64.c

@@ -15,12 +15,16 @@
 #include <asm/segment.h>
 #include <asm/thread_info.h>
 #include <asm/ia32.h>
+#include <asm/bootparam.h>
 
 #define DEFINE(sym, val) \
 	asm volatile("\n->" #sym " %0 " #val : : "i" (val))
 
 #define BLANK() asm volatile("\n->" : : )
 
+#define OFFSET(sym, str, mem) \
+	DEFINE(sym, offsetof(struct str, mem))
+
 #define __NO_STUBS 1
 #undef __SYSCALL
 #undef _ASM_X86_64_UNISTD_H_
@@ -109,5 +113,11 @@ int main(void)
 	DEFINE(crypto_tfm_ctx_offset, offsetof(struct crypto_tfm, __crt_ctx));
 	BLANK();
 	DEFINE(__NR_syscall_max, sizeof(syscalls) - 1);
+	BLANK();
+	OFFSET(BP_scratch, boot_params, scratch);
+	OFFSET(BP_loadflags, boot_params, hdr.loadflags);
+	OFFSET(BP_hardware_subarch, boot_params, hdr.hardware_subarch);
+	OFFSET(BP_version, boot_params, hdr.version);
+
 	return 0;
 }
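These OFFSET() entries are what make BP_loadflags and friends visible to the assembly in head_{32,64}.S (hence the new asm-offsets.h include above): asm-offsets_64.c is compiled to assembly, the "->sym value" markers are scraped out by the build, and the result becomes the generated asm-offsets.h. An illustrative excerpt of that generated output; the numeric values shown correspond to the documented boot-protocol field offsets (0x1e4, 0x211, 0x23c, 0x206 in the zero page) but are given here for illustration only:

/* Illustrative excerpt of the generated asm-offsets.h: */
#define BP_scratch 484		/* offsetof(struct boot_params, scratch) */
#define BP_loadflags 529	/* offsetof(struct boot_params, hdr.loadflags) */
#define BP_hardware_subarch 572	/* offsetof(struct boot_params, hdr.hardware_subarch) */
#define BP_version 518		/* offsetof(struct boot_params, hdr.version) */
/* head_32.S can then write: testb $(1<<6), BP_loadflags(%esi) */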
arch/x86/kernel/early-quirks.c

@@ -35,12 +35,14 @@ static void __init via_bugs(void)
 }
 
 #ifdef CONFIG_ACPI
+#ifdef CONFIG_X86_IO_APIC
 
 static int __init nvidia_hpet_check(struct acpi_table_header *header)
 {
 	return 0;
 }
-#endif
+#endif /* CONFIG_X86_IO_APIC */
+#endif /* CONFIG_ACPI */
 
 static void __init nvidia_bugs(void)
 {
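Judging by the guards added here, nvidia_hpet_check() is only referenced from code that itself depends on CONFIG_X86_IO_APIC, so configs with ACPI but without IO-APIC support saw a defined-but-unused warning. A minimal standalone reproduction of that warning class (not kernel code):

/* Standalone sketch of the warning class being fixed: */
static int helper(void)		/* no caller in this configuration */
{
	return 0;
}

int main(void)
{
	return 0;	/* gcc -Wunused-function: "'helper' defined but not used" */
}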
arch/x86/kernel/machine_kexec_64.c

@@ -233,6 +233,8 @@ NORET_TYPE void machine_kexec(struct kimage *image)
 
 void arch_crash_save_vmcoreinfo(void)
 {
+	VMCOREINFO_SYMBOL(init_level4_pgt);
+
 #ifdef CONFIG_ARCH_DISCONTIGMEM_ENABLE
 	VMCOREINFO_SYMBOL(node_data);
 	VMCOREINFO_LENGTH(node_data, MAX_NUMNODES);
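This is the "Dump filtering supports x86_64 sparsemem" item: publishing init_level4_pgt in the vmcoreinfo note lets userspace dump filters such as makedumpfile walk the crashed kernel's page tables and translate virtual addresses on sparsemem x86_64 kernels. A sketch of what the macro expands to, assuming the VMCOREINFO helpers in the era's <linux/kexec.h>:

/* Sketch, assuming the era's <linux/kexec.h>:
 * VMCOREINFO_SYMBOL(init_level4_pgt) appends a line such as
 *   SYMBOL(init_level4_pgt)=ffffffff80201000
 * to the vmcoreinfo ELF note that dump tools read. */
#define VMCOREINFO_SYMBOL(name) \
	vmcoreinfo_append_str("SYMBOL(%s)=%lx\n", #name, (unsigned long)&name)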
arch/x86/kernel/smp_32.c

@@ -708,10 +708,4 @@ struct smp_ops smp_ops = {
 	.smp_send_reschedule = native_smp_send_reschedule,
 	.smp_call_function_mask = native_smp_call_function_mask,
 };
-
-int smp_call_function_mask(cpumask_t mask, void (*func) (void *info),
-			   void *info, int wait)
-{
-	return smp_ops.smp_call_function_mask(mask, func, info, wait);
-}
-EXPORT_SYMBOL(smp_call_function_mask);
+EXPORT_SYMBOL_GPL(smp_ops);
arch/x86/mach-voyager/voyager_smp.c

@@ -29,14 +29,14 @@
 #include <asm/arch_hooks.h>
 
 /* TLB state -- visible externally, indexed physically */
-DEFINE_PER_CPU(struct tlb_state, cpu_tlbstate) ____cacheline_aligned = { &init_mm, 0 };
+DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate) = { &init_mm, 0 };
 
 /* CPU IRQ affinity -- set to all ones initially */
static unsigned long cpu_irq_affinity[NR_CPUS] __cacheline_aligned = { [0 ... NR_CPUS-1] = ~0UL };
 
 /* per CPU data structure (for /proc/cpuinfo et al), visible externally
  * indexed physically */
-DEFINE_PER_CPU(cpuinfo_x86, cpu_info) __cacheline_aligned;
+DEFINE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info);
 EXPORT_PER_CPU_SYMBOL(cpu_info);
 
 /* physical ID of the CPU used to boot the system */
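The voyager fix swaps open-coded alignment annotations for DEFINE_PER_CPU_SHARED_ALIGNED, which both cacheline-aligns the variable and groups it in a dedicated per-CPU subsection to avoid false sharing. A sketch of the macro's shape, assumed from the era's <linux/percpu.h>:

/* Sketch, assuming the era's <linux/percpu.h>: */
#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name)			\
	__attribute__((__section__(".data.percpu.shared_aligned")))	\
	__typeof__(type) per_cpu__##name				\
	____cacheline_aligned_in_smp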
include/asm-x86/smp_32.h

@@ -94,9 +94,12 @@ static inline void smp_send_reschedule(int cpu)
 {
 	smp_ops.smp_send_reschedule(cpu);
 }
-extern int smp_call_function_mask(cpumask_t mask,
-				  void (*func) (void *info), void *info,
-				  int wait);
+static inline int smp_call_function_mask(cpumask_t mask,
+					 void (*func) (void *info), void *info,
+					 int wait)
+{
+	return smp_ops.smp_call_function_mask(mask, func, info, wait);
+}
 
 void native_smp_prepare_boot_cpu(void);
 void native_smp_prepare_cpus(unsigned int max_cpus);
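Together with EXPORT_SYMBOL_GPL(smp_ops) above, this inline is what allows KVM to be built as a module: the call compiles down to an indirect call through the now-exported smp_ops struct, so the old EXPORT_SYMBOL wrapper being reverted is no longer needed. A hypothetical GPL module sketch; the module and callback names are illustrative, using era-appropriate cpumask APIs:

/* Hypothetical GPL module using the now-inline wrapper; the call
 * resolves against EXPORT_SYMBOL_GPL(smp_ops) at module load. */
#include <linux/module.h>
#include <linux/smp.h>

static void flush_one(void *info)
{
	/* per-CPU work would go here */
}

static int __init demo_init(void)
{
	int cpu = get_cpu();		/* disable preemption around the IPI */
	cpumask_t mask = cpu_online_map;

	cpu_clear(cpu, mask);		/* must not IPI the calling CPU */
	smp_call_function_mask(mask, flush_one, NULL, 1);	/* wait = 1 */
	put_cpu();
	return 0;
}

static void __exit demo_exit(void)
{
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");	/* required to bind against a _GPL export */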