Commit acee709c authored by Ingo Molnar

Merge branches 'x86/urgent', 'x86/amd-iommu', 'x86/apic', 'x86/cleanups', 'x86/core', 'x86/cpu', 'x86/fixmap', 'x86/gart', 'x86/kprobes', 'x86/memtest', 'x86/modules', 'x86/nmi', 'x86/pat', 'x86/reboot', 'x86/setup', 'x86/step', 'x86/unify-pci', 'x86/uv', 'x86/xen' and 'xen-64bit' into x86/for-linus
......@@ -1206,7 +1206,7 @@ and is between 256 and 4096 characters. It is defined in the file
or
memmap=0x10000$0x18690000
memtest= [KNL,X86_64] Enable memtest
memtest= [KNL,X86] Enable memtest
Format: <integer>
range: 0,4 : pattern number
default : 0 <disable>
......@@ -2158,6 +2158,10 @@ and is between 256 and 4096 characters. It is defined in the file
Note that genuine overcurrent events won't be
reported either.
unknown_nmi_panic
[X86-32,X86-64]
Set unknown_nmi_panic=1 early on boot.
usbcore.autosuspend=
[USB] The autosuspend time delay (in seconds) used
for newly-detected USB devices (default 2). This
......
......@@ -447,7 +447,6 @@ config PARAVIRT_DEBUG
config MEMTEST
bool "Memtest"
depends on X86_64
help
This option adds a kernel parameter 'memtest', which allows memtest
to be set.
......
......@@ -362,10 +362,6 @@ config X86_ALIGNMENT_16
def_bool y
depends on MWINCHIP3D || MWINCHIP2 || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
config X86_GOOD_APIC
def_bool y
depends on MK7 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || MK8 || MEFFICEON || MCORE2 || MVIAC7 || X86_64
config X86_INTEL_USERCOPY
def_bool y
depends on MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M586MMX || X86_GENERIC || MK8 || MK7 || MEFFICEON || MCORE2
......
......@@ -289,7 +289,6 @@ config CPA_DEBUG
config OPTIMIZE_INLINING
bool "Allow gcc to uninline functions marked 'inline'"
depends on BROKEN
help
This option determines if the kernel forces gcc to inline the functions
developers have marked 'inline'. Doing so takes away freedom from gcc to
......@@ -300,5 +299,7 @@ config OPTIMIZE_INLINING
become the default in the future, until then this option is there to
test gcc for this.
If unsure, say N.
endmenu
......@@ -167,9 +167,8 @@ void query_edd(void)
* Scan the BIOS-supported hard disks and query EDD
* information...
*/
get_edd_info(devno, &ei);
if (boot_params.eddbuf_entries < EDDMAXNR) {
if (!get_edd_info(devno, &ei)
&& boot_params.eddbuf_entries < EDDMAXNR) {
memcpy(edp, &ei, sizeof ei);
edp++;
boot_params.eddbuf_entries++;
......
......@@ -98,12 +98,6 @@ static void reset_coprocessor(void)
/*
* Set up the GDT
*/
#define GDT_ENTRY(flags, base, limit) \
(((u64)(base & 0xff000000) << 32) | \
((u64)flags << 40) | \
((u64)(limit & 0x00ff0000) << 32) | \
((u64)(base & 0x00ffffff) << 16) | \
((u64)(limit & 0x0000ffff)))
struct gdt_ptr {
u16 len;
......
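For reference (not part of this commit): the GDT_ENTRY() macro removed above moves into asm-x86/segment.h so the boot code and the ACPI wakeup code share one copy. A small worked example of the encoding it performs, showing that a flat 32-bit code segment (flags 0xc09b, base 0, limit 0xfffff) comes out as the familiar 0x00cf9b000000ffff descriptor:

/*
 * Illustration only -- not part of the commit. Reproduces the
 * GDT_ENTRY() packing shown above and prints the resulting
 * descriptor for a flat 4 GB code segment.
 */
#include <stdio.h>
#include <stdint.h>

typedef uint64_t u64;

#define GDT_ENTRY(flags, base, limit)		\
	(((u64)(base & 0xff000000) << 32) |	\
	 ((u64)flags << 40) |			\
	 ((u64)(limit & 0x00ff0000) << 32) |	\
	 ((u64)(base & 0x00ffffff) << 16) |	\
	 ((u64)(limit & 0x0000ffff)))

int main(void)
{
	/* prints 0x00cf9b000000ffff */
	printf("0x%016llx\n",
	       (unsigned long long)GDT_ENTRY(0xc09b, 0, 0xfffff));
	return 0;
}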
......@@ -36,6 +36,11 @@
#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
#define FIX_EFLAGS (X86_EFLAGS_AC | X86_EFLAGS_OF | \
X86_EFLAGS_DF | X86_EFLAGS_TF | X86_EFLAGS_SF | \
X86_EFLAGS_ZF | X86_EFLAGS_AF | X86_EFLAGS_PF | \
X86_EFLAGS_CF)
asmlinkage int do_signal(struct pt_regs *regs, sigset_t *oldset);
void signal_fault(struct pt_regs *regs, void __user *frame, char *where);
......@@ -248,7 +253,7 @@ static int ia32_restore_sigcontext(struct pt_regs *regs,
regs->ss |= 3;
err |= __get_user(tmpflags, &sc->flags);
regs->flags = (regs->flags & ~0x40DD5) | (tmpflags & 0x40DD5);
regs->flags = (regs->flags & ~FIX_EFLAGS) | (tmpflags & FIX_EFLAGS);
/* disable syscall checks */
regs->orig_ax = -1;
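For reference (not part of this commit): the hunk above replaces the magic mask 0x40DD5 with the symbolic FIX_EFLAGS added at the top of the file. A quick arithmetic check that the two are the same value:

/*
 * Illustration only -- not part of the commit. ORing the standard
 * EFLAGS bit values named in the new FIX_EFLAGS definition
 * reproduces the old magic constant 0x40dd5.
 */
#include <stdio.h>

int main(void)
{
	unsigned long fix_eflags =
		0x00040000UL |	/* X86_EFLAGS_AC */
		0x00000800UL |	/* X86_EFLAGS_OF */
		0x00000400UL |	/* X86_EFLAGS_DF */
		0x00000100UL |	/* X86_EFLAGS_TF */
		0x00000080UL |	/* X86_EFLAGS_SF */
		0x00000040UL |	/* X86_EFLAGS_ZF */
		0x00000010UL |	/* X86_EFLAGS_AF */
		0x00000004UL |	/* X86_EFLAGS_PF */
		0x00000001UL;	/* X86_EFLAGS_CF */

	printf("0x%lx\n", fix_eflags);	/* prints 0x40dd5 */
	return 0;
}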
......@@ -515,7 +520,6 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
compat_sigset_t *set, struct pt_regs *regs)
{
struct rt_sigframe __user *frame;
struct exec_domain *ed = current_thread_info()->exec_domain;
void __user *restorer;
int err = 0;
......@@ -538,8 +542,7 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
goto give_sigsegv;
err |= __put_user((ed && ed->signal_invmap && sig < 32
? ed->signal_invmap[sig] : sig), &frame->sig);
err |= __put_user(sig, &frame->sig);
err |= __put_user(ptr_to_compat(&frame->info), &frame->pinfo);
err |= __put_user(ptr_to_compat(&frame->uc), &frame->puc);
err |= copy_siginfo_to_user32(&frame->info, info);
......
......@@ -37,6 +37,11 @@
movq %rax,R8(%rsp)
.endm
/*
* Reload arg registers from stack in case ptrace changed them.
* We don't reload %eax because syscall_trace_enter() returned
* the value it wants us to use in the table lookup.
*/
.macro LOAD_ARGS32 offset
movl \offset(%rsp),%r11d
movl \offset+8(%rsp),%r10d
......@@ -46,7 +51,6 @@
movl \offset+48(%rsp),%edx
movl \offset+56(%rsp),%esi
movl \offset+64(%rsp),%edi
movl \offset+72(%rsp),%eax
.endm
.macro CFI_STARTPROC32 simple
......@@ -137,13 +141,12 @@ ENTRY(ia32_sysenter_target)
.previous
GET_THREAD_INFO(%r10)
orl $TS_COMPAT,TI_status(%r10)
testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP), \
TI_flags(%r10)
testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
CFI_REMEMBER_STATE
jnz sysenter_tracesys
sysenter_do_call:
cmpl $(IA32_NR_syscalls-1),%eax
ja ia32_badsys
sysenter_do_call:
IA32_ARG_FIXUP 1
call *ia32_sys_call_table(,%rax,8)
movq %rax,RAX-ARGOFFSET(%rsp)
......@@ -242,8 +245,7 @@ ENTRY(ia32_cstar_target)
.previous
GET_THREAD_INFO(%r10)
orl $TS_COMPAT,TI_status(%r10)
testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP), \
TI_flags(%r10)
testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
CFI_REMEMBER_STATE
jnz cstar_tracesys
cstar_do_call:
......@@ -321,6 +323,7 @@ ENTRY(ia32_syscall)
/*CFI_REL_OFFSET rflags,EFLAGS-RIP*/
/*CFI_REL_OFFSET cs,CS-RIP*/
CFI_REL_OFFSET rip,RIP-RIP
PARAVIRT_ADJUST_EXCEPTION_FRAME
SWAPGS
/*
* No need to follow this irqs on/off section: the syscall
......@@ -336,8 +339,7 @@ ENTRY(ia32_syscall)
SAVE_ARGS 0,0,1
GET_THREAD_INFO(%r10)
orl $TS_COMPAT,TI_status(%r10)
testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP), \
TI_flags(%r10)
testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
jnz ia32_tracesys
ia32_do_syscall:
cmpl $(IA32_NR_syscalls-1),%eax
......
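For reference (not part of this commit): the ia32entry.S hunks above, like the entry_32.S and entry_64.S hunks further down, replace the open-coded flag lists with a single _TIF_WORK_SYSCALL_ENTRY test. The macro's definition is not part of this excerpt; a plausible sketch, inferred only from the flags the old code tested inline, would be:

/*
 * Sketch only -- not the actual header. The real definition lives in
 * include/asm-x86/thread_info.h and may include further flags; it must
 * at least cover the work bits the old assembly tested one by one.
 */
#define _TIF_WORK_SYSCALL_ENTRY	(_TIF_SYSCALL_TRACE | _TIF_SYSCALL_EMU | \
				 _TIF_SYSCALL_AUDIT | _TIF_SECCOMP)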
......@@ -102,6 +102,7 @@ obj-$(CONFIG_OLPC) += olpc.o
# 64 bit specific files
ifeq ($(CONFIG_X86_64),y)
obj-y += genapic_64.o genapic_flat_64.o genx2apic_uv_x.o tlb_uv.o
obj-y += bios_uv.o
obj-$(CONFIG_X86_PM_TIMER) += pmtimer_64.o
obj-$(CONFIG_AUDIT) += audit_64.o
......
......@@ -9,6 +9,7 @@
#include <linux/bootmem.h>
#include <linux/dmi.h>
#include <linux/cpumask.h>
#include <asm/segment.h>
#include "realmode/wakeup.h"
#include "sleep.h"
......@@ -23,15 +24,6 @@ static unsigned long acpi_realmode;
static char temp_stack[10240];
#endif
/* XXX: this macro should move to asm-x86/segment.h and be shared with the
boot code... */
#define GDT_ENTRY(flags, base, limit) \
(((u64)(base & 0xff000000) << 32) | \
((u64)flags << 40) | \
((u64)(limit & 0x00ff0000) << 32) | \
((u64)(base & 0x00ffffff) << 16) | \
((u64)(limit & 0x0000ffff)))
/**
* acpi_save_state_mem - save kernel state
*
......
......@@ -21,6 +21,7 @@
#include <linux/suspend.h>
#include <asm/e820.h>
#include <asm/io.h>
#include <asm/iommu.h>
#include <asm/gart.h>
#include <asm/pci-direct.h>
#include <asm/dma.h>
......
......@@ -54,7 +54,7 @@ EXPORT_SYMBOL_GPL(local_apic_timer_c2_ok);
/*
* Debug level, exported for io_apic.c
*/
int apic_verbosity;
unsigned int apic_verbosity;
/* Have we found an MP table */
int smp_found_config;
......@@ -314,7 +314,7 @@ static void setup_APIC_timer(void)
#define TICK_COUNT 100000000
static void __init calibrate_APIC_clock(void)
static int __init calibrate_APIC_clock(void)
{
unsigned apic, apic_start;
unsigned long tsc, tsc_start;
......@@ -368,6 +368,17 @@ static void __init calibrate_APIC_clock(void)
clockevent_delta2ns(0xF, &lapic_clockevent);
calibration_result = result / HZ;
/*
* Do a sanity check on the APIC calibration result
*/
if (calibration_result < (1000000 / HZ)) {
printk(KERN_WARNING
"APIC frequency too slow, disabling apic timer\n");
return -1;
}
return 0;
}
/*
......@@ -394,14 +405,7 @@ void __init setup_boot_APIC_clock(void)
}
printk(KERN_INFO "Using local APIC timer interrupts.\n");
calibrate_APIC_clock();
/*
* Do a sanity check on the APIC calibration result
*/
if (calibration_result < (1000000 / HZ)) {
printk(KERN_WARNING
"APIC frequency too slow, disabling apic timer\n");
if (calibrate_APIC_clock()) {
/* No broadcast on UP ! */
if (num_possible_cpus() > 1)
setup_APIC_timer();
......@@ -1337,7 +1341,7 @@ early_param("apic", apic_set_verbosity);
static __init int setup_disableapic(char *str)
{
disable_apic = 1;
clear_cpu_cap(&boot_cpu_data, X86_FEATURE_APIC);
setup_clear_cpu_cap(X86_FEATURE_APIC);
return 0;
}
early_param("disableapic", setup_disableapic);
......
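For reference (not part of this commit): calibrate_APIC_clock() now returns an error instead of leaving the sanity check to its caller. calibration_result is the number of APIC timer ticks per jiffy, so the 1000000/HZ threshold rejects any APIC timer clocked below roughly 1 MHz. A small numeric illustration with assumed values:

/*
 * Illustration only -- not part of the commit. The HZ value and the
 * APIC timer frequency below are assumed examples, chosen to show the
 * threshold in calibrate_APIC_clock() firing.
 */
#include <stdio.h>

int main(void)
{
	const unsigned long hz = 1000;		/* assumed CONFIG_HZ */
	unsigned long apic_timer_hz = 800000;	/* hypothetical 800 kHz timer */
	unsigned long calibration_result = apic_timer_hz / hz;	/* ticks/jiffy */

	if (calibration_result < (1000000 / hz))
		printf("APIC frequency too slow, disabling apic timer\n");
	else
		printf("APIC timer usable: %lu ticks per jiffy\n",
		       calibration_result);
	return 0;
}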
......@@ -18,6 +18,8 @@
#include <asm/ia32.h>
#include <asm/bootparam.h>
#include <xen/interface/xen.h>
#define __NO_STUBS 1
#undef __SYSCALL
#undef _ASM_X86_64_UNISTD_H_
......@@ -131,5 +133,14 @@ int main(void)
OFFSET(BP_loadflags, boot_params, hdr.loadflags);
OFFSET(BP_hardware_subarch, boot_params, hdr.hardware_subarch);
OFFSET(BP_version, boot_params, hdr.version);
BLANK();
DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
#ifdef CONFIG_XEN
BLANK();
OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
OFFSET(XEN_vcpu_info_pending, vcpu_info, evtchn_upcall_pending);
#undef ENTRY
#endif
return 0;
}
/*
* BIOS run time interface routines.
*
* Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <asm/uv/bios.h>
const char *
x86_bios_strerror(long status)
{
const char *str;
switch (status) {
case 0: str = "Call completed without error"; break;
case -1: str = "Not implemented"; break;
case -2: str = "Invalid argument"; break;
case -3: str = "Call completed with error"; break;
default: str = "Unknown BIOS status code"; break;
}
return str;
}
long
x86_bios_freq_base(unsigned long which, unsigned long *ticks_per_second,
unsigned long *drift_info)
{
struct uv_bios_retval isrv;
BIOS_CALL(isrv, BIOS_FREQ_BASE, which, 0, 0, 0, 0, 0, 0);
*ticks_per_second = isrv.v0;
*drift_info = isrv.v1;
return isrv.status;
}
EXPORT_SYMBOL_GPL(x86_bios_freq_base);
......@@ -24,8 +24,6 @@
extern void vide(void);
__asm__(".align 4\nvide: ret");
int force_mwait __cpuinitdata;
static void __cpuinit early_init_amd(struct cpuinfo_x86 *c)
{
if (cpuid_eax(0x80000000) >= 0x80000007) {
......
......@@ -115,6 +115,8 @@ static void __cpuinit early_init_amd(struct cpuinfo_x86 *c)
/* c->x86_power is 8000_0007 edx. Bit 8 is constant TSC */
if (c->x86_power & (1<<8))
set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
set_cpu_cap(c, X86_FEATURE_SYSCALL32);
}
static void __cpuinit init_amd(struct cpuinfo_x86 *c)
......
......@@ -131,13 +131,7 @@ static void __init check_popad(void)
* (due to lack of "invlpg" and working WP on a i386)
* - In order to run on anything without a TSC, we need to be
* compiled for a i486.
* - In order to support the local APIC on a buggy Pentium machine,
* we need to be compiled with CONFIG_X86_GOOD_APIC disabled,
* which happens implicitly if compiled for a Pentium or lower
* (unless an advanced selection of CPU features is used) as an
* otherwise config implies a properly working local APIC without
* the need to do extra reads from the APIC.
*/
*/
static void __init check_config(void)
{
......@@ -151,21 +145,6 @@ static void __init check_config(void)
if (boot_cpu_data.x86 == 3)
panic("Kernel requires i486+ for 'invlpg' and other features");
#endif
/*
* If we were told we had a good local APIC, check for buggy Pentia,
* i.e. all B steppings and the C2 stepping of P54C when using their
* integrated APIC (see 11AP erratum in "Pentium Processor
* Specification Update").
*/
#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_GOOD_APIC)
if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL
&& cpu_has_apic
&& boot_cpu_data.x86 == 5
&& boot_cpu_data.x86_model == 2
&& (boot_cpu_data.x86_mask < 6 || boot_cpu_data.x86_mask == 11))
panic("Kernel compiled for PMMX+, assumes a local APIC without the read-before-write bug!");
#endif
}
......
......@@ -7,15 +7,13 @@
#include <linux/module.h>
#include <linux/kgdb.h>
#include <linux/topology.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/smp.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <asm/processor.h>
#include <asm/i387.h>
#include <asm/msr.h>
#include <asm/io.h>
#include <asm/linkage.h>
#include <asm/mmu_context.h>
#include <asm/mtrr.h>
#include <asm/mce.h>
......@@ -305,7 +303,6 @@ static void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c)
c->x86_capability[2] = cpuid_edx(0x80860001);
}
c->extended_cpuid_level = cpuid_eax(0x80000000);
if (c->extended_cpuid_level >= 0x80000007)
c->x86_power = cpuid_edx(0x80000007);
......@@ -316,18 +313,11 @@ static void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c)
c->x86_phys_bits = eax & 0xff;
}
/* Assume all 64-bit CPUs support 32-bit syscall */
set_cpu_cap(c, X86_FEATURE_SYSCALL32);
if (c->x86_vendor != X86_VENDOR_UNKNOWN &&
cpu_devs[c->x86_vendor]->c_early_init)
cpu_devs[c->x86_vendor]->c_early_init(c);
validate_pat_support(c);
/* early_param could clear that, but recall get it set again */
if (disable_apic)
clear_cpu_cap(c, X86_FEATURE_APIC);
}
/*
......@@ -517,8 +507,7 @@ void pda_init(int cpu)
}
char boot_exception_stacks[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ +
DEBUG_STKSZ]
__attribute__((section(".bss.page_aligned")));
DEBUG_STKSZ] __page_aligned_bss;
extern asmlinkage void ignore_sysret(void);
......
......@@ -227,6 +227,16 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c)
if (cpu_has_bts)
ds_init_intel(c);
/*
* See if we have a good local APIC by checking for buggy Pentia,
* i.e. all B steppings and the C2 stepping of P54C when using their
* integrated APIC (see 11AP erratum in "Pentium Processor
* Specification Update").
*/
if (cpu_has_apic && (c->x86<<8 | c->x86_model<<4) == 0x520 &&
(c->x86_mask < 0x6 || c->x86_mask == 0xb))
set_cpu_cap(c, X86_FEATURE_11AP);
#ifdef CONFIG_X86_NUMAQ
numaq_tsc_disable();
#endif
......
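For reference (not part of this commit): the 11AP-erratum detection added to init_intel() above packs family and model into one comparison. For the affected P54C parts the family is 5 and the model is 2 (the same values the removed check_config() test used), and only that combination matches 0x520:

/*
 * Illustration only -- not part of the commit. Verifies the packed
 * family/model value tested in the new 11AP check.
 */
#include <stdio.h>

int main(void)
{
	unsigned int x86 = 5;		/* P54C family */
	unsigned int x86_model = 2;	/* P54C model */

	printf("0x%x\n", x86 << 8 | x86_model << 4);	/* prints 0x520 */
	return 0;
}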
......@@ -102,7 +102,7 @@ static void intel_init_thermal(struct cpuinfo_x86 *c)
/* The temperature transition interrupt handler setup */
h = THERMAL_APIC_VECTOR; /* our delivery vector */
h |= (APIC_DM_FIXED | APIC_LVT_MASKED); /* we'll mask till we're ready */
apic_write_around(APIC_LVTTHMR, h);
apic_write(APIC_LVTTHMR, h);
rdmsr(MSR_IA32_THERM_INTERRUPT, l, h);
wrmsr(MSR_IA32_THERM_INTERRUPT, l | 0x03 , h);
......@@ -114,7 +114,7 @@ static void intel_init_thermal(struct cpuinfo_x86 *c)
wrmsr(MSR_IA32_MISC_ENABLE, l | (1<<3), h);
l = apic_read(APIC_LVTTHMR);
apic_write_around(APIC_LVTTHMR, l & ~APIC_LVT_MASKED);
apic_write(APIC_LVTTHMR, l & ~APIC_LVT_MASKED);
printk(KERN_INFO "CPU%d: Thermal monitoring enabled\n", cpu);
/* enable thermal throttle processing */
......
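For reference (not part of this commit): the apic_write_around() -> apic_write() conversions here and in the io_apic/ipi/nmi hunks below go hand in hand with dropping CONFIG_X86_GOOD_APIC above. A sketch of the pre-patch macro, reconstructed from memory and possibly not letter-exact, shows why the rename is safe once the config option is gone:

/*
 * Sketch only -- not part of the commit and not guaranteed to match
 * the old include/asm-x86/apic.h exactly. Pre-patch, the macro fell
 * back to the read-before-write (xchg-based) variant unless
 * X86_GOOD_APIC was set; with the option removed, every caller can
 * simply use apic_write().
 */
#ifdef CONFIG_X86_GOOD_APIC
# define apic_write_around(x, y)	apic_write((x), (y))
#else
# define apic_write_around(x, y)	apic_write_atomic((x), (y))
#endif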
......@@ -877,7 +877,8 @@ void __init early_res_to_bootmem(u64 start, u64 end)
for (i = 0; i < MAX_EARLY_RES && early_res[i].end; i++)
count++;
printk(KERN_INFO "(%d early reservations) ==> bootmem\n", count);
printk(KERN_INFO "(%d early reservations) ==> bootmem [%010llx - %010llx]\n",
count, start, end);
for (i = 0; i < count; i++) {
struct early_res *r = &early_res[i];
printk(KERN_INFO " #%d [%010llx - %010llx] %16s", i,
......@@ -1298,11 +1299,6 @@ void __init e820_reserve_resources(void)
}
}
/*
* Non-standard memory setup can be specified via this quirk:
*/
char * (*arch_memory_setup_quirk)(void);
char *__init default_machine_specific_memory_setup(void)
{
char *who = "BIOS-e820";
......@@ -1343,8 +1339,8 @@ char *__init default_machine_specific_memory_setup(void)
char *__init __attribute__((weak)) machine_specific_memory_setup(void)
{
if (arch_memory_setup_quirk) {
char *who = arch_memory_setup_quirk();
if (x86_quirks->arch_memory_setup) {
char *who = x86_quirks->arch_memory_setup();
if (who)
return who;
......@@ -1367,24 +1363,3 @@ void __init setup_memory_map(void)
printk(KERN_INFO "BIOS-provided physical RAM map:\n");
e820_print_map(who);
}
#ifdef CONFIG_X86_64
int __init arch_get_ram_range(int slot, u64 *addr, u64 *size)
{
int i;
if (slot < 0 || slot >= e820.nr_map)
return -1;
for (i = slot; i < e820.nr_map; i++) {
if (e820.map[i].type != E820_RAM)
continue;
break;
}
if (i == e820.nr_map || e820.map[i].addr > (max_pfn << PAGE_SHIFT))
return -1;
*addr = e820.map[i].addr;
*size = min_t(u64, e820.map[i].size + e820.map[i].addr,
max_pfn << PAGE_SHIFT) - *addr;
return i + 1;
}
#endif
......@@ -16,10 +16,7 @@
#include <asm/dma.h>
#include <asm/io_apic.h>
#include <asm/apic.h>
#ifdef CONFIG_GART_IOMMU
#include <asm/gart.h>
#endif
#include <asm/iommu.h>
static void __init fix_hypertransport_config(int num, int slot, int func)
{
......
......@@ -332,7 +332,7 @@ sysenter_past_esp:
GET_THREAD_INFO(%ebp)
/* Note, _TIF_SECCOMP is bit number 8, and so it needs testw and not testb */
testw $(_TIF_SYSCALL_EMU|_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
testw $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
jnz syscall_trace_entry
cmpl $(nr_syscalls), %eax
jae syscall_badsys
......@@ -370,7 +370,7 @@ ENTRY(system_call)
GET_THREAD_INFO(%ebp)
# system call tracing in operation / emulation
/* Note, _TIF_SECCOMP is bit number 8, and so it needs testw and not testb */
testw $(_TIF_SYSCALL_EMU|_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
testw $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
jnz syscall_trace_entry
cmpl $(nr_syscalls), %eax
jae syscall_badsys
......@@ -383,10 +383,6 @@ syscall_exit:
# setting need_resched or sigpending
# between sampling and the iret
TRACE_IRQS_OFF
testl $X86_EFLAGS_TF,PT_EFLAGS(%esp) # If tracing set singlestep flag on exit
jz no_singlestep
orl $_TIF_SINGLESTEP,TI_flags(%ebp)
no_singlestep:
movl TI_flags(%ebp), %ecx
testw $_TIF_ALLWORK_MASK, %cx # current->work
jne syscall_exit_work
......@@ -514,12 +510,8 @@ END(work_pending)
syscall_trace_entry:
movl $-ENOSYS,PT_EAX(%esp)
movl %esp, %eax
xorl %edx,%edx
call do_syscall_trace
cmpl $0, %eax
jne resume_userspace # ret != 0 -> running under PTRACE_SYSEMU,
# so must skip actual syscall
movl PT_ORIG_EAX(%esp), %eax
call syscall_trace_enter
/* What it returned is what we'll actually use. */
cmpl $(nr_syscalls), %eax
jnae syscall_call
jmp syscall_exit
......@@ -528,14 +520,13 @@ END(syscall_trace_entry)
# perform syscall exit tracing
ALIGN
syscall_exit_work:
testb $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP), %cl
testb $_TIF_WORK_SYSCALL_EXIT, %cl
jz work_pending
TRACE_IRQS_ON
ENABLE_INTERRUPTS(CLBR_ANY) # could let do_syscall_trace() call
ENABLE_INTERRUPTS(CLBR_ANY) # could let syscall_trace_leave() call
# schedule() instead
movl %esp, %eax
movl $1, %edx
call do_syscall_trace
call syscall_trace_leave
jmp resume_userspace
END(syscall_exit_work)
CFI_ENDPROC
......@@ -1024,6 +1015,7 @@ ENDPROC(kernel_thread_helper)
ENTRY(xen_sysenter_target)
RING0_INT_FRAME
addl $5*4, %esp /* remove xen-provided frame */
CFI_ADJUST_CFA_OFFSET -5*4
jmp sysenter_past_esp
CFI_ENDPROC
......
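For reference (not part of this commit): entry_32.S now calls the shared syscall_trace_enter()/syscall_trace_leave() hooks instead of do_syscall_trace(). Their implementations live in arch/x86/kernel/ptrace.c and are not shown in this excerpt; a sketch of the contract the assembly relies on, inferred from how the return value is used above:

/*
 * Sketch only -- inferred contract, not the actual ptrace.c code.
 * syscall_trace_enter() does the entry-side work (seccomp, ptrace,
 * audit) and returns the syscall number to dispatch; an out-of-range
 * return makes the assembly skip the call table, which is how
 * PTRACE_SYSEMU suppresses the system call. syscall_trace_leave()
 * does the exit-side work (audit result, single-step SIGTRAP).
 */
struct pt_regs;

long syscall_trace_enter(struct pt_regs *regs);
void syscall_trace_leave(struct pt_regs *regs);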
......@@ -349,8 +349,7 @@ ENTRY(system_call_after_swapgs)
movq %rcx,RIP-ARGOFFSET(%rsp)
CFI_REL_OFFSET rip,RIP-ARGOFFSET
GET_THREAD_INFO(%rcx)
testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP), \
TI_flags(%rcx)
testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%rcx)
jnz tracesys
cmpq $__NR_syscall_max,%rax
ja badsys
......@@ -430,7 +429,12 @@ tracesys:
FIXUP_TOP_OF_STACK %rdi
movq %rsp,%rdi
call syscall_trace_enter
LOAD_ARGS ARGOFFSET /* reload args from stack in case ptrace changed it */
/*
* Reload arg registers from stack in case ptrace changed them.
* We don't reload %rax because syscall_trace_enter() returned
* the value it wants us to use in the table lookup.
*/
LOAD_ARGS ARGOFFSET, 1
RESTORE_REST
cmpq $__NR_syscall_max,%rax
ja int_ret_from_sys_call /* RAX(%rsp) set to -ENOSYS above */
......@@ -483,7 +487,7 @@ int_very_careful:
ENABLE_INTERRUPTS(CLBR_NONE)
SAVE_REST
/* Check for syscall exit trace */
testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP),%edx
testl $_TIF_WORK_SYSCALL_EXIT,%edx
jz int_signal
pushq %rdi
CFI_ADJUST_CFA_OFFSET 8
......@@ -491,7 +495,7 @@ int_very_careful:
call syscall_trace_leave
popq %rdi
CFI_ADJUST_CFA_OFFSET -8
andl $~(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP),%edi
andl $~(_TIF_WORK_SYSCALL_EXIT|_TIF_SYSCALL_EMU),%edi
jmp int_restore_rest
int_signal:
......@@ -1189,6 +1193,7 @@ END(device_not_available)
/* runs on exception stack */
KPROBE_ENTRY(debug)
INTR_FRAME
PARAVIRT_ADJUST_EXCEPTION_FRAME
pushq $0
CFI_ADJUST_CFA_OFFSET 8
paranoidentry do_debug, DEBUG_STACK
......@@ -1198,6 +1203,7 @@ KPROBE_END(debug)
/* runs on exception stack */
KPROBE_ENTRY(nmi)
INTR_FRAME
PARAVIRT_ADJUST_EXCEPTION_FRAME
pushq $-1
CFI_ADJUST_CFA_OFFSET 8
paranoidentry do_nmi, 0, 0
......@@ -1211,6 +1217,7 @@ KPROBE_END(nmi)
KPROBE_ENTRY(int3)
INTR_FRAME
PARAVIRT_ADJUST_EXCEPTION_FRAME
pushq $0
CFI_ADJUST_CFA_OFFSET 8
paranoidentry do_int3, DEBUG_STACK
......@@ -1237,6 +1244,7 @@ END(coprocessor_segment_overrun)
/* runs on exception stack */
ENTRY(double_fault)
XCPT_FRAME
PARAVIRT_ADJUST_EXCEPTION_FRAME
paranoidentry do_double_fault
jmp paranoid_exit1
CFI_ENDPROC
......@@ -1253,6 +1261,7 @@ END(segment_not_present)
/* runs on exception stack */
ENTRY(stack_segment)
XCPT_FRAME
PARAVIRT_ADJUST_EXCEPTION_FRAME
paranoidentry do_stack_segment
jmp paranoid_exit1
CFI_ENDPROC
......@@ -1278,6 +1287,7 @@ END(spurious_interrupt_bug)
/* runs on exception stack */
ENTRY(machine_check)
INTR_FRAME
PARAVIRT_ADJUST_EXCEPTION_FRAME
pushq $0
CFI_ADJUST_CFA_OFFSET 8
paranoidentry do_machine_check
......@@ -1312,3 +1322,103 @@ KPROBE_ENTRY(ignore_sysret)
sysret
CFI_ENDPROC
ENDPROC(ignore_sysret)
#ifdef CONFIG_XEN
ENTRY(xen_hypervisor_callback)
zeroentry xen_do_hypervisor_callback
END(xen_hypervisor_callback)
/*
# A note on the "critical region" in our callback handler.
# We want to avoid stacking callback handlers due to events occurring
# during handling of the last event. To do this, we keep events disabled
# until we've done all processing. HOWEVER, we must enable events before
# popping the stack frame (can't be done atomically) and so it would still
# be possible to get enough handler activations to overflow the stack.
# Although unlikely, bugs of that kind are hard to track down, so we'd
# like to avoid the possibility.
# So, on entry to the handler we detect whether we interrupted an
# existing activation in its critical region -- if so, we pop the current
# activation and restart the handler using the previous one.
*/
ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
CFI_STARTPROC
/* Since we don't modify %rdi, evtchn_do_upall(struct *pt_regs) will
see the correct pointer to the pt_regs */
movq %rdi, %rsp # we don't return, adjust the stack frame
CFI_ENDPROC
CFI_DEFAULT_STACK
11: incl %gs:pda_irqcount
movq %rsp,%rbp
CFI_DEF_CFA_REGISTER rbp
cmovzq %gs:pda_irqstackptr,%rsp
pushq %rbp # backlink for old unwinder
call xen_evtchn_do_upcall
popq %rsp
CFI_DEF_CFA_REGISTER rsp
decl %gs:pda_irqcount
jmp error_exit
CFI_ENDPROC
END(do_hypervisor_callback)
/*
# Hypervisor uses this for application faults while it executes.
# We get here for two reasons:
# 1. Fault while reloading DS, ES, FS or GS
# 2. Fault while executing IRET
# Category 1 we do not need to fix up as Xen has already reloaded all segment
# registers that could be reloaded and zeroed the others.
# Category 2 we fix up by killing the current process. We cannot use the
# normal Linux return path in this case because if we use the IRET hypercall
# to pop the stack frame we end up in an infinite loop of failsafe callbacks.
# We distinguish between categories by comparing each saved segment register
# with its current contents: any discrepancy means we are in category 1.
*/
ENTRY(xen_failsafe_callback)
framesz = (RIP-0x30) /* workaround buggy gas */
_frame framesz
CFI_REL_OFFSET rcx, 0
CFI_REL_OFFSET r11, 8
movw %ds,%cx
cmpw %cx,0x10(%rsp)
CFI_REMEMBER_STATE
jne 1f
movw %es,%cx
cmpw %cx,0x18(%rsp)
jne 1f
movw %fs,%cx
cmpw %cx,0x20(%rsp)
jne 1f
movw %gs,%cx
cmpw %cx,0x28(%rsp)
jne 1f
/* All segments match their saved values => Category 2 (Bad IRET). */
movq (%rsp),%rcx
CFI_RESTORE rcx
movq 8(%rsp),%r11
CFI_RESTORE r11
addq $0x30,%rsp
CFI_ADJUST_CFA_OFFSET -0x30
pushq $0
CFI_ADJUST_CFA_OFFSET 8
pushq %r11
CFI_ADJUST_CFA_OFFSET 8
pushq %rcx
CFI_ADJUST_CFA_OFFSET 8
jmp general_protection
CFI_RESTORE_STATE
1: /* Segment mismatch => Category 1 (Bad segment). Retry the IRET. */
movq (%rsp),%rcx
CFI_RESTORE rcx
movq 8(%rsp),%r11
CFI_RESTORE r11
addq $0x30,%rsp
CFI_ADJUST_CFA_OFFSET -0x30
pushq $0
CFI_ADJUST_CFA_OFFSET 8
SAVE_ALL
jmp error_exit
CFI_ENDPROC
END(xen_failsafe_callback)
#endif /* CONFIG_XEN */
......@@ -24,6 +24,7 @@
#include <asm/pgtable.h>
#include <asm/uv/uv_mmrs.h>
#include <asm/uv/uv_hub.h>
#include <asm/uv/bios.h>
DEFINE_PER_CPU(struct uv_hub_info_s, __uv_hub_info);
EXPORT_PER_CPU_SYMBOL_GPL(__uv_hub_info);
......@@ -40,6 +41,9 @@ EXPORT_SYMBOL_GPL(uv_cpu_to_blade);
short uv_possible_blades;
EXPORT_SYMBOL_GPL(uv_possible_blades);
unsigned long sn_rtc_cycles_per_second;
EXPORT_SYMBOL(sn_rtc_cycles_per_second);
/* Start with all IRQs pointing to boot CPU. IRQ balancing will shift them. */
static cpumask_t uv_target_cpus(void)
......@@ -272,6 +276,23 @@ static __init void map_mmioh_high(int max_pnode)
map_high("MMIOH", mmioh.s.base, shift, map_uc);
}
static __init void uv_rtc_init(void)
{
long status, ticks_per_sec, drift;
status =
x86_bios_freq_base(BIOS_FREQ_BASE_REALTIME_CLOCK, &ticks_per_sec,
&drift);
if (status != 0 || ticks_per_sec < 100000) {
printk(KERN_WARNING
"unable to determine platform RTC clock frequency, "
"guessing.\n");
/* BIOS gives wrong value for clock freq. so guess */
sn_rtc_cycles_per_second = 1000000000000UL / 30000UL;
} else
sn_rtc_cycles_per_second = ticks_per_sec;
}
static __init void uv_system_init(void)
{
union uvh_si_addr_map_config_u m_n_config;
......@@ -326,6 +347,8 @@ static __init void uv_system_init(void)
gnode_upper = (((unsigned long)node_id.s.node_id) &
~((1 << n_val) - 1)) << m_val;
uv_rtc_init();
for_each_present_cpu(cpu) {
nid = cpu_to_node(cpu);
pnode = uv_apicid_to_pnode(per_cpu(x86_cpu_to_apicid, cpu));
......
......@@ -39,6 +39,13 @@ static struct x8664_pda *__cpu_pda[NR_CPUS] __initdata;
static struct x8664_pda *__cpu_pda[NR_CPUS] __read_mostly;
#endif
void __init x86_64_init_pda(void)
{
_cpu_pda = __cpu_pda;
cpu_pda(0) = &_boot_cpu_pda;
pda_init(0);
}
static void __init zap_identity_mappings(void)
{
pgd_t *pgd = pgd_offset_k(0UL);
......@@ -102,9 +109,7 @@ void __init x86_64_start_kernel(char * real_mode_data)
early_printk("Kernel alive\n");
_cpu_pda = __cpu_pda;
cpu_pda(0) = &_boot_cpu_pda;
pda_init(0);
x86_64_init_pda();
early_printk("Kernel really alive\n");
......
......@@ -407,6 +407,7 @@ ENTRY(phys_base)
/* This must match the first entry in level2_kernel_pgt */
.quad 0x0000000000000000
#include "../../x86/xen/xen-head.S"
.section .bss, "aw", @nobits
.align L1_CACHE_BYTES
......
......@@ -756,7 +756,7 @@ void send_IPI_self(int vector)
/*
* Send the IPI. The write to APIC_ICR fires this off.
*/
apic_write_around(APIC_ICR, cfg);
apic_write(APIC_ICR, cfg);
}
#endif /* !CONFIG_SMP */
......@@ -2030,7 +2030,7 @@ static void mask_lapic_irq(unsigned int irq)
unsigned long v;
v = apic_read(APIC_LVT0);
apic_write_around(APIC_LVT0, v | APIC_LVT_MASKED);
apic_write(APIC_LVT0, v | APIC_LVT_MASKED);
}
static void unmask_lapic_irq(unsigned int irq)
......@@ -2038,7 +2038,7 @@ static void unmask_lapic_irq(unsigned int irq)
unsigned long v;
v = apic_read(APIC_LVT0);
apic_write_around(APIC_LVT0, v & ~APIC_LVT_MASKED);
apic_write(APIC_LVT0, v & ~APIC_LVT_MASKED);
}
static struct irq_chip lapic_chip __read_mostly = {
......@@ -2168,7 +2168,7 @@ static inline void __init check_timer(void)
* The AEOI mode will finish them in the 8259A
* automatically.
*/
apic_write_around(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
init_8259A(1);
timer_ack = (nmi_watchdog == NMI_IO_APIC && !APIC_INTEGRATED(ver));
......@@ -2177,8 +2177,9 @@ static inline void __init check_timer(void)
pin2 = ioapic_i8259.pin;
apic2 = ioapic_i8259.apic;
printk(KERN_INFO "..TIMER: vector=0x%02X apic1=%d pin1=%d apic2=%d pin2=%d\n",
vector, apic1, pin1, apic2, pin2);
apic_printk(APIC_QUIET, KERN_INFO "..TIMER: vector=0x%02X "
"apic1=%d pin1=%d apic2=%d pin2=%d\n",
vector, apic1, pin1, apic2, pin2);
/*
* Some BIOS writers are clueless and report the ExtINTA
......@@ -2216,12 +2217,13 @@ static inline void __init check_timer(void)
}
clear_IO_APIC_pin(apic1, pin1);
if (!no_pin1)
printk(KERN_ERR "..MP-BIOS bug: "
"8254 timer not connected to IO-APIC\n");
apic_printk(APIC_QUIET, KERN_ERR "..MP-BIOS bug: "
"8254 timer not connected to IO-APIC\n");
printk(KERN_INFO "...trying to set up timer (IRQ0) "
"through the 8259A ... ");
printk("\n..... (found pin %d) ...", pin2);
apic_printk(APIC_QUIET, KERN_INFO "...trying to set up timer "
"(IRQ0) through the 8259A ...\n");
apic_printk(APIC_QUIET, KERN_INFO
"..... (found apic %d pin %d) ...\n", apic2, pin2);
/*
* legacy devices should be connected to IO APIC #0
*/
......@@ -2230,7 +2232,7 @@ static inline void __init check_timer(void)
unmask_IO_APIC_irq(0);
enable_8259A_irq(0);
if (timer_irq_works()) {
printk("works.\n");
apic_printk(APIC_QUIET, KERN_INFO "....... works.\n");
timer_through_8259 = 1;
if (nmi_watchdog == NMI_IO_APIC) {
disable_8259A_irq(0);
......@@ -2244,44 +2246,47 @@ static inline void __init check_timer(void)
*/
disable_8259A_irq(0);
clear_IO_APIC_pin(apic2, pin2);
printk(" failed.\n");
apic_printk(APIC_QUIET, KERN_INFO "....... failed.\n");
}
if (nmi_watchdog == NMI_IO_APIC) {
printk(KERN_WARNING "timer doesn't work through the IO-APIC - disabling NMI Watchdog!\n");
apic_printk(APIC_QUIET, KERN_WARNING "timer doesn't work "
"through the IO-APIC - disabling NMI Watchdog!\n");
nmi_watchdog = NMI_NONE;
}
timer_ack = 0;
printk(KERN_INFO "...trying to set up timer as Virtual Wire IRQ...");
apic_printk(APIC_QUIET, KERN_INFO
"...trying to set up timer as Virtual Wire IRQ...\n");
lapic_register_intr(0, vector);
apic_write_around(APIC_LVT0, APIC_DM_FIXED | vector); /* Fixed mode */
apic_write(APIC_LVT0, APIC_DM_FIXED | vector); /* Fixed mode */
enable_8259A_irq(0);
if (timer_irq_works()) {
printk(" works.\n");
apic_printk(APIC_QUIET, KERN_INFO "..... works.\n");
goto out;
}
disable_8259A_irq(0);
apic_write_around(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | vector);
printk(" failed.\n");
apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | vector);
apic_printk(APIC_QUIET, KERN_INFO "..... failed.\n");
printk(KERN_INFO "...trying to set up timer as ExtINT IRQ...");
apic_printk(APIC_QUIET, KERN_INFO
"...trying to set up timer as ExtINT IRQ...\n");
init_8259A(0);
make_8259A_irq(0);
apic_write_around(APIC_LVT0, APIC_DM_EXTINT);
apic_write(APIC_LVT0, APIC_DM_EXTINT);
unlock_ExtINT_logic();
if (timer_irq_works()) {
printk(" works.\n");
apic_printk(APIC_QUIET, KERN_INFO "..... works.\n");
goto out;
}
printk(" failed :(.\n");
apic_printk(APIC_QUIET, KERN_INFO "..... failed :(.\n");
panic("IO-APIC + timer doesn't work! Boot with apic=debug and send a "
"report. Then try booting with the 'noapic' option");
"report. Then try booting with the 'noapic' option.\n");
out:
local_irq_restore(flags);
}
......
......@@ -45,6 +45,7 @@
#include <asm/proto.h>
#include <asm/acpi.h>
#include <asm/dma.h>
#include <asm/i8259.h>
#include <asm/nmi.h>
#include <asm/msidef.h>
#include <asm/hypertransport.h>
......@@ -1696,8 +1697,9 @@ static inline void __init check_timer(void)
pin2 = ioapic_i8259.pin;
apic2 = ioapic_i8259.apic;
apic_printk(APIC_VERBOSE,KERN_INFO "..TIMER: vector=0x%02X apic1=%d pin1=%d apic2=%d pin2=%d\n",
cfg->vector, apic1, pin1, apic2, pin2);
apic_printk(APIC_QUIET, KERN_INFO "..TIMER: vector=0x%02X "
"apic1=%d pin1=%d apic2=%d pin2=%d\n",
cfg->vector, apic1, pin1, apic2, pin2);
/*
* Some BIOS writers are clueless and report the ExtINTA
......@@ -1735,14 +1737,13 @@ static inline void __init check_timer(void)
}
clear_IO_APIC_pin(apic1, pin1);
if (!no_pin1)
apic_printk(APIC_QUIET,KERN_ERR "..MP-BIOS bug: "
apic_printk(APIC_QUIET, KERN_ERR "..MP-BIOS bug: "
"8254 timer not connected to IO-APIC\n");
apic_printk(APIC_VERBOSE,KERN_INFO
"...trying to set up timer (IRQ0) "
"through the 8259A ... ");
apic_printk(APIC_VERBOSE,"\n..... (found apic %d pin %d) ...",
apic2, pin2);
apic_printk(APIC_QUIET, KERN_INFO "...trying to set up timer "
"(IRQ0) through the 8259A ...\n");
apic_printk(APIC_QUIET, KERN_INFO
"..... (found apic %d pin %d) ...\n", apic2, pin2);
/*
* legacy devices should be connected to IO APIC #0
*/
......@@ -1751,7 +1752,7 @@ static inline void __init check_timer(void)
unmask_IO_APIC_irq(0);
enable_8259A_irq(0);
if (timer_irq_works()) {
apic_printk(APIC_VERBOSE," works.\n");
apic_printk(APIC_QUIET, KERN_INFO "....... works.\n");
timer_through_8259 = 1;
if (nmi_watchdog == NMI_IO_APIC) {
disable_8259A_irq(0);
......@@ -1765,29 +1766,32 @@ static inline void __init check_timer(void)
*/
disable_8259A_irq(0);
clear_IO_APIC_pin(apic2, pin2);
apic_printk(APIC_VERBOSE," failed.\n");
apic_printk(APIC_QUIET, KERN_INFO "....... failed.\n");
}
if (nmi_watchdog == NMI_IO_APIC) {
printk(KERN_WARNING "timer doesn't work through the IO-APIC - disabling NMI Watchdog!\n");
apic_printk(APIC_QUIET, KERN_WARNING "timer doesn't work "
"through the IO-APIC - disabling NMI Watchdog!\n");
nmi_watchdog = NMI_NONE;
}
apic_printk(APIC_VERBOSE, KERN_INFO "...trying to set up timer as Virtual Wire IRQ...");
apic_printk(APIC_QUIET, KERN_INFO
"...trying to set up timer as Virtual Wire IRQ...\n");
lapic_register_intr(0);
apic_write(APIC_LVT0, APIC_DM_FIXED | cfg->vector); /* Fixed mode */
enable_8259A_irq(0);
if (timer_irq_works()) {
apic_printk(APIC_VERBOSE," works.\n");
apic_printk(APIC_QUIET, KERN_INFO "..... works.\n");
goto out;
}
disable_8259A_irq(0);
apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | cfg->vector);
apic_printk(APIC_VERBOSE," failed.\n");
apic_printk(APIC_QUIET, KERN_INFO "..... failed.\n");
apic_printk(APIC_VERBOSE, KERN_INFO "...trying to set up timer as ExtINT IRQ...");
apic_printk(APIC_QUIET, KERN_INFO
"...trying to set up timer as ExtINT IRQ...\n");
init_8259A(0);
make_8259A_irq(0);
......@@ -1796,11 +1800,12 @@ static inline void __init check_timer(void)
unlock_ExtINT_logic();
if (timer_irq_works()) {
apic_printk(APIC_VERBOSE," works.\n");
apic_printk(APIC_QUIET, KERN_INFO "..... works.\n");
goto out;
}
apic_printk(APIC_VERBOSE," failed :(.\n");
panic("IO-APIC + timer doesn't work! Try using the 'noapic' kernel parameter\n");
apic_printk(APIC_QUIET, KERN_INFO "..... failed :(.\n");
panic("IO-APIC + timer doesn't work! Boot with apic=debug and send a "
"report. Then try booting with the 'noapic' option.\n");
out:
local_irq_restore(flags);
}
......
......@@ -103,6 +103,9 @@ void __init io_delay_init(void)
static int __init io_delay_param(char *s)
{
if (!s)
return -EINVAL;
if (!strcmp(s, "0x80"))
io_delay_type = CONFIG_IO_DELAY_TYPE_0X80;
else if (!strcmp(s, "0xed"))
......
......@@ -70,7 +70,7 @@ void __send_IPI_shortcut(unsigned int shortcut, int vector)
/*
* Send the IPI. The write to APIC_ICR fires this off.
*/
apic_write_around(APIC_ICR, cfg);
apic_write(APIC_ICR, cfg);
}
void send_IPI_self(int vector)
......@@ -98,7 +98,7 @@ static inline void __send_IPI_dest_field(unsigned long mask, int vector)
* prepare target chip field
*/
cfg = __prepare_ICR2(mask);
apic_write_around(APIC_ICR2, cfg);
apic_write(APIC_ICR2, cfg);
/*
* program the ICR
......@@ -108,7 +108,7 @@ static inline void __send_IPI_dest_field(unsigned long mask, int vector)
/*
* Send the IPI. The write to APIC_ICR fires this off.
*/
apic_write_around(APIC_ICR, cfg);
apic_write(APIC_ICR, cfg);
}
/*
......
......@@ -83,11 +83,8 @@ union irq_ctx {
static union irq_ctx *hardirq_ctx[NR_CPUS] __read_mostly;
static union irq_ctx *softirq_ctx[NR_CPUS] __read_mostly;
static char softirq_stack[NR_CPUS * THREAD_SIZE]
__attribute__((__section__(".bss.page_aligned")));
static char hardirq_stack[NR_CPUS * THREAD_SIZE]
__attribute__((__section__(".bss.page_aligned")));
static char softirq_stack[NR_CPUS * THREAD_SIZE] __page_aligned_bss;
static char hardirq_stack[NR_CPUS * THREAD_SIZE] __page_aligned_bss;
static void call_on_stack(void *func, void *stack)
{
......
......@@ -12,9 +12,13 @@
#include <linux/init.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <asm/setup.h>
struct dentry *arch_debugfs_dir;
EXPORT_SYMBOL(arch_debugfs_dir);
#ifdef CONFIG_DEBUG_BOOT_PARAMS
struct setup_data_node {
u64 paddr;
......@@ -209,6 +213,10 @@ static int __init arch_kdebugfs_init(void)
{
int error = 0;
arch_debugfs_dir = debugfs_create_dir("x86", NULL);
if (!arch_debugfs_dir)
return -ENOMEM;
#ifdef CONFIG_DEBUG_BOOT_PARAMS
error = boot_params_kdebugfs_init();
#endif
......
......@@ -860,7 +860,6 @@ static int __kprobes post_kprobe_handler(struct pt_regs *regs)
resume_execution(cur, regs, kcb);
regs->flags |= kcb->kprobe_saved_flags;
trace_hardirqs_fixup_flags(regs->flags);
if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
kcb->kprobe_status = KPROBE_HIT_SSDONE;
......
......@@ -150,7 +150,8 @@ int module_finalize(const Elf_Ehdr *hdr,
const Elf_Shdr *sechdrs,
struct module *me)
{
const Elf_Shdr *s, *text = NULL, *alt = NULL, *locks = NULL;
const Elf_Shdr *s, *text = NULL, *alt = NULL, *locks = NULL,
*para = NULL;
char *secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) {
......@@ -160,6 +161,8 @@ int module_finalize(const Elf_Ehdr *hdr,
alt = s;
if (!strcmp(".smp_locks", secstrings + s->sh_name))
locks= s;
if (!strcmp(".parainstructions", secstrings + s->sh_name))
para = s;
}
if (alt) {
......@@ -175,6 +178,11 @@ int module_finalize(const Elf_Ehdr *hdr,
tseg, tseg + text->sh_size);
}
if (para) {
void *pseg = (void *)para->sh_addr;
apply_paravirt(pseg, pseg + para->sh_size);
}
return module_bug_finalize(hdr, sechdrs, me);
}
......
......@@ -27,6 +27,7 @@
#include <asm/bios_ebda.h>
#include <asm/e820.h>
#include <asm/trampoline.h>
#include <asm/setup.h>
#include <mach_apic.h>
#ifdef CONFIG_X86_32
......@@ -48,76 +49,6 @@ static int __init mpf_checksum(unsigned char *mp, int len)
return sum & 0xFF;
}
#ifdef CONFIG_X86_NUMAQ
int found_numaq;
/*
* Have to match translation table entries to main table entries by counter
* hence the mpc_record variable .... can't see a less disgusting way of
* doing this ....
*/
struct mpc_config_translation {
unsigned char mpc_type;
unsigned char trans_len;
unsigned char trans_type;
unsigned char trans_quad;
unsigned char trans_global;
unsigned char trans_local;
unsigned short trans_reserved;
};
static int mpc_record;
static struct mpc_config_translation *translation_table[MAX_MPC_ENTRY]
__cpuinitdata;
static inline int generate_logical_apicid(int quad, int phys_apicid)
{
return (quad << 4) + (phys_apicid ? phys_apicid << 1 : 1);
}
static inline int mpc_apic_id(struct mpc_config_processor *m,
struct mpc_config_translation *translation_record)
{
int quad = translation_record->trans_quad;
int logical_apicid = generate_logical_apicid(quad, m->mpc_apicid);
printk(KERN_DEBUG "Processor #%d %u:%u APIC version %d (quad %d, apic %d)\n",
m->mpc_apicid,
(m->mpc_cpufeature & CPU_FAMILY_MASK) >> 8,
(m->mpc_cpufeature & CPU_MODEL_MASK) >> 4,
m->mpc_apicver, quad, logical_apicid);
return logical_apicid;
}
int mp_bus_id_to_node[MAX_MP_BUSSES];
int mp_bus_id_to_local[MAX_MP_BUSSES];
static void mpc_oem_bus_info(struct mpc_config_bus *m, char *name,
struct mpc_config_translation *translation)
{
int quad = translation->trans_quad;
int local = translation->trans_local;
mp_bus_id_to_node[m->mpc_busid] = quad;
mp_bus_id_to_local[m->mpc_busid] = local;
printk(KERN_INFO "Bus #%d is %s (node %d)\n",
m->mpc_busid, name, quad);
}
int quad_local_to_mp_bus_id [NR_CPUS/4][4];
static void mpc_oem_pci_bus(struct mpc_config_bus *m,
struct mpc_config_translation *translation)
{
int quad = translation->trans_quad;
int local = translation->trans_local;
quad_local_to_mp_bus_id[quad][local] = m->mpc_busid;
}
#endif
static void __cpuinit MP_processor_info(struct mpc_config_processor *m)
{
int apicid;
......@@ -127,14 +58,12 @@ static void __cpuinit MP_processor_info(struct mpc_config_processor *m)
disabled_cpus++;
return;
}
#ifdef CONFIG_X86_NUMAQ
if (found_numaq)
apicid = mpc_apic_id(m, translation_table[mpc_record]);
if (x86_quirks->mpc_apic_id)
apicid = x86_quirks->mpc_apic_id(m);
else
apicid = m->mpc_apicid;
#else
apicid = m->mpc_apicid;
#endif
if (m->mpc_cpuflag & CPU_BOOTPROCESSOR) {
bootup_cpu = " (Bootup-CPU)";
boot_cpu_physical_apicid = m->mpc_apicid;
......@@ -151,12 +80,10 @@ static void __init MP_bus_info(struct mpc_config_bus *m)
memcpy(str, m->mpc_bustype, 6);
str[6] = 0;
#ifdef CONFIG_X86_NUMAQ
if (found_numaq)
mpc_oem_bus_info(m, str, translation_table[mpc_record]);
#else
printk(KERN_INFO "Bus #%d is %s\n", m->mpc_busid, str);
#endif
if (x86_quirks->mpc_oem_bus_info)
x86_quirks->mpc_oem_bus_info(m, str);
else
printk(KERN_INFO "Bus #%d is %s\n", m->mpc_busid, str);
#if MAX_MP_BUSSES < 256
if (m->mpc_busid >= MAX_MP_BUSSES) {
......@@ -173,10 +100,9 @@ static void __init MP_bus_info(struct mpc_config_bus *m)
mp_bus_id_to_type[m->mpc_busid] = MP_BUS_ISA;
#endif
} else if (strncmp(str, BUSTYPE_PCI, sizeof(BUSTYPE_PCI) - 1) == 0) {
#ifdef CONFIG_X86_NUMAQ
if (found_numaq)
mpc_oem_pci_bus(m, translation_table[mpc_record]);
#endif
if (x86_quirks->mpc_oem_pci_bus)
x86_quirks->mpc_oem_pci_bus(m);
clear_bit(m->mpc_busid, mp_bus_not_pci);
#if defined(CONFIG_EISA) || defined (CONFIG_MCA)
mp_bus_id_to_type[m->mpc_busid] = MP_BUS_PCI;
......@@ -316,83 +242,6 @@ static void __init MP_lintsrc_info(struct mpc_config_lintsrc *m)
m->mpc_srcbusirq, m->mpc_destapic, m->mpc_destapiclint);
}
#ifdef CONFIG_X86_NUMAQ
static void __init MP_translation_info(struct mpc_config_translation *m)
{
printk(KERN_INFO
"Translation: record %d, type %d, quad %d, global %d, local %d\n",
mpc_record, m->trans_type, m->trans_quad, m->trans_global,
m->trans_local);
if (mpc_record >= MAX_MPC_ENTRY)
printk(KERN_ERR "MAX_MPC_ENTRY exceeded!\n");
else
translation_table[mpc_record] = m; /* stash this for later */
if (m->trans_quad < MAX_NUMNODES && !node_online(m->trans_quad))
node_set_online(m->trans_quad);
}
/*
* Read/parse the MPC oem tables
*/
static void __init smp_read_mpc_oem(struct mp_config_oemtable *oemtable,
unsigned short oemsize)
{
int count = sizeof(*oemtable); /* the header size */
unsigned char *oemptr = ((unsigned char *)oemtable) + count;
mpc_record = 0;
printk(KERN_INFO "Found an OEM MPC table at %8p - parsing it ... \n",
oemtable);
if (memcmp(oemtable->oem_signature, MPC_OEM_SIGNATURE, 4)) {
printk(KERN_WARNING
"SMP mpc oemtable: bad signature [%c%c%c%c]!\n",
oemtable->oem_signature[0], oemtable->oem_signature[1],
oemtable->oem_signature[2], oemtable->oem_signature[3]);
return;
}
if (mpf_checksum((unsigned char *)oemtable, oemtable->oem_length)) {
printk(KERN_WARNING "SMP oem mptable: checksum error!\n");
return;
}
while (count < oemtable->oem_length) {
switch (*oemptr) {
case MP_TRANSLATION:
{
struct mpc_config_translation *m =
(struct mpc_config_translation *)oemptr;
MP_translation_info(m);
oemptr += sizeof(*m);
count += sizeof(*m);
++mpc_record;
break;
}
default:
{
printk(KERN_WARNING
"Unrecognised OEM table entry type! - %d\n",
(int)*oemptr);
return;
}
}
}
}
void numaq_mps_oem_check(struct mp_config_table *mpc, char *oem,
char *productid)
{
if (strncmp(oem, "IBM NUMA", 8))
printk("Warning! Not a NUMA-Q system!\n");
else
found_numaq = 1;
if (mpc->mpc_oemptr)
smp_read_mpc_oem((struct mp_config_oemtable *)mpc->mpc_oemptr,
mpc->mpc_oemsize);
}
#endif /* CONFIG_X86_NUMAQ */
/*
* Read/parse the MPC
*/
......@@ -457,7 +306,6 @@ static int __init smp_read_mpc(struct mp_config_table *mpc, unsigned early)
} else
mps_oem_check(mpc, oem, str);
#endif
/* save the local APIC address, it might be non-default */
if (!acpi_lapic)
mp_lapic_addr = mpc->mpc_lapic;
......@@ -465,12 +313,17 @@ static int __init smp_read_mpc(struct mp_config_table *mpc, unsigned early)
if (early)
return 1;
if (mpc->mpc_oemptr && x86_quirks->smp_read_mpc_oem) {
struct mp_config_oemtable *oem_table = (struct mp_config_oemtable *)(unsigned long)mpc->mpc_oemptr;
x86_quirks->smp_read_mpc_oem(oem_table, mpc->mpc_oemsize);
}
/*
* Now process the configuration blocks.
*/
#ifdef CONFIG_X86_NUMAQ
mpc_record = 0;
#endif
if (x86_quirks->mpc_record)
*x86_quirks->mpc_record = 0;
while (count < mpc->mpc_length) {
switch (*mpt) {
case MP_PROCESSOR:
......@@ -536,9 +389,8 @@ static int __init smp_read_mpc(struct mp_config_table *mpc, unsigned early)
count = mpc->mpc_length;
break;
}
#ifdef CONFIG_X86_NUMAQ
++mpc_record;
#endif
if (x86_quirks->mpc_record)
(*x86_quirks->mpc_record)++;
}
#ifdef CONFIG_X86_GENERICARCH
......@@ -725,12 +577,6 @@ static inline void __init construct_default_ISA_mptable(int mpc_default_type)
static struct intel_mp_floating *mpf_found;
/*
* Machine specific quirk for finding the SMP config before other setup
* activities destroy the table:
*/
int (*mach_get_smp_config_quirk)(unsigned int early);
/*
* Scan the memory blocks for an SMP configuration block.
*/
......@@ -738,8 +584,8 @@ static void __init __get_smp_config(unsigned int early)
{
struct intel_mp_floating *mpf = mpf_found;
if (mach_get_smp_config_quirk) {
if (mach_get_smp_config_quirk(early))
if (x86_quirks->mach_get_smp_config) {
if (x86_quirks->mach_get_smp_config(early))
return;
}
if (acpi_lapic && early)
......@@ -899,14 +745,12 @@ static int __init smp_scan_config(unsigned long base, unsigned long length,
return 0;
}
int (*mach_find_smp_config_quirk)(unsigned int reserve);
static void __init __find_smp_config(unsigned int reserve)
{
unsigned int address;
if (mach_find_smp_config_quirk) {
if (mach_find_smp_config_quirk(reserve))
if (x86_quirks->mach_find_smp_config) {
if (x86_quirks->mach_find_smp_config(reserve))
return;
}
/*
......
......@@ -263,7 +263,7 @@ late_initcall(init_lapic_nmi_sysfs);
static void __acpi_nmi_enable(void *__unused)
{
apic_write_around(APIC_LVT0, APIC_DM_NMI);
apic_write(APIC_LVT0, APIC_DM_NMI);
}
/*
......@@ -277,7 +277,7 @@ void acpi_nmi_enable(void)
static void __acpi_nmi_disable(void *__unused)
{
apic_write_around(APIC_LVT0, APIC_DM_NMI | APIC_LVT_MASKED);
apic_write(APIC_LVT0, APIC_DM_NMI | APIC_LVT_MASKED);
}
/*
......@@ -448,6 +448,13 @@ nmi_watchdog_tick(struct pt_regs *regs, unsigned reason)
#ifdef CONFIG_SYSCTL
static int __init setup_unknown_nmi_panic(char *str)
{
unknown_nmi_panic = 1;
return 1;
}
__setup("unknown_nmi_panic", setup_unknown_nmi_panic);
static int unknown_nmi_panic_callback(struct pt_regs *regs, int cpu)
{
unsigned char reason = get_nmi_reason();
......
......@@ -33,6 +33,7 @@
#include <asm/processor.h>
#include <asm/mpspec.h>
#include <asm/e820.h>
#include <asm/setup.h>
#define MB_TO_PAGES(addr) ((addr) << (20 - PAGE_SHIFT))
......@@ -71,6 +72,188 @@ static void __init smp_dump_qct(void)
}
}
void __init numaq_tsc_disable(void)
{
if (!found_numaq)
return;
if (num_online_nodes() > 1) {
printk(KERN_DEBUG "NUMAQ: disabling TSC\n");
setup_clear_cpu_cap(X86_FEATURE_TSC);
}
}
static int __init numaq_pre_time_init(void)
{
numaq_tsc_disable();
return 0;
}
int found_numaq;
/*
* Have to match translation table entries to main table entries by counter
* hence the mpc_record variable .... can't see a less disgusting way of
* doing this ....
*/
struct mpc_config_translation {
unsigned char mpc_type;
unsigned char trans_len;
unsigned char trans_type;
unsigned char trans_quad;
unsigned char trans_global;
unsigned char trans_local;
unsigned short trans_reserved;
};
/* x86_quirks member */
static int mpc_record;
static struct mpc_config_translation *translation_table[MAX_MPC_ENTRY]
__cpuinitdata;
static inline int generate_logical_apicid(int quad, int phys_apicid)
{
return (quad << 4) + (phys_apicid ? phys_apicid << 1 : 1);
}
/* x86_quirks member */
static int mpc_apic_id(struct mpc_config_processor *m)
{
int quad = translation_table[mpc_record]->trans_quad;
int logical_apicid = generate_logical_apicid(quad, m->mpc_apicid);
printk(KERN_DEBUG "Processor #%d %u:%u APIC version %d (quad %d, apic %d)\n",
m->mpc_apicid,
(m->mpc_cpufeature & CPU_FAMILY_MASK) >> 8,
(m->mpc_cpufeature & CPU_MODEL_MASK) >> 4,
m->mpc_apicver, quad, logical_apicid);
return logical_apicid;
}
int mp_bus_id_to_node[MAX_MP_BUSSES];
int mp_bus_id_to_local[MAX_MP_BUSSES];
/* x86_quirks member */
static void mpc_oem_bus_info(struct mpc_config_bus *m, char *name)
{
int quad = translation_table[mpc_record]->trans_quad;
int local = translation_table[mpc_record]->trans_local;
mp_bus_id_to_node[m->mpc_busid] = quad;
mp_bus_id_to_local[m->mpc_busid] = local;
printk(KERN_INFO "Bus #%d is %s (node %d)\n",
m->mpc_busid, name, quad);
}
int quad_local_to_mp_bus_id [NR_CPUS/4][4];
/* x86_quirks member */
static void mpc_oem_pci_bus(struct mpc_config_bus *m)
{
int quad = translation_table[mpc_record]->trans_quad;
int local = translation_table[mpc_record]->trans_local;
quad_local_to_mp_bus_id[quad][local] = m->mpc_busid;
}
static void __init MP_translation_info(struct mpc_config_translation *m)
{
printk(KERN_INFO
"Translation: record %d, type %d, quad %d, global %d, local %d\n",
mpc_record, m->trans_type, m->trans_quad, m->trans_global,
m->trans_local);
if (mpc_record >= MAX_MPC_ENTRY)
printk(KERN_ERR "MAX_MPC_ENTRY exceeded!\n");
else
translation_table[mpc_record] = m; /* stash this for later */
if (m->trans_quad < MAX_NUMNODES && !node_online(m->trans_quad))
node_set_online(m->trans_quad);
}
static int __init mpf_checksum(unsigned char *mp, int len)
{
int sum = 0;
while (len--)
sum += *mp++;
return sum & 0xFF;
}
/*
* Read/parse the MPC oem tables
*/
static void __init smp_read_mpc_oem(struct mp_config_oemtable *oemtable,
unsigned short oemsize)
{
int count = sizeof(*oemtable); /* the header size */
unsigned char *oemptr = ((unsigned char *)oemtable) + count;
mpc_record = 0;
printk(KERN_INFO "Found an OEM MPC table at %8p - parsing it ... \n",
oemtable);
if (memcmp(oemtable->oem_signature, MPC_OEM_SIGNATURE, 4)) {
printk(KERN_WARNING
"SMP mpc oemtable: bad signature [%c%c%c%c]!\n",
oemtable->oem_signature[0], oemtable->oem_signature[1],
oemtable->oem_signature[2], oemtable->oem_signature[3]);
return;
}
if (mpf_checksum((unsigned char *)oemtable, oemtable->oem_length)) {
printk(KERN_WARNING "SMP oem mptable: checksum error!\n");
return;
}
while (count < oemtable->oem_length) {
switch (*oemptr) {
case MP_TRANSLATION:
{
struct mpc_config_translation *m =
(struct mpc_config_translation *)oemptr;
MP_translation_info(m);
oemptr += sizeof(*m);
count += sizeof(*m);
++mpc_record;
break;
}
default:
{
printk(KERN_WARNING
"Unrecognised OEM table entry type! - %d\n",
(int)*oemptr);
return;
}
}
}
}
static struct x86_quirks numaq_x86_quirks __initdata = {
.arch_pre_time_init = numaq_pre_time_init,
.arch_time_init = NULL,
.arch_pre_intr_init = NULL,
.arch_memory_setup = NULL,
.arch_intr_init = NULL,
.arch_trap_init = NULL,
.mach_get_smp_config = NULL,
.mach_find_smp_config = NULL,
.mpc_record = &mpc_record,
.mpc_apic_id = mpc_apic_id,
.mpc_oem_bus_info = mpc_oem_bus_info,
.mpc_oem_pci_bus = mpc_oem_pci_bus,
.smp_read_mpc_oem = smp_read_mpc_oem,
};
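For reference (not part of this commit): numaq_x86_quirks above fills in the new x86_quirks hook table whose consumers appear in the mpparse.c and e820.c hunks. The structure's declaration (in asm-x86/setup.h) is not part of this excerpt; a sketch reconstructed purely from the call sites visible in this diff would be:

/*
 * Sketch only -- reconstructed from the call sites in this diff, not
 * the actual asm-x86/setup.h declaration; field order and the
 * signatures of the hooks left NULL above are assumptions.
 */
struct mpc_config_processor;
struct mpc_config_bus;
struct mp_config_oemtable;

struct x86_quirks {
	int (*arch_pre_time_init)(void);
	int (*arch_time_init)(void);
	int (*arch_pre_intr_init)(void);
	char *(*arch_memory_setup)(void);	/* used by e820.c above */
	int (*arch_intr_init)(void);
	int (*arch_trap_init)(void);
	int (*mach_get_smp_config)(unsigned int early);
	int (*mach_find_smp_config)(unsigned int reserve);

	int *mpc_record;			/* counter shared with mpparse.c */
	int (*mpc_apic_id)(struct mpc_config_processor *m);
	void (*mpc_oem_bus_info)(struct mpc_config_bus *m, char *name);
	void (*mpc_oem_pci_bus)(struct mpc_config_bus *m);
	void (*smp_read_mpc_oem)(struct mp_config_oemtable *oemtable,
				 unsigned short oemsize);
};

extern struct x86_quirks *x86_quirks;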
void numaq_mps_oem_check(struct mp_config_table *mpc, char *oem,
char *productid)
{
if (strncmp(oem, "IBM NUMA", 8))
printk("Warning! Not a NUMA-Q system!\n");
else
found_numaq = 1;
}
static __init void early_check_numaq(void)
{
/*
......@@ -82,6 +265,9 @@ static __init void early_check_numaq(void)
*/
if (smp_found_config)
early_get_smp_config();
if (found_numaq)
x86_quirks = &numaq_x86_quirks;
}
int __init get_memcfg_numaq(void)
......@@ -92,14 +278,3 @@ int __init get_memcfg_numaq(void)
smp_dump_qct();
return 1;
}
void __init numaq_tsc_disable(void)
{
if (!found_numaq)
return;
if (num_online_nodes() > 1) {
printk(KERN_DEBUG "NUMAQ: disabling TSC\n");
setup_clear_cpu_cap(X86_FEATURE_TSC);
}
}
......@@ -29,6 +29,7 @@
#include <asm/desc.h>
#include <asm/setup.h>
#include <asm/arch_hooks.h>
#include <asm/pgtable.h>
#include <asm/time.h>
#include <asm/pgalloc.h>
#include <asm/irq.h>
......@@ -361,7 +362,6 @@ struct pv_cpu_ops pv_cpu_ops = {
struct pv_apic_ops pv_apic_ops = {
#ifdef CONFIG_X86_LOCAL_APIC
.apic_write = native_apic_write,
.apic_write_atomic = native_apic_write_atomic,
.apic_read = native_apic_read,
.setup_boot_clock = setup_boot_APIC_clock,
.setup_secondary_clock = setup_secondary_APIC_clock,
......@@ -373,6 +373,9 @@ struct pv_mmu_ops pv_mmu_ops = {
#ifndef CONFIG_X86_64
.pagetable_setup_start = native_pagetable_setup_start,
.pagetable_setup_done = native_pagetable_setup_done,
#else
.pagetable_setup_start = paravirt_nop,
.pagetable_setup_done = paravirt_nop,
#endif
.read_cr2 = native_read_cr2,
......
......@@ -36,7 +36,7 @@
#include <linux/delay.h>
#include <linux/scatterlist.h>
#include <linux/iommu-helper.h>
#include <asm/gart.h>
#include <asm/iommu.h>
#include <asm/calgary.h>
#include <asm/tce.h>
#include <asm/pci-direct.h>
......
......@@ -5,12 +5,11 @@
#include <asm/proto.h>
#include <asm/dma.h>
#include <asm/gart.h>
#include <asm/iommu.h>
#include <asm/calgary.h>
#include <asm/amd_iommu.h>
int forbid_dac __read_mostly;
EXPORT_SYMBOL(forbid_dac);
static int forbid_dac __read_mostly;
const struct dma_mapping_ops *dma_ops;
EXPORT_SYMBOL(dma_ops);
......@@ -114,21 +113,15 @@ void __init pci_iommu_alloc(void)
* The order of these functions is important for
* fall-back/fail-over reasons
*/
#ifdef CONFIG_GART_IOMMU
gart_iommu_hole_init();
#endif
#ifdef CONFIG_CALGARY_IOMMU
detect_calgary();
#endif
detect_intel_iommu();
amd_iommu_detect();
#ifdef CONFIG_SWIOTLB
pci_swiotlb_init();
#endif
}
#endif
......@@ -184,9 +177,7 @@ static __init int iommu_setup(char *p)
swiotlb = 1;
#endif
#ifdef CONFIG_GART_IOMMU
gart_parse_options(p);
#endif
#ifdef CONFIG_CALGARY_IOMMU
if (!strncmp(p, "calgary", 7))
......@@ -500,17 +491,13 @@ EXPORT_SYMBOL(dma_free_coherent);
static int __init pci_iommu_init(void)
{
#ifdef CONFIG_CALGARY_IOMMU
calgary_iommu_init();
#endif
intel_iommu_init();
amd_iommu_init();
#ifdef CONFIG_GART_IOMMU
gart_iommu_init();
#endif
no_iommu_init();
return 0;
......
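For reference (not part of this commit): the pci-dma.c hunks above drop the CONFIG_GART_IOMMU / CONFIG_CALGARY_IOMMU / CONFIG_SWIOTLB guards around the init calls. That only works if the headers now provide no-op fallbacks when the option is off; a sketch of the presumed pattern (the actual asm/iommu.h contents are not in this excerpt):

/*
 * Sketch only -- presumed stub pattern, not the actual header. With
 * stubs like these in <asm/iommu.h>, callers such as pci_iommu_alloc()
 * and iommu_setup() need no #ifdef around the calls.
 */
#ifdef CONFIG_GART_IOMMU
extern void gart_iommu_hole_init(void);
extern void gart_iommu_init(void);
extern void gart_parse_options(char *options);
#else
static inline void gart_iommu_hole_init(void) { }
static inline void gart_iommu_init(void) { }
static inline void gart_parse_options(char *options) { }
#endif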
......@@ -32,6 +32,7 @@
#include <asm/mtrr.h>
#include <asm/pgtable.h>
#include <asm/proto.h>
#include <asm/iommu.h>
#include <asm/gart.h>
#include <asm/cacheflush.h>
#include <asm/swiotlb.h>
......
......@@ -7,7 +7,7 @@
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <asm/gart.h>
#include <asm/iommu.h>
#include <asm/processor.h>
#include <asm/dma.h>
......
......@@ -5,7 +5,7 @@
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <asm/gart.h>
#include <asm/iommu.h>
#include <asm/swiotlb.h>
#include <asm/dma.h>
......
......@@ -15,6 +15,7 @@ unsigned long idle_nomwait;
EXPORT_SYMBOL(idle_nomwait);
struct kmem_cache *task_xstate_cachep;
static int force_mwait __cpuinitdata;
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
......@@ -199,6 +200,7 @@ static void poll_idle(void)
*
* idle=mwait overrides this decision and forces the usage of mwait.
*/
static int __cpuinitdata force_mwait;
#define MWAIT_INFO 0x05
#define MWAIT_ECX_EXTENDED_INFO 0x01
......@@ -326,6 +328,9 @@ void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
static int __init idle_setup(char *str)
{
if (!str)
return -EINVAL;
if (!strcmp(str, "poll")) {
printk("using polling idle threads.\n");
pm_idle = poll_idle;
......
......@@ -537,8 +537,8 @@ static inline void __switch_to_xtra(struct task_struct *prev_p,
struct task_struct *
__switch_to(struct task_struct *prev_p, struct task_struct *next_p)
{
struct thread_struct *prev = &prev_p->thread,
*next = &next_p->thread;
struct thread_struct *prev = &prev_p->thread;
struct thread_struct *next = &next_p->thread;
int cpu = smp_processor_id();
struct tss_struct *tss = &per_cpu(init_tss, cpu);
unsigned fsindex, gsindex;
......@@ -586,35 +586,34 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
/*
* Switch FS and GS.
*
* Segment register != 0 always requires a reload. Also
* reload when it has changed. When prev process used 64bit
* base always reload to avoid an information leak.
*/
{
/* segment register != 0 always requires a reload.
also reload when it has changed.
when prev process used 64bit base always reload
to avoid an information leak. */
if (unlikely(fsindex | next->fsindex | prev->fs)) {
loadsegment(fs, next->fsindex);
/* check if the user used a selector != 0
* if yes clear 64bit base, since overloaded base
* is always mapped to the Null selector
*/
if (fsindex)
if (unlikely(fsindex | next->fsindex | prev->fs)) {
loadsegment(fs, next->fsindex);
/*
* Check if the user used a selector != 0; if yes
* clear 64bit base, since overloaded base is always
* mapped to the Null selector
*/
if (fsindex)
prev->fs = 0;
}
/* when next process has a 64bit base use it */
if (next->fs)
wrmsrl(MSR_FS_BASE, next->fs);
prev->fsindex = fsindex;
if (unlikely(gsindex | next->gsindex | prev->gs)) {
load_gs_index(next->gsindex);
if (gsindex)
}
/* when next process has a 64bit base use it */
if (next->fs)
wrmsrl(MSR_FS_BASE, next->fs);
prev->fsindex = fsindex;
if (unlikely(gsindex | next->gsindex | prev->gs)) {
load_gs_index(next->gsindex);
if (gsindex)
prev->gs = 0;
}
if (next->gs)
wrmsrl(MSR_KERNEL_GS_BASE, next->gs);
prev->gsindex = gsindex;
}
if (next->gs)
wrmsrl(MSR_KERNEL_GS_BASE, next->gs);
prev->gsindex = gsindex;
/* Must be after DS reload */
unlazy_fpu(prev_p);
......@@ -627,7 +626,8 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
write_pda(pcurrent, next_p);
write_pda(kernelstack,
(unsigned long)task_stack_page(next_p) + THREAD_SIZE - PDA_STACKOFFSET);
(unsigned long)task_stack_page(next_p) +
THREAD_SIZE - PDA_STACKOFFSET);
#ifdef CONFIG_CC_STACKPROTECTOR
write_pda(stack_canary, next_p->stack_canary);
/*
......
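The rewritten comment above is the whole FS/GS rule; as a reading aid, here is a hedged restatement of the reload test (an illustrative helper, not a function added by this commit):

/*
 * Illustrative restatement of the reload test used above: reload the
 * segment when the currently loaded selector is non-zero, the next
 * task wants a non-zero selector, or the previous task had a 64-bit
 * base programmed via MSR (which must not leak into the next task).
 */
static inline int fs_needs_reload(unsigned short cur_fsindex,
				  unsigned short next_fsindex,
				  unsigned long prev_fsbase)
{
	return (cur_fsindex | next_fsindex) != 0 || prev_fsbase != 0;
}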
......@@ -1357,8 +1357,6 @@ const struct user_regset_view *task_user_regset_view(struct task_struct *task)
#endif
}
#ifdef CONFIG_X86_32
void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, int error_code)
{
struct siginfo info;
......@@ -1377,89 +1375,10 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, int error_code)
force_sig_info(SIGTRAP, &info, tsk);
}
/* notification of system call entry/exit
* - triggered by current->work.syscall_trace
*/
int do_syscall_trace(struct pt_regs *regs, int entryexit)
{
int is_sysemu = test_thread_flag(TIF_SYSCALL_EMU);
/*
* With TIF_SYSCALL_EMU set we want to ignore TIF_SINGLESTEP for syscall
* interception
*/
int is_singlestep = !is_sysemu && test_thread_flag(TIF_SINGLESTEP);
int ret = 0;
/* do the secure computing check first */
if (!entryexit)
secure_computing(regs->orig_ax);
if (unlikely(current->audit_context)) {
if (entryexit)
audit_syscall_exit(AUDITSC_RESULT(regs->ax),
regs->ax);
/* Debug traps, when using PTRACE_SINGLESTEP, must be sent only
* on the syscall exit path. Normally, when TIF_SYSCALL_AUDIT is
* not used, entry.S will call us only on syscall exit, not
* entry; so when TIF_SYSCALL_AUDIT is used we must avoid
* calling send_sigtrap() on syscall entry.
*
* Note that when PTRACE_SYSEMU_SINGLESTEP is used,
* is_singlestep is false, despite his name, so we will still do
* the correct thing.
*/
else if (is_singlestep)
goto out;
}
if (!(current->ptrace & PT_PTRACED))
goto out;
/* If a process stops on the 1st tracepoint with SYSCALL_TRACE
* and then is resumed with SYSEMU_SINGLESTEP, it will come in
* here. We have to check this and return */
if (is_sysemu && entryexit)
return 0;
/* Fake a debug trap */
if (is_singlestep)
send_sigtrap(current, regs, 0);
if (!test_thread_flag(TIF_SYSCALL_TRACE) && !is_sysemu)
goto out;
/* the 0x80 provides a way for the tracing parent to distinguish
between a syscall stop and SIGTRAP delivery */
/* Note that the debugger could change the result of test_thread_flag!*/
ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD) ? 0x80:0));
/*
* this isn't the same as continuing with a signal, but it will do
* for normal use. strace only continues with a signal if the
* stopping signal is not SIGTRAP. -brl
*/
if (current->exit_code) {
send_sig(current->exit_code, current, 1);
current->exit_code = 0;
}
ret = is_sysemu;
out:
if (unlikely(current->audit_context) && !entryexit)
audit_syscall_entry(AUDIT_ARCH_I386, regs->orig_ax,
regs->bx, regs->cx, regs->dx, regs->si);
if (ret == 0)
return 0;
regs->orig_ax = -1; /* force skip of syscall restarting */
if (unlikely(current->audit_context))
audit_syscall_exit(AUDITSC_RESULT(regs->ax), regs->ax);
return 1;
}
#else /* CONFIG_X86_64 */
static void syscall_trace(struct pt_regs *regs)
{
if (!(current->ptrace & PT_PTRACED))
return;
#if 0
printk("trace %s ip %lx sp %lx ax %d origrax %d caller %lx tiflags %x ptrace %x\n",
......@@ -1481,39 +1400,81 @@ static void syscall_trace(struct pt_regs *regs)
}
}
asmlinkage void syscall_trace_enter(struct pt_regs *regs)
#ifdef CONFIG_X86_32
# define IS_IA32 1
#elif defined CONFIG_IA32_EMULATION
# define IS_IA32 test_thread_flag(TIF_IA32)
#else
# define IS_IA32 0
#endif
/*
* We must return the syscall number to actually look up in the table.
* This can be -1L to skip running any syscall at all.
*/
asmregparm long syscall_trace_enter(struct pt_regs *regs)
{
long ret = 0;
/*
* If we stepped into a sysenter/syscall insn, it trapped in
* kernel mode; do_debug() cleared TF and set TIF_SINGLESTEP.
* If user-mode had set TF itself, then it's still clear from
* do_debug() and we need to set it again to restore the user
* state. If we entered on the slow path, TF was already set.
*/
if (test_thread_flag(TIF_SINGLESTEP))
regs->flags |= X86_EFLAGS_TF;
/* do the secure computing check first */
secure_computing(regs->orig_ax);
if (test_thread_flag(TIF_SYSCALL_TRACE)
&& (current->ptrace & PT_PTRACED))
if (unlikely(test_thread_flag(TIF_SYSCALL_EMU)))
ret = -1L;
if (ret || test_thread_flag(TIF_SYSCALL_TRACE))
syscall_trace(regs);
if (unlikely(current->audit_context)) {
if (test_thread_flag(TIF_IA32)) {
if (IS_IA32)
audit_syscall_entry(AUDIT_ARCH_I386,
regs->orig_ax,
regs->bx, regs->cx,
regs->dx, regs->si);
} else {
#ifdef CONFIG_X86_64
else
audit_syscall_entry(AUDIT_ARCH_X86_64,
regs->orig_ax,
regs->di, regs->si,
regs->dx, regs->r10);
}
#endif
}
return ret ?: regs->orig_ax;
}
asmlinkage void syscall_trace_leave(struct pt_regs *regs)
asmregparm void syscall_trace_leave(struct pt_regs *regs)
{
if (unlikely(current->audit_context))
audit_syscall_exit(AUDITSC_RESULT(regs->ax), regs->ax);
if ((test_thread_flag(TIF_SYSCALL_TRACE)
|| test_thread_flag(TIF_SINGLESTEP))
&& (current->ptrace & PT_PTRACED))
if (test_thread_flag(TIF_SYSCALL_TRACE))
syscall_trace(regs);
}
#endif /* CONFIG_X86_32 */
/*
* If TIF_SYSCALL_EMU is set, we only get here because of
* TIF_SINGLESTEP (i.e. this is PTRACE_SYSEMU_SINGLESTEP).
* We already reported this syscall instruction in
* syscall_trace_enter(), so don't do any more now.
*/
if (unlikely(test_thread_flag(TIF_SYSCALL_EMU)))
return;
/*
* If we are single-stepping, synthesize a trap to follow the
* system call instruction.
*/
if (test_thread_flag(TIF_SINGLESTEP) &&
(current->ptrace & PT_PTRACED))
send_sigtrap(current, regs, 0);
}
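With the unification above, syscall_trace_enter() returns the syscall number the entry code should actually dispatch, or -1L to skip the syscall entirely (the PTRACE_SYSEMU case). The final "return ret ?: regs->orig_ax;" uses the GNU C conditional with an omitted middle operand, which is easy to misread; a tiny sketch of its meaning:

/*
 * GNU C "a ?: b" evaluates to a when a is non-zero, otherwise to b,
 * without evaluating a twice.  Applied above: return -1L when the
 * syscall must be skipped, else the original syscall number.
 */
long pick_syscall_nr(long ret, long orig_ax)
{
	return ret ?: orig_ax;		/* same as: ret ? ret : orig_ax */
}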
......@@ -177,6 +177,14 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge 2400"),
},
},
{ /* Handle problems with rebooting on Dell T5400's */
.callback = set_bios_reboot,
.ident = "Dell Precision T5400",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "Precision WorkStation T5400"),
},
},
{ /* Handle problems with rebooting on HP laptops */
.callback = set_bios_reboot,
.ident = "HP Compaq Laptop",
......
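The Dell Precision T5400 entry follows the standard DMI quirk pattern: a dmi_system_id table is scanned at boot and the callback runs for each machine whose DMI strings match. A hedged sketch of such an entry for a hypothetical machine (set_bios_reboot is the helper named in the hunk above; everything else is the generic pattern):

/* Sketch of a DMI reboot quirk entry (hypothetical machine): */
static struct dmi_system_id example_reboot_dmi[] __initdata = {
	{	/* Force reboot=bios on "Example Vendor Model X" */
		.callback = set_bios_reboot,
		.ident = "Example Vendor Model X",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Example Vendor"),
			DMI_MATCH(DMI_PRODUCT_NAME, "Model X"),
		},
	},
	{ }	/* terminating empty entry */
};
/* ... and, once during boot: dmi_check_system(example_reboot_dmi); */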
......@@ -57,12 +57,8 @@
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/delay.h>
#include <linux/highmem.h>
#include <linux/kallsyms.h>
#include <linux/edd.h>
#include <linux/iscsi_ibft.h>
#include <linux/kexec.h>
#include <linux/cpufreq.h>
#include <linux/dma-mapping.h>
#include <linux/ctype.h>
......@@ -96,7 +92,7 @@
#include <asm/smp.h>
#include <asm/desc.h>
#include <asm/dma.h>
#include <asm/gart.h>
#include <asm/iommu.h>
#include <asm/mmu_context.h>
#include <asm/proto.h>
......@@ -104,7 +100,6 @@
#include <asm/paravirt.h>
#include <asm/percpu.h>
#include <asm/sections.h>
#include <asm/topology.h>
#include <asm/apicdef.h>
#ifdef CONFIG_X86_64
......@@ -579,6 +574,10 @@ static int __init setup_elfcorehdr(char *arg)
early_param("elfcorehdr", setup_elfcorehdr);
#endif
static struct x86_quirks default_x86_quirks __initdata;
struct x86_quirks *x86_quirks __initdata = &default_x86_quirks;
/*
* Determine if we were loaded by an EFI loader. If so, then we have also been
* passed the efi memmap, systab, etc., so we should use these data structures
......@@ -824,7 +823,10 @@ void __init setup_arch(char **cmdline_p)
vmi_init();
#endif
paravirt_pagetable_setup_start(swapper_pg_dir);
paging_init();
paravirt_pagetable_setup_done(swapper_pg_dir);
paravirt_post_allocator_init();
#ifdef CONFIG_X86_64
map_vsyscall();
......@@ -854,14 +856,6 @@ void __init setup_arch(char **cmdline_p)
init_cpu_to_node();
#endif
#ifdef CONFIG_X86_NUMAQ
/*
* need to check online nodes num, call it
* here before time_init/tsc_init
*/
numaq_tsc_disable();
#endif
init_apic_mappings();
ioapic_init_mappings();
......
......@@ -212,7 +212,7 @@ asmlinkage unsigned long sys_sigreturn(unsigned long __unused)
badframe:
if (show_unhandled_signals && printk_ratelimit()) {
printk(KERN_INFO "%s%s[%d] bad frame in sigreturn frame:"
printk("%s%s[%d] bad frame in sigreturn frame:"
"%p ip:%lx sp:%lx oeax:%lx",
task_pid_nr(current) > 1 ? KERN_INFO : KERN_EMERG,
current->comm, task_pid_nr(current), frame, regs->ip,
......@@ -657,12 +657,6 @@ static void do_signal(struct pt_regs *regs)
void
do_notify_resume(struct pt_regs *regs, void *unused, __u32 thread_info_flags)
{
/* Pending single-step? */
if (thread_info_flags & _TIF_SINGLESTEP) {
regs->flags |= X86_EFLAGS_TF;
clear_thread_flag(TIF_SINGLESTEP);
}
/* deal with pending signal delivery */
if (thread_info_flags & _TIF_SIGPENDING)
do_signal(regs);
......
......@@ -487,12 +487,6 @@ static void do_signal(struct pt_regs *regs)
void do_notify_resume(struct pt_regs *regs, void *unused,
__u32 thread_info_flags)
{
/* Pending single-step? */
if (thread_info_flags & _TIF_SINGLESTEP) {
regs->flags |= X86_EFLAGS_TF;
clear_thread_flag(TIF_SINGLESTEP);
}
#ifdef CONFIG_X86_MCE
/* notify userspace of pending MCEs */
if (thread_info_flags & _TIF_MCE_NOTIFY)
......
......@@ -546,8 +546,8 @@ static inline void __inquire_remote_apic(int apicid)
printk(KERN_CONT
"a previous APIC delivery may have failed\n");
apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(apicid));
apic_write_around(APIC_ICR, APIC_DM_REMRD | regs[i]);
apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(apicid));
apic_write(APIC_ICR, APIC_DM_REMRD | regs[i]);
timeout = 0;
do {
......@@ -579,11 +579,11 @@ wakeup_secondary_cpu(int logical_apicid, unsigned long start_eip)
int maxlvt;
/* Target chip */
apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(logical_apicid));
apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(logical_apicid));
/* Boot on the stack */
/* Kick the second */
apic_write_around(APIC_ICR, APIC_DM_NMI | APIC_DEST_LOGICAL);
apic_write(APIC_ICR, APIC_DM_NMI | APIC_DEST_LOGICAL);
Dprintk("Waiting for send to finish...\n");
send_status = safe_apic_wait_icr_idle();
......@@ -592,14 +592,9 @@ wakeup_secondary_cpu(int logical_apicid, unsigned long start_eip)
* Give the other CPU some time to accept the IPI.
*/
udelay(200);
/*
* Due to the Pentium erratum 3AP.
*/
maxlvt = lapic_get_maxlvt();
if (maxlvt > 3) {
apic_read_around(APIC_SPIV);
if (maxlvt > 3) /* Due to the Pentium erratum 3AP. */
apic_write(APIC_ESR, 0);
}
accept_status = (apic_read(APIC_ESR) & 0xEF);
Dprintk("NMI sent.\n");
......@@ -625,12 +620,14 @@ wakeup_secondary_cpu(int phys_apicid, unsigned long start_eip)
return send_status;
}
maxlvt = lapic_get_maxlvt();
/*
* Be paranoid about clearing APIC errors.
*/
if (APIC_INTEGRATED(apic_version[phys_apicid])) {
apic_read_around(APIC_SPIV);
apic_write(APIC_ESR, 0);
if (maxlvt > 3) /* Due to the Pentium erratum 3AP. */
apic_write(APIC_ESR, 0);
apic_read(APIC_ESR);
}
......@@ -639,13 +636,13 @@ wakeup_secondary_cpu(int phys_apicid, unsigned long start_eip)
/*
* Turn INIT on target chip
*/
apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));
apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));
/*
* Send IPI
*/
apic_write_around(APIC_ICR, APIC_INT_LEVELTRIG | APIC_INT_ASSERT
| APIC_DM_INIT);
apic_write(APIC_ICR,
APIC_INT_LEVELTRIG | APIC_INT_ASSERT | APIC_DM_INIT);
Dprintk("Waiting for send to finish...\n");
send_status = safe_apic_wait_icr_idle();
......@@ -655,10 +652,10 @@ wakeup_secondary_cpu(int phys_apicid, unsigned long start_eip)
Dprintk("Deasserting INIT.\n");
/* Target chip */
apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));
apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));
/* Send IPI */
apic_write_around(APIC_ICR, APIC_INT_LEVELTRIG | APIC_DM_INIT);
apic_write(APIC_ICR, APIC_INT_LEVELTRIG | APIC_DM_INIT);
Dprintk("Waiting for send to finish...\n");
send_status = safe_apic_wait_icr_idle();
......@@ -689,12 +686,10 @@ wakeup_secondary_cpu(int phys_apicid, unsigned long start_eip)
*/
Dprintk("#startup loops: %d.\n", num_starts);
maxlvt = lapic_get_maxlvt();
for (j = 1; j <= num_starts; j++) {
Dprintk("Sending STARTUP #%d.\n", j);
apic_read_around(APIC_SPIV);
apic_write(APIC_ESR, 0);
if (maxlvt > 3) /* Due to the Pentium erratum 3AP. */
apic_write(APIC_ESR, 0);
apic_read(APIC_ESR);
Dprintk("After apic_write.\n");
......@@ -703,12 +698,11 @@ wakeup_secondary_cpu(int phys_apicid, unsigned long start_eip)
*/
/* Target chip */
apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));
apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));
/* Boot on the stack */
/* Kick the second */
apic_write_around(APIC_ICR, APIC_DM_STARTUP
| (start_eip >> 12));
apic_write(APIC_ICR, APIC_DM_STARTUP | (start_eip >> 12));
/*
* Give the other CPU some time to accept the IPI.
......@@ -724,13 +718,8 @@ wakeup_secondary_cpu(int phys_apicid, unsigned long start_eip)
* Give the other CPU some time to accept the IPI.
*/
udelay(200);
/*
* Due to the Pentium erratum 3AP.
*/
if (maxlvt > 3) {
apic_read_around(APIC_SPIV);
if (maxlvt > 3) /* Due to the Pentium erratum 3AP. */
apic_write(APIC_ESR, 0);
}
accept_status = (apic_read(APIC_ESR) & 0xEF);
if (send_status || accept_status)
break;
......@@ -768,7 +757,7 @@ static void __cpuinit do_fork_idle(struct work_struct *work)
*
* Must be called after the _cpu_pda pointer table is initialized.
*/
static int __cpuinit get_local_pda(int cpu)
int __cpuinit get_local_pda(int cpu)
{
struct x8664_pda *oldpda, *newpda;
unsigned long size = sizeof(struct x8664_pda);
......@@ -1390,7 +1379,8 @@ static int __init parse_maxcpus(char *arg)
{
extern unsigned int maxcpus;
maxcpus = simple_strtoul(arg, NULL, 0);
if (arg)
maxcpus = simple_strtoul(arg, NULL, 0);
return 0;
}
early_param("maxcpus", parse_maxcpus);
......@@ -105,6 +105,20 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
static int enable_single_step(struct task_struct *child)
{
struct pt_regs *regs = task_pt_regs(child);
unsigned long oflags;
/*
* If we stepped into a sysenter/syscall insn, it trapped in
* kernel mode; do_debug() cleared TF and set TIF_SINGLESTEP.
* If user-mode had set TF itself, then it's still clear from
* do_debug() and we need to set it again to restore the user
* state so we don't wrongly set TIF_FORCED_TF below.
* If enable_single_step() was used last and that is what
* set TIF_SINGLESTEP, then both TF and TIF_FORCED_TF are
* already set and our bookkeeping is fine.
*/
if (unlikely(test_tsk_thread_flag(child, TIF_SINGLESTEP)))
regs->flags |= X86_EFLAGS_TF;
/*
* Always set TIF_SINGLESTEP - this guarantees that
......@@ -113,11 +127,7 @@ static int enable_single_step(struct task_struct *child)
*/
set_tsk_thread_flag(child, TIF_SINGLESTEP);
/*
* If TF was already set, don't do anything else
*/
if (regs->flags & X86_EFLAGS_TF)
return 0;
oflags = regs->flags;
/* Set TF on the kernel stack.. */
regs->flags |= X86_EFLAGS_TF;
......@@ -126,9 +136,22 @@ static int enable_single_step(struct task_struct *child)
* ..but if TF is changed by the instruction we will trace,
* don't mark it as being "us" that set it, so that we
* won't clear it by hand later.
*
* Note that if we don't actually execute the popf because
* of a signal arriving right now or suchlike, we will lose
* track of the fact that it really was "us" that set it.
*/
if (is_setting_trap_flag(child, regs))
if (is_setting_trap_flag(child, regs)) {
clear_tsk_thread_flag(child, TIF_FORCED_TF);
return 0;
}
/*
* If TF was already set, check whether it was us who set it.
* If not, we should never attempt a block step.
*/
if (oflags & X86_EFLAGS_TF)
return test_tsk_thread_flag(child, TIF_FORCED_TF);
set_tsk_thread_flag(child, TIF_FORCED_TF);
......
......@@ -129,6 +129,7 @@ void __init hpet_time_init(void)
*/
void __init time_init(void)
{
pre_time_init_hook();
tsc_init();
late_time_init = choose_time_init();
}
......@@ -58,6 +58,7 @@
#include <asm/nmi.h>
#include <asm/smp.h>
#include <asm/io.h>
#include <asm/traps.h>
#include "mach_traps.h"
......@@ -77,26 +78,6 @@ char ignore_fpu_irq;
gate_desc idt_table[256]
__attribute__((__section__(".data.idt"))) = { { { { 0, 0 } } }, };
asmlinkage void divide_error(void);
asmlinkage void debug(void);
asmlinkage void nmi(void);
asmlinkage void int3(void);
asmlinkage void overflow(void);
asmlinkage void bounds(void);
asmlinkage void invalid_op(void);
asmlinkage void device_not_available(void);
asmlinkage void coprocessor_segment_overrun(void);
asmlinkage void invalid_TSS(void);
asmlinkage void segment_not_present(void);
asmlinkage void stack_segment(void);
asmlinkage void general_protection(void);
asmlinkage void page_fault(void);
asmlinkage void coprocessor_error(void);
asmlinkage void simd_coprocessor_error(void);
asmlinkage void alignment_check(void);
asmlinkage void spurious_interrupt_bug(void);
asmlinkage void machine_check(void);
int panic_on_unrecovered_nmi;
int kstack_depth_to_print = 24;
static unsigned int code_bytes = 64;
......@@ -256,7 +237,7 @@ static const struct stacktrace_ops print_trace_ops = {
static void
show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
unsigned long *stack, unsigned long bp, char *log_lvl)
unsigned long *stack, unsigned long bp, char *log_lvl)
{
dump_trace(task, regs, stack, bp, &print_trace_ops, log_lvl);
printk("%s =======================\n", log_lvl);
......@@ -383,6 +364,54 @@ int is_valid_bugaddr(unsigned long ip)
return ud2 == 0x0b0f;
}
static raw_spinlock_t die_lock = __RAW_SPIN_LOCK_UNLOCKED;
static int die_owner = -1;
static unsigned int die_nest_count;
unsigned __kprobes long oops_begin(void)
{
unsigned long flags;
oops_enter();
if (die_owner != raw_smp_processor_id()) {
console_verbose();
raw_local_irq_save(flags);
__raw_spin_lock(&die_lock);
die_owner = smp_processor_id();
die_nest_count = 0;
bust_spinlocks(1);
} else {
raw_local_irq_save(flags);
}
die_nest_count++;
return flags;
}
void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
{
bust_spinlocks(0);
die_owner = -1;
add_taint(TAINT_DIE);
__raw_spin_unlock(&die_lock);
raw_local_irq_restore(flags);
if (!regs)
return;
if (kexec_should_crash(current))
crash_kexec(regs);
if (in_interrupt())
panic("Fatal exception in interrupt");
if (panic_on_oops)
panic("Fatal exception");
oops_exit();
do_exit(signr);
}
int __kprobes __die(const char *str, struct pt_regs *regs, long err)
{
unsigned short ss;
......@@ -423,31 +452,9 @@ int __kprobes __die(const char *str, struct pt_regs *regs, long err)
*/
void die(const char *str, struct pt_regs *regs, long err)
{
static struct {
raw_spinlock_t lock;
u32 lock_owner;
int lock_owner_depth;
} die = {
.lock = __RAW_SPIN_LOCK_UNLOCKED,
.lock_owner = -1,
.lock_owner_depth = 0
};
unsigned long flags;
oops_enter();
if (die.lock_owner != raw_smp_processor_id()) {
console_verbose();
raw_local_irq_save(flags);
__raw_spin_lock(&die.lock);
die.lock_owner = smp_processor_id();
die.lock_owner_depth = 0;
bust_spinlocks(1);
} else {
raw_local_irq_save(flags);
}
unsigned long flags = oops_begin();
if (++die.lock_owner_depth < 3) {
if (die_nest_count < 3) {
report_bug(regs->ip, regs);
if (__die(str, regs, err))
......@@ -456,26 +463,7 @@ void die(const char *str, struct pt_regs *regs, long err)
printk(KERN_EMERG "Recursive die() failure, output suppressed\n");
}
bust_spinlocks(0);
die.lock_owner = -1;
add_taint(TAINT_DIE);
__raw_spin_unlock(&die.lock);
raw_local_irq_restore(flags);
if (!regs)
return;
if (kexec_should_crash(current))
crash_kexec(regs);
if (in_interrupt())
panic("Fatal exception in interrupt");
if (panic_on_oops)
panic("Fatal exception");
oops_exit();
do_exit(SIGSEGV);
oops_end(flags, regs, SIGSEGV);
}
static inline void
......
......@@ -51,30 +51,10 @@
#include <asm/pgalloc.h>
#include <asm/proto.h>
#include <asm/pda.h>
#include <asm/traps.h>
#include <mach_traps.h>
asmlinkage void divide_error(void);
asmlinkage void debug(void);
asmlinkage void nmi(void);
asmlinkage void int3(void);
asmlinkage void overflow(void);
asmlinkage void bounds(void);
asmlinkage void invalid_op(void);
asmlinkage void device_not_available(void);
asmlinkage void double_fault(void);
asmlinkage void coprocessor_segment_overrun(void);
asmlinkage void invalid_TSS(void);
asmlinkage void segment_not_present(void);
asmlinkage void stack_segment(void);
asmlinkage void general_protection(void);
asmlinkage void page_fault(void);
asmlinkage void coprocessor_error(void);
asmlinkage void simd_coprocessor_error(void);
asmlinkage void alignment_check(void);
asmlinkage void spurious_interrupt_bug(void);
asmlinkage void machine_check(void);
int panic_on_unrecovered_nmi;
int kstack_depth_to_print = 12;
static unsigned int code_bytes = 64;
......@@ -355,17 +335,24 @@ static const struct stacktrace_ops print_trace_ops = {
.address = print_trace_address,
};
void show_trace(struct task_struct *task, struct pt_regs *regs,
unsigned long *stack, unsigned long bp)
static void
show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
unsigned long *stack, unsigned long bp, char *log_lvl)
{
printk("\nCall Trace:\n");
dump_trace(task, regs, stack, bp, &print_trace_ops, NULL);
dump_trace(task, regs, stack, bp, &print_trace_ops, log_lvl);
printk("\n");
}
void show_trace(struct task_struct *task, struct pt_regs *regs,
unsigned long *stack, unsigned long bp)
{
show_trace_log_lvl(task, regs, stack, bp, "");
}
static void
_show_stack(struct task_struct *task, struct pt_regs *regs,
unsigned long *sp, unsigned long bp)
show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
unsigned long *sp, unsigned long bp, char *log_lvl)
{
unsigned long *stack;
int i;
......@@ -399,12 +386,12 @@ _show_stack(struct task_struct *task, struct pt_regs *regs,
printk(" %016lx", *stack++);
touch_nmi_watchdog();
}
show_trace(task, regs, sp, bp);
show_trace_log_lvl(task, regs, sp, bp, log_lvl);
}
void show_stack(struct task_struct *task, unsigned long *sp)
{
_show_stack(task, NULL, sp, 0);
show_stack_log_lvl(task, NULL, sp, 0, "");
}
/*
......@@ -454,7 +441,8 @@ void show_registers(struct pt_regs *regs)
u8 *ip;
printk("Stack: ");
_show_stack(NULL, regs, (unsigned long *)sp, regs->bp);
show_stack_log_lvl(NULL, regs, (unsigned long *)sp,
regs->bp, "");
printk("\n");
printk(KERN_EMERG "Code: ");
......@@ -518,7 +506,7 @@ unsigned __kprobes long oops_begin(void)
}
void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
{
{
die_owner = -1;
bust_spinlocks(0);
die_nest_count--;
......
......@@ -73,7 +73,7 @@ int is_visws_box(void)
return visws_board_type >= 0;
}
static int __init visws_time_init_quirk(void)
static int __init visws_time_init(void)
{
printk(KERN_INFO "Starting Cobalt Timer system clock\n");
......@@ -93,7 +93,7 @@ static int __init visws_time_init_quirk(void)
return 0;
}
static int __init visws_pre_intr_init_quirk(void)
static int __init visws_pre_intr_init(void)
{
init_VISWS_APIC_irqs();
......@@ -114,7 +114,7 @@ EXPORT_SYMBOL(sgivwfb_mem_size);
long long mem_size __initdata = 0;
static char * __init visws_memory_setup_quirk(void)
static char * __init visws_memory_setup(void)
{
long long gfx_mem_size = 8 * MB;
......@@ -176,7 +176,7 @@ static void visws_machine_power_off(void)
outl(PIIX_SPECIAL_STOP, 0xCFC);
}
static int __init visws_get_smp_config_quirk(unsigned int early)
static int __init visws_get_smp_config(unsigned int early)
{
/*
* Prevent MP-table parsing by the generic code:
......@@ -192,7 +192,7 @@ extern unsigned int __cpuinitdata maxcpus;
* No problem for Linux.
*/
static void __init MP_processor_info (struct mpc_config_processor *m)
static void __init MP_processor_info(struct mpc_config_processor *m)
{
int ver, logical_apicid;
physid_mask_t apic_cpus;
......@@ -232,7 +232,7 @@ static void __init MP_processor_info (struct mpc_config_processor *m)
apic_version[m->mpc_apicid] = ver;
}
int __init visws_find_smp_config_quirk(unsigned int reserve)
static int __init visws_find_smp_config(unsigned int reserve)
{
struct mpc_config_processor *mp = phys_to_virt(CO_CPU_TAB_PHYS);
unsigned short ncpus = readw(phys_to_virt(CO_CPU_NUM_PHYS));
......@@ -258,7 +258,17 @@ int __init visws_find_smp_config_quirk(unsigned int reserve)
return 1;
}
extern int visws_trap_init_quirk(void);
static int visws_trap_init(void);
static struct x86_quirks visws_x86_quirks __initdata = {
.arch_time_init = visws_time_init,
.arch_pre_intr_init = visws_pre_intr_init,
.arch_memory_setup = visws_memory_setup,
.arch_intr_init = NULL,
.arch_trap_init = visws_trap_init,
.mach_get_smp_config = visws_get_smp_config,
.mach_find_smp_config = visws_find_smp_config,
};
void __init visws_early_detect(void)
{
......@@ -272,16 +282,10 @@ void __init visws_early_detect(void)
/*
* Install special quirks for timer, interrupt and memory setup:
*/
arch_time_init_quirk = visws_time_init_quirk;
arch_pre_intr_init_quirk = visws_pre_intr_init_quirk;
arch_memory_setup_quirk = visws_memory_setup_quirk;
/*
* Fall back to generic behavior for traps:
* Override generic MP-table parsing:
*/
arch_intr_init_quirk = NULL;
arch_trap_init_quirk = visws_trap_init_quirk;
x86_quirks = &visws_x86_quirks;
/*
* Install reboot quirks:
......@@ -294,12 +298,6 @@ void __init visws_early_detect(void)
*/
no_broadcast = 0;
/*
* Override generic MP-table parsing:
*/
mach_get_smp_config_quirk = visws_get_smp_config_quirk;
mach_find_smp_config_quirk = visws_find_smp_config_quirk;
#ifdef CONFIG_X86_IO_APIC
/*
* Turn off IO-APIC detection and initialization:
......@@ -426,7 +424,7 @@ static __init void cobalt_init(void)
co_apic_read(CO_APIC_ID));
}
int __init visws_trap_init_quirk(void)
static int __init visws_trap_init(void)
{
lithium_init();
cobalt_init();
......
......@@ -906,7 +906,6 @@ static inline int __init activate_vmi(void)
#ifdef CONFIG_X86_LOCAL_APIC
para_fill(pv_apic_ops.apic_read, APICRead);
para_fill(pv_apic_ops.apic_write, APICWrite);
para_fill(pv_apic_ops.apic_write_atomic, APICWrite);
#endif
/*
......
......@@ -991,7 +991,6 @@ __init void lguest_init(void)
#ifdef CONFIG_X86_LOCAL_APIC
/* apic read/write intercepts */
pv_apic_ops.apic_write = lguest_apic_write;
pv_apic_ops.apic_write_atomic = lguest_apic_write;
pv_apic_ops.apic_read = lguest_apic_read;
#endif
......
......@@ -10,14 +10,6 @@
#include <asm/e820.h>
#include <asm/setup.h>
/*
* Any quirks to be performed to initialize timers/irqs/etc?
*/
int (*arch_time_init_quirk)(void);
int (*arch_pre_intr_init_quirk)(void);
int (*arch_intr_init_quirk)(void);
int (*arch_trap_init_quirk)(void);
#ifdef CONFIG_HOTPLUG_CPU
#define DEFAULT_SEND_IPI (1)
#else
......@@ -37,8 +29,8 @@ int no_broadcast=DEFAULT_SEND_IPI;
**/
void __init pre_intr_init_hook(void)
{
if (arch_pre_intr_init_quirk) {
if (arch_pre_intr_init_quirk())
if (x86_quirks->arch_pre_intr_init) {
if (x86_quirks->arch_pre_intr_init())
return;
}
init_ISA_irqs();
......@@ -64,8 +56,8 @@ static struct irqaction irq2 = {
**/
void __init intr_init_hook(void)
{
if (arch_intr_init_quirk) {
if (arch_intr_init_quirk())
if (x86_quirks->arch_intr_init) {
if (x86_quirks->arch_intr_init())
return;
}
#ifdef CONFIG_X86_LOCAL_APIC
......@@ -97,8 +89,8 @@ void __init pre_setup_arch_hook(void)
**/
void __init trap_init_hook(void)
{
if (arch_trap_init_quirk) {
if (arch_trap_init_quirk())
if (x86_quirks->arch_trap_init) {
if (x86_quirks->arch_trap_init())
return;
}
}
......@@ -110,6 +102,16 @@ static struct irqaction irq0 = {
.name = "timer"
};
/**
* pre_time_init_hook - do any specific initialisations before the system timer is set up.
*
**/
void __init pre_time_init_hook(void)
{
if (x86_quirks->arch_pre_time_init)
x86_quirks->arch_pre_time_init();
}
/**
* time_init_hook - do any specific initialisations for the system timer.
*
......@@ -119,13 +121,13 @@ static struct irqaction irq0 = {
**/
void __init time_init_hook(void)
{
if (arch_time_init_quirk) {
if (x86_quirks->arch_time_init) {
/*
* A nonzero return code does not mean failure, it means
* that the architecture quirk does not want any
* generic (timer) setup to be performed after this:
*/
if (arch_time_init_quirk())
if (x86_quirks->arch_time_init())
return;
}
......
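The hunks above replace the loose arch_*_quirk function pointers with a single x86_quirks table that has a generic default instance; a platform (VisWS, earlier in this diff) overrides the whole pointer. A minimal sketch of the pattern, with the field list trimmed to what this diff shows (the real struct in the x86 headers has more members):

struct x86_quirks {
	int (*arch_pre_time_init)(void);
	int (*arch_time_init)(void);
	int (*arch_pre_intr_init)(void);
	int (*arch_intr_init)(void);
	int (*arch_trap_init)(void);
	char * (*arch_memory_setup)(void);
	int (*mach_get_smp_config)(unsigned int early);
	int (*mach_find_smp_config)(unsigned int reserve);
};

static struct x86_quirks default_x86_quirks;		/* all NULL: generic behaviour */
struct x86_quirks *x86_quirks = &default_x86_quirks;	/* a platform may repoint this */

void example_trap_init(void)
{
	/* A hook that exists and returns non-zero suppresses the generic code. */
	if (x86_quirks->arch_trap_init && x86_quirks->arch_trap_init())
		return;
	/* ... generic trap initialisation ... */
}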
......@@ -21,3 +21,4 @@ obj-$(CONFIG_K8_NUMA) += k8topology_64.o
endif
obj-$(CONFIG_ACPI_NUMA) += srat_$(BITS).o
obj-$(CONFIG_MEMTEST) += memtest.o
......@@ -844,6 +844,9 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
reserve_early(table_start << PAGE_SHIFT,
table_end << PAGE_SHIFT, "PGTABLE");
if (!after_init_bootmem)
early_memtest(start, end);
return end >> PAGE_SHIFT;
}
......@@ -868,8 +871,6 @@ void __init paging_init(void)
*/
sparse_init();
zone_sizes_init();
paravirt_post_allocator_init();
}
/*
......
......@@ -517,118 +517,6 @@ static void __init init_gbpages(void)
direct_gbpages = 0;
}
#ifdef CONFIG_MEMTEST
static void __init memtest(unsigned long start_phys, unsigned long size,
unsigned pattern)
{
unsigned long i;
unsigned long *start;
unsigned long start_bad;
unsigned long last_bad;
unsigned long val;
unsigned long start_phys_aligned;
unsigned long count;
unsigned long incr;
switch (pattern) {
case 0:
val = 0UL;
break;
case 1:
val = -1UL;
break;
case 2:
val = 0x5555555555555555UL;
break;
case 3:
val = 0xaaaaaaaaaaaaaaaaUL;
break;
default:
return;
}
incr = sizeof(unsigned long);
start_phys_aligned = ALIGN(start_phys, incr);
count = (size - (start_phys_aligned - start_phys))/incr;
start = __va(start_phys_aligned);
start_bad = 0;
last_bad = 0;
for (i = 0; i < count; i++)
start[i] = val;
for (i = 0; i < count; i++, start++, start_phys_aligned += incr) {
if (*start != val) {
if (start_phys_aligned == last_bad + incr) {
last_bad += incr;
} else {
if (start_bad) {
printk(KERN_CONT "\n %016lx bad mem addr %016lx - %016lx reserved",
val, start_bad, last_bad + incr);
reserve_early(start_bad, last_bad - start_bad, "BAD RAM");
}
start_bad = last_bad = start_phys_aligned;
}
}
}
if (start_bad) {
printk(KERN_CONT "\n %016lx bad mem addr %016lx - %016lx reserved",
val, start_bad, last_bad + incr);
reserve_early(start_bad, last_bad - start_bad, "BAD RAM");
}
}
/* default is disabled */
static int memtest_pattern __initdata;
static int __init parse_memtest(char *arg)
{
if (arg)
memtest_pattern = simple_strtoul(arg, NULL, 0);
return 0;
}
early_param("memtest", parse_memtest);
static void __init early_memtest(unsigned long start, unsigned long end)
{
u64 t_start, t_size;
unsigned pattern;
if (!memtest_pattern)
return;
printk(KERN_INFO "early_memtest: pattern num %d", memtest_pattern);
for (pattern = 0; pattern < memtest_pattern; pattern++) {
t_start = start;
t_size = 0;
while (t_start < end) {
t_start = find_e820_area_size(t_start, &t_size, 1);
/* done ? */
if (t_start >= end)
break;
if (t_start + t_size > end)
t_size = end - t_start;
printk(KERN_CONT "\n %016llx - %016llx pattern %d",
(unsigned long long)t_start,
(unsigned long long)t_start + t_size, pattern);
memtest(t_start, t_size, pattern);
t_start += t_size;
}
}
printk(KERN_CONT "\n");
}
#else
static void __init early_memtest(unsigned long start, unsigned long end)
{
}
#endif
static unsigned long __init kernel_physical_mapping_init(unsigned long start,
unsigned long end,
unsigned long page_size_mask)
......
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/pfn.h>
#include <asm/e820.h>
static void __init memtest(unsigned long start_phys, unsigned long size,
unsigned pattern)
{
unsigned long i;
unsigned long *start;
unsigned long start_bad;
unsigned long last_bad;
unsigned long val;
unsigned long start_phys_aligned;
unsigned long count;
unsigned long incr;
switch (pattern) {
case 0:
val = 0UL;
break;
case 1:
val = -1UL;
break;
case 2:
#ifdef CONFIG_X86_64
val = 0x5555555555555555UL;
#else
val = 0x55555555UL;
#endif
break;
case 3:
#ifdef CONFIG_X86_64
val = 0xaaaaaaaaaaaaaaaaUL;
#else
val = 0xaaaaaaaaUL;
#endif
break;
default:
return;
}
incr = sizeof(unsigned long);
start_phys_aligned = ALIGN(start_phys, incr);
count = (size - (start_phys_aligned - start_phys))/incr;
start = __va(start_phys_aligned);
start_bad = 0;
last_bad = 0;
for (i = 0; i < count; i++)
start[i] = val;
for (i = 0; i < count; i++, start++, start_phys_aligned += incr) {
if (*start != val) {
if (start_phys_aligned == last_bad + incr) {
last_bad += incr;
} else {
if (start_bad) {
printk(KERN_CONT "\n %010lx bad mem addr %010lx - %010lx reserved",
val, start_bad, last_bad + incr);
reserve_early(start_bad, last_bad - start_bad, "BAD RAM");
}
start_bad = last_bad = start_phys_aligned;
}
}
}
if (start_bad) {
printk(KERN_CONT "\n %016lx bad mem addr %010lx - %010lx reserved",
val, start_bad, last_bad + incr);
reserve_early(start_bad, last_bad - start_bad, "BAD RAM");
}
}
/* default is disabled */
static int memtest_pattern __initdata;
static int __init parse_memtest(char *arg)
{
if (arg)
memtest_pattern = simple_strtoul(arg, NULL, 0);
return 0;
}
early_param("memtest", parse_memtest);
void __init early_memtest(unsigned long start, unsigned long end)
{
u64 t_start, t_size;
unsigned pattern;
if (!memtest_pattern)
return;
printk(KERN_INFO "early_memtest: pattern num %d", memtest_pattern);
for (pattern = 0; pattern < memtest_pattern; pattern++) {
t_start = start;
t_size = 0;
while (t_start < end) {
t_start = find_e820_area_size(t_start, &t_size, 1);
/* done ? */
if (t_start >= end)
break;
if (t_start + t_size > end)
t_size = end - t_start;
printk(KERN_CONT "\n %010llx - %010llx pattern %d",
(unsigned long long)t_start,
(unsigned long long)t_start + t_size, pattern);
memtest(t_start, t_size, pattern);
t_start += t_size;
}
}
printk(KERN_CONT "\n");
}
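As implemented above, memtest=N runs patterns 0..N-1 over every free e820 range, writing and verifying one machine word at a time and reserving any failing span as "BAD RAM". For reference, the value written for each pattern index (64-bit constants shown; a 32-bit build uses the truncated constants from the switch above):

/* Pattern index -> value written, mirroring the switch in memtest(): */
static const unsigned long memtest_patterns[] = {
	0x0000000000000000UL,	/* 0: all zeroes */
	0xffffffffffffffffUL,	/* 1: all ones (-1UL) */
	0x5555555555555555UL,	/* 2: alternating 0101... */
	0xaaaaaaaaaaaaaaaaUL,	/* 3: alternating 1010... */
};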
......@@ -12,6 +12,8 @@
#include <linux/gfp.h>
#include <linux/fs.h>
#include <linux/bootmem.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <asm/msr.h>
#include <asm/tlbflush.h>
......@@ -489,3 +491,89 @@ void unmap_devmem(unsigned long pfn, unsigned long size, pgprot_t vma_prot)
free_memtype(addr, addr + size);
}
#if defined(CONFIG_DEBUG_FS)
/* get Nth element of the linked list */
static struct memtype *memtype_get_idx(loff_t pos)
{
struct memtype *list_node, *print_entry;
int i = 1;
print_entry = kmalloc(sizeof(struct memtype), GFP_KERNEL);
if (!print_entry)
return NULL;
spin_lock(&memtype_lock);
list_for_each_entry(list_node, &memtype_list, nd) {
if (pos == i) {
*print_entry = *list_node;
spin_unlock(&memtype_lock);
return print_entry;
}
++i;
}
spin_unlock(&memtype_lock);
kfree(print_entry);
return NULL;
}
static void *memtype_seq_start(struct seq_file *seq, loff_t *pos)
{
if (*pos == 0) {
++*pos;
seq_printf(seq, "PAT memtype list:\n");
}
return memtype_get_idx(*pos);
}
static void *memtype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
++*pos;
return memtype_get_idx(*pos);
}
static void memtype_seq_stop(struct seq_file *seq, void *v)
{
}
static int memtype_seq_show(struct seq_file *seq, void *v)
{
struct memtype *print_entry = (struct memtype *)v;
seq_printf(seq, "%s @ 0x%Lx-0x%Lx\n", cattr_name(print_entry->type),
print_entry->start, print_entry->end);
kfree(print_entry);
return 0;
}
static struct seq_operations memtype_seq_ops = {
.start = memtype_seq_start,
.next = memtype_seq_next,
.stop = memtype_seq_stop,
.show = memtype_seq_show,
};
static int memtype_seq_open(struct inode *inode, struct file *file)
{
return seq_open(file, &memtype_seq_ops);
}
static const struct file_operations memtype_fops = {
.open = memtype_seq_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
};
static int __init pat_memtype_list_init(void)
{
debugfs_create_file("pat_memtype_list", S_IRUSR, arch_debugfs_dir,
NULL, &memtype_fops);
return 0;
}
late_initcall(pat_memtype_list_init);
#endif /* CONFIG_DEBUG_FS */
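The seq_file iterator above copies one list entry at a time while holding memtype_lock, so the lock is never held across a copy to user space. With CONFIG_DEBUG_FS enabled, the active PAT attributes can then be inspected at runtime, e.g. with "cat /sys/kernel/debug/x86/pat_memtype_list" (assuming arch_debugfs_dir is the usual "x86" directory under the debugfs mount), which prints one "type @ start-end" line per tracked region.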
......@@ -5,13 +5,13 @@ obj-$(CONFIG_PCI_MMCONFIG) += mmconfig_$(BITS).o direct.o mmconfig-shared.o
obj-$(CONFIG_PCI_DIRECT) += direct.o
obj-$(CONFIG_PCI_OLPC) += olpc.o
pci-y := fixup.o
pci-$(CONFIG_ACPI) += acpi.o
pci-y += legacy.o irq.o
obj-y += fixup.o
obj-$(CONFIG_ACPI) += acpi.o
obj-y += legacy.o irq.o
pci-$(CONFIG_X86_VISWS) += visws.o
obj-$(CONFIG_X86_VISWS) += visws.o
pci-$(CONFIG_X86_NUMAQ) += numa.o
obj-$(CONFIG_X86_NUMAQ) += numaq_32.o
obj-y += $(pci-y) common.o early.o
obj-y += common.o early.o
obj-y += amd_bus.o
......@@ -57,14 +57,17 @@ static int __init pci_legacy_init(void)
int __init pci_subsys_init(void)
{
#ifdef CONFIG_X86_NUMAQ
pci_numaq_init();
#endif
#ifdef CONFIG_ACPI
pci_acpi_init();
#endif
#ifdef CONFIG_X86_VISWS
pci_visws_init();
#endif
pci_legacy_init();
pcibios_irq_init();
#ifdef CONFIG_X86_NUMAQ
pci_numa_init();
#endif
pcibios_init();
return 0;
......
/*
* numa.c - Low-level PCI access for NUMA-Q machines
* numaq_32.c - Low-level PCI access for NUMA-Q machines
*/
#include <linux/pci.h>
......@@ -151,7 +151,7 @@ static void __devinit pci_fixup_i450nx(struct pci_dev *d)
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82451NX, pci_fixup_i450nx);
int __init pci_numa_init(void)
int __init pci_numaq_init(void)
{
int quad;
......
......@@ -108,7 +108,8 @@ extern void __init dmi_check_skip_isa_align(void);
/* some common used subsys_initcalls */
extern int __init pci_acpi_init(void);
extern int __init pcibios_irq_init(void);
extern int __init pci_numa_init(void);
extern int __init pci_visws_init(void);
extern int __init pci_numaq_init(void);
extern int __init pcibios_init(void);
/* pci-mmconfig.c */
......
......@@ -86,8 +86,14 @@ void __init pcibios_update_irq(struct pci_dev *dev, int irq)
pci_write_config_byte(dev, PCI_INTERRUPT_LINE, irq);
}
static int __init pci_visws_init(void)
int __init pci_visws_init(void)
{
if (!is_visws_box())
return -1;
pcibios_enable_irq = &pci_visws_enable_irq;
pcibios_disable_irq = &pci_visws_disable_irq;
/* The VISWS supports configuration access type 1 only */
pci_probe = (pci_probe | PCI_PROBE_CONF1) &
~(PCI_PROBE_BIOS | PCI_PROBE_CONF2);
......@@ -105,18 +111,3 @@ static int __init pci_visws_init(void)
pcibios_resource_survey();
return 0;
}
static __init int pci_subsys_init(void)
{
if (!is_visws_box())
return -1;
pcibios_enable_irq = &pci_visws_enable_irq;
pcibios_disable_irq = &pci_visws_disable_irq;
pci_visws_init();
pcibios_init();
return 0;
}
subsys_initcall(pci_subsys_init);
......@@ -62,7 +62,7 @@ $(obj)/%-syms.lds: $(obj)/%.so.dbg FORCE
# Build multiple 32-bit vDSO images to choose from at boot time.
#
obj-$(VDSO32-y) += vdso32-syms.lds
vdso32.so-$(CONFIG_X86_32) += int80
vdso32.so-$(VDSO32-y) += int80
vdso32.so-$(CONFIG_COMPAT) += syscall
vdso32.so-$(VDSO32-y) += sysenter
......
......@@ -193,17 +193,12 @@ static __init void relocate_vdso(Elf32_Ehdr *ehdr)
}
}
/*
* These symbols are defined by vdso32.S to mark the bounds
* of the ELF DSO images included therein.
*/
extern const char vdso32_default_start, vdso32_default_end;
extern const char vdso32_sysenter_start, vdso32_sysenter_end;
static struct page *vdso32_pages[1];
#ifdef CONFIG_X86_64
#define vdso32_sysenter() (boot_cpu_has(X86_FEATURE_SYSENTER32))
#define vdso32_syscall() (boot_cpu_has(X86_FEATURE_SYSCALL32))
/* May not be __init: called during resume */
void syscall32_cpu_init(void)
......@@ -226,6 +221,7 @@ static inline void map_compat_vdso(int map)
#else /* CONFIG_X86_32 */
#define vdso32_sysenter() (boot_cpu_has(X86_FEATURE_SEP))
#define vdso32_syscall() (0)
void enable_sep_cpu(void)
{
......@@ -296,12 +292,15 @@ int __init sysenter_setup(void)
gate_vma_init();
#endif
if (!vdso32_sysenter()) {
vsyscall = &vdso32_default_start;
vsyscall_len = &vdso32_default_end - &vdso32_default_start;
} else {
if (vdso32_syscall()) {
vsyscall = &vdso32_syscall_start;
vsyscall_len = &vdso32_syscall_end - &vdso32_syscall_start;
} else if (vdso32_sysenter()){
vsyscall = &vdso32_sysenter_start;
vsyscall_len = &vdso32_sysenter_end - &vdso32_sysenter_start;
} else {
vsyscall = &vdso32_int80_start;
vsyscall_len = &vdso32_int80_end - &vdso32_int80_start;
}
memcpy(syscall_page, vsyscall, vsyscall_len);
......
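With the change above, sysenter_setup() prefers the compat syscall image when the CPU advertises SYSCALL32, then the sysenter image, and falls back to the int80 image otherwise; matching that, vdso32.S below now builds the int80 image unconditionally (it used to be the 32-bit-only "default") and builds the syscall image under CONFIG_COMPAT.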
......@@ -2,14 +2,17 @@
__INITDATA
.globl vdso32_default_start, vdso32_default_end
vdso32_default_start:
#ifdef CONFIG_X86_32
.globl vdso32_int80_start, vdso32_int80_end
vdso32_int80_start:
.incbin "arch/x86/vdso/vdso32-int80.so"
#else
vdso32_int80_end:
.globl vdso32_syscall_start, vdso32_syscall_end
vdso32_syscall_start:
#ifdef CONFIG_COMPAT
.incbin "arch/x86/vdso/vdso32-syscall.so"
#endif
vdso32_default_end:
vdso32_syscall_end:
.globl vdso32_sysenter_start, vdso32_sysenter_end
vdso32_sysenter_start:
......
obj-y := enlighten.o setup.o multicalls.o mmu.o \
time.o xen-asm.o grant-table.o suspend.o
time.o xen-asm_$(BITS).o grant-table.o suspend.o
obj-$(CONFIG_SMP) += smp.o
......@@ -76,6 +76,7 @@ void xen_mc_flush(void)
if (ret) {
printk(KERN_ERR "%d multicall(s) failed: cpu %d\n",
ret, smp_processor_id());
dump_stack();
for (i = 0; i < b->mcidx; i++) {
printk(" call %2d/%d: op=%lu arg=[%lx] result=%ld\n",
i+1, b->mcidx,
......