Commit 2ba74897 authored by Andi Kleen, committed by David S. Miller

[PATCH] x86-64 merge

Make it compile again, plus various cleanups and a few bug fixes.  Only
x86-64-specific files are changed.

Most of it is S3 suspend changes from Pavel and comment spelling fixes
from Steven Cole.

- Remove the now-obsolete check_cpu function
- Fix the sys_ioctl prototype
- Small optimization: use SYSCALL for 32-bit signal handling (see the sketch below)
- Fix S3 suspend handling and split it into individual files like i386 (Pavel)
- Merge from i386 (PCI fixes etc.)
- Set correct paging attributes for the IOMMU aperture
- Fix the disable apic option
parent e7507c16
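The SYSCALL changelog item is the least obvious one, so a sketch helps. SYSRET cannot restore a full user context (the exit path below reloads RIP from RCX and RFLAGS from R11), but 32-bit sigreturn must restore exactly the interrupted frame; the diff therefore makes both sigreturn paths set TIF_IRET and teaches the cstar exit path to divert to the slower IRET return whenever a work flag is set. Below is a minimal userspace model of that handshake; the flag numbers come from the thread_info.h hunk, while the composition of _TIF_ALLWORK_MASK and everything else is illustrative, not code from the patch.

/* Userspace model of the TIF_IRET handshake introduced by this patch. */
#include <stdio.h>

#define TIF_SIGPENDING		2
#define TIF_NEED_RESCHED	3
#define TIF_IRET		5	/* force return via IRET */

#define _TIF_SIGPENDING		(1u << TIF_SIGPENDING)
#define _TIF_NEED_RESCHED	(1u << TIF_NEED_RESCHED)
#define _TIF_IRET		(1u << TIF_IRET)
/* simplifying assumption: the real mask has more bits */
#define _TIF_ALLWORK_MASK	(_TIF_SIGPENDING | _TIF_NEED_RESCHED | _TIF_IRET)

static unsigned thread_flags;

static void sys32_sigreturn(void)
{
	thread_flags |= _TIF_IRET;	/* set_thread_flag(TIF_IRET) in the diff */
}

static const char *cstar_exit(void)
{
	if (thread_flags & _TIF_ALLWORK_MASK) {
		thread_flags &= ~_TIF_IRET;	/* the btc in ia32entry.S */
		return "int_ret_from_sys_call (IRET)";
	}
	return "sysretl (fast path)";
}

int main(void)
{
	printf("ordinary syscall exit: %s\n", cstar_exit());
	sys32_sigreturn();
	printf("after sigreturn:       %s\n", cstar_exit());
	return 0;
}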
@@ -327,89 +327,6 @@ void close_output_buffer_if_we_run_high(struct moveparams *mv)
	}
}
-void check_cpu(void)
-{
-	unsigned before, after, flags;
-	unsigned a,b,c,d;
-	int isamd;
-
-	/* check if the CPU supports CPUID. This is done by testing if the CPU
-	   supports changing the ID bit (21) in EFLAGS. */
-	asm("pushfl ; "
-	    "popl %0 ; "		/* get EFLAGS */
-	    "movl %0,%1 ; "
-	    "xorl $(1<<21),%0 ; "	/* toggle bit 21 */
-	    "pushl %0 ; "
-	    "popfl ; "
-	    "pushfl ; "			/* get EFLAGS again */
-	    "popl %0 " : "=r" (after), "=r" (before));
-	if (before == after)
-		error("Your CPU doesn't support CPUID.");
-
-	/* check if it supports AMD extended cpuid reporting */
-	asm("cpuid" : "=a" (a), "=b" (b), "=c" (c), "=d" (d) : "0" (0x80000000));
-	if (a < 0x80000001)
-		error("Your CPU doesn't support AMD extended CPUIDs.");
-
-	/* AuthenticAMD */
-	isamd = (b == 0x68747541) && (d == 0x69746e65) && (c == 0x444d4163);
-
-	/* check required feature flags */
-	/* see http://www.x86-64.org/lists/discuss/msg02971.html */
-#define REQUIRED_MASK1 ((1<<0)|(1<<3)|(1<<4)|(1<<5)|(1<<6)|(1<<8)|(1<<11)| \
-			(1<<13)|(1<<15)|(1<<24))
-	asm("cpuid" : "=d" (flags), "=a" (a) : "1" (0x80000001) : "ebx", "ecx");
-	flags &= REQUIRED_MASK1;
-	flags ^= REQUIRED_MASK1;
-	if (flags & (1<<9)) {
-		puts("WARNING: non APIC mode for long mode kernel is untested.");
-		puts("In case of trouble use 32bit kernel or enable APIC.");
-	}
-	if (flags & (1<<0))
-		error("CPU misses x87 FPU");
-	if (flags & (1<<3))
-		error("CPU doesn't support page size extension (PSE)");
-	if (flags & (1<<4))
-		error("CPU misses an time stamp counter");
-	if (flags & (1<<5))
-		error("CPU misses AMD style MSRs");
-	if (flags & (1<<6))
-		error("CPU misses physical address extension (PAE)");
-	if (flags & (1<<8))
-		error("CPU misses cmpxchg8");
-	if (flags & (1<<11))
-		error("CPU doesn't support SYSCALL/SYSRET");
-	if (flags & (1<<13))
-		error("CPU doesn't support PGE");
-	if (flags & (1<<15))
-		error("CPU doesn't support CMOV");
-	if (flags & (1<<24))
-		error("CPU doesn't support FXSAVE/FXRSTOR");
-	if (flags & (1<<29))
-		error("CPU doesn't support long mode");
-
-#define SSE_MASK ((1<<25)|(1<<26))
-	asm("cpuid" : "=d" (flags), "=a" (a) : "1" (1) : "ebx", "ecx");
-	if ((flags & SSE_MASK) != SSE_MASK && isamd) {
-		/* Only try this on AMD CPUs. */
-		/* Enable SSE in HWCFG MSR */
-		asm volatile("rdmsr" : "=d" (d), "=a" (flags) : "c" (0xc0010015));
-		flags &= ~(1<<15);
-		asm volatile("wrmsr" :: "d" (d), "a" (flags), "c" (0xc0010015));
-	}
-
-	/* Try again */
-	asm("cpuid" : "=d" (flags), "=a" (a) : "1" (1) : "ebx", "ecx");
-	flags &= SSE_MASK;
-	flags ^= SSE_MASK;
-	if (flags & (1<<25))
-		error("CPU doesn't support SSE1");
-	if (flags & (1<<26))
-		error("CPU doesn't support SSE2");
-}
int decompress_kernel(struct moveparams *mv, void *rmode)
{
	real_mode = rmode;
...
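For readers who want to poke at the same feature bits from userspace, here is a rough equivalent of the removed test using GCC's <cpuid.h> helpers; the leaf number and the "AuthenticAMD" constants are the ones from check_cpu above, the rest is illustrative.

/* Userspace sketch of the CPUID checks the deleted check_cpu() did.
 * Requires GCC or Clang on x86; leaf 0x80000001 EDX bit 29 is the
 * long mode flag. */
#include <stdio.h>
#include <cpuid.h>

int main(void)
{
	unsigned a, b, c, d;

	if (!__get_cpuid(0x80000000, &a, &b, &c, &d) || a < 0x80000001) {
		puts("No AMD extended CPUID leaves");
		return 1;
	}
	/* "AuthenticAMD" spelled out in EBX/EDX/ECX, as in the isamd test */
	int isamd = (b == 0x68747541) && (d == 0x69746e65) && (c == 0x444d4163);

	__get_cpuid(0x80000001, &a, &b, &c, &d);
	printf("AMD: %s, long mode: %s\n",
	       isamd ? "yes" : "no", (d & (1u << 29)) ? "yes" : "no");
	return 0;
}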
/*
 * Written 2000,2002 by Andi Kleen.
 *
- * Losely based on the sparc64 and IA64 32bit emulation loaders.
+ * Loosely based on the sparc64 and IA64 32bit emulation loaders.
 * This tricks binfmt_elf.c into loading 32bit binaries using lots
 * of ugly preprocessor tricks. Talk about very very poor man's inheritance.
 */
...
@@ -124,7 +124,7 @@
#define EXT2_IOC32_GETVERSION	_IOR('v', 1, int)
#define EXT2_IOC32_SETVERSION	_IOW('v', 2, int)
-extern asmlinkage int sys_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg);
+extern asmlinkage long sys_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg);
static int w_long(unsigned int fd, unsigned int cmd, unsigned long arg)
{
...
@@ -247,6 +247,8 @@ asmlinkage long sys32_sigreturn(struct pt_regs regs)
	sigset_t set;
	unsigned int eax;

+	set_thread_flag(TIF_IRET);
+
	if (verify_area(VERIFY_READ, frame, sizeof(*frame)))
		goto badframe;
	if (__get_user(set.sig[0], &frame->sc.oldmask)
@@ -277,6 +279,8 @@ asmlinkage long sys32_rt_sigreturn(struct pt_regs regs)
	stack_t st;
	unsigned int eax;

+	set_thread_flag(TIF_IRET);
+
	if (verify_area(VERIFY_READ, frame, sizeof(*frame)))
		goto badframe;
	if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
...
@@ -82,7 +82,7 @@ cstar_sysret:
	GET_THREAD_INFO(%r10)
	cli
	testl $_TIF_ALLWORK_MASK,threadinfo_flags(%r10)
-	jnz int_ret_from_sys_call
+	jnz 1f
	RESTORE_ARGS 1,-ARG_SKIP,1,1
	movl RIP-ARGOFFSET(%rsp),%ecx
	movl EFLAGS-ARGOFFSET(%rsp),%r11d
@@ -90,6 +90,10 @@ cstar_sysret:
	swapgs
	sysretl

+1:
+	btc $TIF_IRET,threadinfo_flags(%r10)
+	jmp int_ret_from_sys_call
+
cstar_tracesys:
	SAVE_REST
	movq $-ENOSYS,RAX(%rsp)	/* really needed? */
@@ -121,7 +125,7 @@ cstar_badarg:
 * Arguments are zero extended. For system calls that want sign extension and
 * take long arguments a wrapper is needed. Most calls can just be called
 * directly.
- * Assumes it is only called from user space and entered with interrups off.
+ * Assumes it is only called from user space and entered with interrupts off.
 */
ENTRY(ia32_syscall)
...
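The comment in the last hunk is worth unpacking: 32-bit arguments arrive zero-extended in 64-bit registers, so a syscall that treats an argument as a signed long needs a wrapper that re-extends the sign. A toy userspace illustration; the function names are made up, not from this patch.

/* A 32-bit 'int' argument arrives zero-extended; casting through int
 * sign-extends it before the native 64-bit call sees it. */
#include <stdio.h>

static long sys_example(long offset)		/* stands in for a native syscall */
{
	return offset;
}

static long sys32_example(unsigned int offset32)
{
	/* (int) recovers the sign; implicit widening to long extends it */
	return sys_example((int)offset32);
}

int main(void)
{
	/* 0xfffffffe is -2 as a 32-bit int; zero-extended it would be 4GB-2 */
	printf("%ld\n", sys32_example(0xfffffffeu));	/* prints -2 */
	return 0;
}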
@@ -27,13 +27,13 @@ asm("	.code32\n"
"sig32_rt_tramp:\n"
"	movl $" __stringify(__NR_ia32_rt_sigreturn) ",%eax\n"
-"	int $0x80\n"
+"	syscall\n"
"sig32_rt_tramp_end:\n"
"sig32_tramp:\n"
"	popl %eax\n"
"	movl $" __stringify(__NR_ia32_sigreturn) ",%eax\n"
-"	int $0x80\n"
+"	syscall\n"
"sig32_tramp_end:\n"
"	.code64\n");
@@ -44,7 +44,7 @@ extern unsigned char sig32_tramp[], sig32_tramp_end[];
char *syscall32_page;

/* RED-PEN: This knows too much about high level VM */
-/* Alternative would be to generate a vma with appropiate backing options
+/* Alternative would be to generate a vma with appropriate backing options
   and let it be handled by generic VM */
int map_syscall32(struct mm_struct *mm, unsigned long address)
{
...
@@ -300,7 +300,7 @@ acpi_boot_init (void)
	/*
	 * The default interrupt routing model is PIC (8259).  This gets
-	 * overriden if IOAPICs are enumerated (below).
+	 * overridden if IOAPICs are enumerated (below).
	 */
	acpi_irq_model = ACPI_IRQ_MODEL_PIC;
@@ -318,6 +318,9 @@ acpi_boot_init (void)
	} else
		printk(KERN_NOTICE PREFIX "BIOS passes blacklist\n");

+	extern int disable_apic;
+	if (disable_apic)
+		return 0;
#ifdef CONFIG_X86_LOCAL_APIC
@@ -345,7 +348,7 @@ acpi_boot_init (void)
	 * Local APIC
	 * ----------
	 * Note that the LAPIC address is obtained from the MADT (32-bit value)
-	 * and (optionally) overriden by a LAPIC_ADDR_OVR entry (64-bit value).
+	 * and (optionally) overridden by a LAPIC_ADDR_OVR entry (64-bit value).
	 */
	result = acpi_table_parse_madt(ACPI_MADT_LAPIC_ADDR_OVR, acpi_parse_lapic_addr_ovr);
...
obj-$(CONFIG_ACPI_BOOT) := boot.o
obj-$(CONFIG_ACPI_SLEEP) += sleep.o wakeup.o
/*
* acpi.c - Architecture-Specific Low-Level ACPI Support
*
* Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
* Copyright (C) 2001 Jun Nakajima <jun.nakajima@intel.com>
* Copyright (C) 2001 Patrick Mochel <mochel@osdl.org>
* Copyright (C) 2002 Andi Kleen, SuSE Labs (x86-64 port)
* Copyright (C) 2003 Pavel Machek, SuSE Labs
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*/
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/stddef.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/bootmem.h>
#include <linux/irq.h>
#include <linux/acpi.h>
#include <asm/mpspec.h>
#include <asm/io.h>
#include <asm/apic.h>
#include <asm/apicdef.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/io_apic.h>
#include <asm/proto.h>
#include <asm/tlbflush.h>
/* --------------------------------------------------------------------------
Low-Level Sleep Support
-------------------------------------------------------------------------- */
#ifdef CONFIG_ACPI_SLEEP
/* address in low memory of the wakeup routine. */
unsigned long acpi_wakeup_address = 0;
extern char wakeup_start, wakeup_end;
extern unsigned long FASTCALL(acpi_copy_wakeup_routine(unsigned long));
static void init_low_mapping(void)
{
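	/* Alias the kernel's mapping of low memory at virtual address 0 so
	   the real-mode wakeup trampoline can execute identity mapped. */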
	cpu_pda[0].level4_pgt[0] = cpu_pda[0].level4_pgt[pml4_index(PAGE_OFFSET)];
	flush_tlb_all();
}
/**
* acpi_save_state_mem - save kernel state
*
* Create an identity mapped page table and copy the wakeup routine to
* low memory.
*/
int acpi_save_state_mem (void)
{
	init_low_mapping();
	memcpy((void *) acpi_wakeup_address, &wakeup_start, &wakeup_end - &wakeup_start);
	acpi_copy_wakeup_routine(acpi_wakeup_address);
	return 0;
}

/**
 * acpi_save_state_disk - save kernel state to disk
 *
 */
int acpi_save_state_disk (void)
{
	return 1;
}
/*
* acpi_restore_state
*/
void acpi_restore_state_mem (void)
{
	cpu_pda[0].level4_pgt[0] = 0;
	flush_tlb_all();
}
/**
* acpi_reserve_bootmem - do _very_ early ACPI initialisation
*
* We allocate a page in low memory for the wakeup
* routine for when we come back from a sleep state. The
* runtime allocator allows specification of <16M pages, but not
* <1M pages.
*/
void __init acpi_reserve_bootmem(void)
{
	acpi_wakeup_address = (unsigned long)alloc_bootmem_low(PAGE_SIZE);
	if ((&wakeup_end - &wakeup_start) > PAGE_SIZE)
		printk(KERN_CRIT "ACPI: Wakeup code way too big, will crash on attempt to suspend\n");
	printk(KERN_DEBUG "ACPI: have wakeup address 0x%8.8lx\n", acpi_wakeup_address);
}
#endif /*CONFIG_ACPI_SLEEP*/
void acpi_pci_link_exit(void) {}
@@ -376,7 +376,7 @@ void __init setup_local_APIC (void)
	 * Set up LVT0, LVT1:
	 *
	 * set up through-local-APIC on the BP's LINT0. This is not
-	 * strictly necessery in pure symmetric-IO mode, but sometimes
+	 * strictly necessary in pure symmetric-IO mode, but sometimes
	 * we delegate interrupts to the 8259A.
	 */
	/*
@@ -935,7 +935,7 @@ void smp_local_timer_interrupt(struct pt_regs *regs)
	/*
	 * We take the 'long' return path, and there every subsystem
-	 * grabs the apropriate locks (kernel lock/ irq lock).
+	 * grabs the appropriate locks (kernel lock/ irq lock).
	 *
	 * we might want to decouple profiling from the 'long path',
	 * and do the profiling totally in assembly.
...
@@ -64,6 +64,7 @@ static inline void do_cpuid(int cpu, u32 reg, u32 *data)
{
	struct cpuid_command cmd;

+	preempt_disable();
	if ( cpu == smp_processor_id() ) {
		cpuid(reg, &data[0], &data[1], &data[2], &data[3]);
	} else {
@@ -73,6 +74,7 @@ static inline void do_cpuid(int cpu, u32 reg, u32 *data)
		smp_call_function(cpuid_smp_cpuid, &cmd, 1, 1);
	}
+	preempt_enable();
}
#else /* ! CONFIG_SMP */
...
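The preempt_disable()/preempt_enable() pair added here (and in msr.c below) closes a migration race: without it, the task could be preempted and moved to another CPU between the smp_processor_id() check and the actual cpuid, executing the instruction on the wrong processor. The shape of the fix, as a kernel-style sketch (2.5-era APIs, compiles only in-kernel; names are illustrative):

/* Sketch of the pattern this hunk introduces; not a drop-in file.
 * smp_call_function() runs the handler on every other CPU and the
 * handler filters on cmd->cpu, as cpuid_smp_cpuid does. */
#include <linux/types.h>
#include <linux/smp.h>
#include <linux/preempt.h>
#include <asm/processor.h>

struct cpuid_cmd { int cpu; u32 reg; u32 *data; };

static void remote_cpuid(void *v)
{
	struct cpuid_cmd *cmd = v;

	if (cmd->cpu == smp_processor_id())	/* only the target answers */
		cpuid(cmd->reg, &cmd->data[0], &cmd->data[1],
		      &cmd->data[2], &cmd->data[3]);
}

static void do_cpuid_safe(int cpu, u32 reg, u32 *data)
{
	struct cpuid_cmd cmd = { cpu, reg, data };

	preempt_disable();	/* no migration between the check and the CPUID */
	if (cpu == smp_processor_id())
		cpuid(reg, &data[0], &data[1], &data[2], &data[3]);
	else
		smp_call_function(remote_cpuid, &cmd, 1, 1);
	preempt_enable();
}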
@@ -54,7 +54,7 @@ static struct console early_vga_console = {
	.index = -1,
};

-/* Serial functions losely based on a similar package from Klaus P. Gerlicher */
+/* Serial functions loosely based on a similar package from Klaus P. Gerlicher */

int early_serial_base = 0x3f8;  /* ttyS0 */
...
@@ -25,7 +25,7 @@
	.text
	.code32

-/* %bx:	1 if comming from smp trampoline on secondary cpu */
+/* %bx:	1 if coming from smp trampoline on secondary cpu */
startup_32:
	/*
@@ -194,7 +194,8 @@ ENTRY(no_long_mode)
	jmp 1b
	.org 0xf00
-ENTRY(pGDT32):
+	.globl pGDT32
+pGDT32:
	.word gdt32_end-gdt_table32
	.long gdt_table32-__START_KERNEL_map
...
@@ -49,7 +49,7 @@ void __init fpu_init(void)
/*
 * The _current_ task is using the FPU for the first time
 * so initialize it and set the mxcsr to its default.
- * remeber the current task has used the FPU.
+ * remember the current task has used the FPU.
 */
void init_fpu(struct task_struct *child)
{
...
@@ -50,7 +50,7 @@
 * Linux has a controller-independent x86 interrupt architecture.
 * every controller has a 'controller-template', that is used
 * by the main code to do the right thing. Each driver-visible
- * interrupt source is transparently wired to the apropriate
+ * interrupt source is transparently wired to the appropriate
 * controller. Thus drivers need not be aware of the
 * interrupt-controller.
 *
@@ -688,7 +688,7 @@ unsigned int probe_irq_mask(unsigned long val)
 * The interrupt probe logic state is returned to its previous
 * value.
 *
- * BUGS: When used in a module (which arguably shouldnt happen)
+ * BUGS: When used in a module (which arguably shouldn't happen)
 * nothing prevents two IRQ probe callers from overlapping. The
 * results of this are non-optimal.
 */
...
@@ -154,7 +154,7 @@ static int read_ldt(void * ptr, unsigned long bytecount)
static int read_default_ldt(void * ptr, unsigned long bytecount)
{
-	/* Arbitary number */
+	/* Arbitrary number */
	/* x86-64 default LDT is all zeros */
	if (bytecount > 128)
		bytecount = 128;
...
/*
- * Intel Multiprocessor Specificiation 1.1 and 1.4
+ * Intel Multiprocessor Specification 1.1 and 1.4
 * compliant MP-table parsing routines.
 *
 * (c) 1995 Alan Cox, Building #3 <alan@redhat.com>
@@ -849,7 +849,7 @@ void __init mp_config_acpi_legacy_irqs (void)
	/*
	 * Use the default configuration for the IRQs 0-15.  These may be
-	 * overriden by (MADT) interrupt source override entries.
+	 * overridden by (MADT) interrupt source override entries.
	 */
	for (i = 0; i < 16; i++) {
...
@@ -120,8 +120,11 @@ static inline int do_wrmsr(int cpu, u32 reg, u32 eax, u32 edx)
{
	struct msr_command cmd;

+	preempt_disable();
	if ( cpu == smp_processor_id() ) {
-		return wrmsr_eio(reg, eax, edx);
+		int ret = wrmsr_eio(reg, eax, edx);
+		preempt_enable();
+		return ret;
	} else {
		cmd.cpu = cpu;
		cmd.reg = reg;
@@ -129,6 +132,7 @@ static inline int do_wrmsr(int cpu, u32 reg, u32 eax, u32 edx)
		cmd.data[1] = edx;
		smp_call_function(msr_smp_wrmsr, &cmd, 1, 1);
+		preempt_enable();
		return cmd.err;
	}
}
...
@@ -12,7 +12,7 @@
dma_addr_t bad_dma_address = -1UL;

/* Map a set of buffers described by scatterlist in streaming
- * mode for DMA.  This is the scather-gather version of the
+ * mode for DMA.  This is the scatter-gather version of the
 * above pci_map_single interface.  Here the scatter gather list
 * elements are each tagged with the appropriate dma address
 * and length.  They are obtained via sg_dma_{address,length}(SG).
...
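As that comment says, this is the scatter-gather counterpart of pci_map_single. For orientation, a hedged usage sketch of the 2.5-era interface; program_hw_descriptor is a hypothetical driver hook, not part of any API.

/* Illustrative driver fragment for the interface documented above. */
#include <linux/types.h>
#include <linux/pci.h>

static void program_hw_descriptor(dma_addr_t addr, unsigned int len)
{
	/* hypothetical: hand one DMA segment to the device */
}

static void example_map(struct pci_dev *pdev, struct scatterlist *sg, int nents)
{
	int i, mapped = pci_map_sg(pdev, sg, nents, PCI_DMA_TODEVICE);

	for (i = 0; i < mapped; i++)	/* may be fewer than nents if merged */
		program_hw_descriptor(sg_dma_address(&sg[i]), sg_dma_length(&sg[i]));

	/* ... start I/O, wait for completion ... */
	pci_unmap_sg(pdev, sg, nents, PCI_DMA_TODEVICE);
}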
@@ -437,8 +437,6 @@ static __init int init_k8_gatt(agp_kern_info *info)
	if (!gatt)
		panic("Cannot allocate GATT table");
	memset(gatt, 0, gatt_size);
-	change_page_attr(virt_to_page(gatt), gatt_size/PAGE_SIZE, PAGE_KERNEL_NOCACHE);
-	global_flush_tlb();
	agp_gatt_table = gatt;

	for_all_nb(dev) {
@@ -538,7 +536,8 @@ void __init pci_iommu_init(void)
	iommu_gatt_base = agp_gatt_table + (iommu_start>>PAGE_SHIFT);
	bad_dma_address = iommu_bus_base;

-	asm volatile("wbinvd" ::: "memory");
+	change_page_attr(virt_to_page(__va(iommu_start)), iommu_pages, PAGE_KERNEL);
+	global_flush_tlb();
}

/* iommu=[size][,noagp][,off][,force][,noforce][,leak][,memaper[=order]]
...
@@ -451,7 +451,7 @@ static int __init init_amd(struct cpuinfo_x86 *c)
	if (!r) {
		switch (c->x86) {
		case 15:
-			/* Should distingush Models here, but this is only
+			/* Should distinguish Models here, but this is only
			   a fallback anyways. */
			strcpy(c->x86_model_id, "Hammer");
			break;
...
@@ -160,7 +160,7 @@ static inline void leave_mm (unsigned long cpu)
 * 1a1) clear_bit(cpu, &old_mm->cpu_vm_mask);
 * 	Stop ipi delivery for the old mm. This is not synchronized with
 * 	the other cpus, but smp_invalidate_interrupt ignore flush ipis
- * 	for the wrong mm, and in the worst case we perform a superflous
+ * 	for the wrong mm, and in the worst case we perform a superfluous
 * 	tlb flush.
 * 1a2) set cpu mmu_state to TLBSTATE_OK
 * 	Now the smp_invalidate_interrupt won't call leave_mm if cpu0
@@ -250,7 +250,7 @@ static void flush_tlb_others (unsigned long cpumask, struct mm_struct *mm,
		BUG();

	/*
-	 * i'm not happy about this global shared spinlock in the
+	 * I'm not happy about this global shared spinlock in the
	 * MM hot path, but we'll see how contended it is.
	 * Temporarily this turns IRQs off, so that lockups are
	 * detected by the NMI watchdog.
...
@@ -121,7 +121,11 @@ void fix_processor_context(void)
	int cpu = smp_processor_id();
	struct tss_struct * t = init_tss + cpu;

-	printk("Should fix processor context!\n");
+	set_tss_desc(cpu, t);	/* This just modifies memory; should not be necessary. But... This is necessary, because 386 hardware has concept of busy TSS or some similar stupidity. */
+	((struct n_desc_struct *)&cpu_gdt_table[cpu][GDT_ENTRY_TSS])->b &= 0xfffffdff;
+	syscall_init();		/* This sets MSR_*STAR and related */
+	load_TR_desc();		/* This does ltr */
	load_LDT(&current->mm->context);	/* This does lldt */

	/*
...
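Why the magic 0xfffffdff: bit 9 of a descriptor's second dword is the busy bit of the TSS type field, and ltr faults on a TSS that is still marked busy, so resume has to clear it before load_TR_desc(). A standalone check of the bit arithmetic:

/* Descriptor type nibble sits in bits 8-11 of the high dword:
 * 0xB = busy TSS, 0x9 = available TSS; the mask clears bit 9. */
#include <stdio.h>
#include <assert.h>

int main(void)
{
	unsigned b = 0x00008b00;	/* hypothetical high dword, type 0xB (busy) */

	b &= 0xfffffdff;		/* mask used by fix_processor_context() */
	assert(((b >> 8) & 0xf) == 0x9);	/* now available, ltr is happy */
	printf("type nibble after mask: %x\n", (b >> 8) & 0xf);
	return 0;
}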
@@ -323,14 +323,13 @@ int die_owner = -1;
void die(const char * str, struct pt_regs * regs, long err)
{
-	int cpu;
-	struct die_args args = { regs, str, err };
	static int die_counter;
+	int cpu;

	console_verbose();
-	notifier_call_chain(&die_chain, DIE_DIE, &args);
	bust_spinlocks(1);
	handle_BUG(regs);
-	printk("%s: %04lx [#%d]\n", str, err & 0xffff, ++die_counter);
+	printk(KERN_EMERG "%s: %04lx [%u]\n", str, err & 0xffff, ++die_counter);
+	notify_die(DIE_OOPS, (char *)str, regs, err, 255, SIGSEGV);
	cpu = safe_smp_processor_id();
	/* racy, but better than risking deadlock. */
	local_irq_disable();
@@ -662,7 +661,7 @@ void math_error(void *rip)
	 * C1 reg you need in case of a stack fault, 0x040 is the stack
	 * fault bit. We should only be taking one exception at a time,
	 * so if this combination doesn't produce any single exception,
-	 * then we have a bad program that isn't syncronizing its FPU usage
+	 * then we have a bad program that isn't synchronizing its FPU usage
	 * and it will suffer the consequences since we won't be able to
	 * fully reproduce the context of the exception
	 */
...
@@ -7,7 +7,7 @@
 *
 * Copyright 2003 by Andi Kleen, SuSE Labs.
 *
- * Long mode entry losely based on example code in chapter 14 of the x86-64 system
+ * Long mode entry loosely based on example code in chapter 14 of the x86-64 system
 * programmer's manual.
 *
 * Notebook:
...
@@ -58,7 +58,7 @@ bad_to_user:
 * rdx count
 *
 * Output:
- * eax uncopied bytes or 0 if successfull.
+ * eax uncopied bytes or 0 if successful.
 */
	.globl copy_user_generic
copy_user_generic:
...
@@ -83,6 +83,7 @@ int __init k8_scan_nodes(unsigned long start, unsigned long end)
	limit >>= 16;
	limit <<= 24;
+	limit |= (1<<24)-1;
	if (limit > end_pfn_map << PAGE_SHIFT)
		limit = end_pfn_map << PAGE_SHIFT;
...
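The added OR is easy to gloss over: the K8 limit field names the node's last 16MB granule (address bits 39:24), so after shifting it into place the low 24 bits must be filled with ones to get an inclusive end address; otherwise the clamp against end_pfn_map undercounts the node by up to 16MB. Worked through with a made-up register value:

/* Standalone rerun of the limit arithmetic with a hypothetical value. */
#include <stdio.h>

int main(void)
{
	unsigned long limit = 0x007f0000;	/* made-up config dword, field 0x7f */

	limit >>= 16;			/* extract the limit field: 0x7f */
	limit <<= 24;			/* scale to bytes: 0x7f000000 */
	limit |= (1UL << 24) - 1;	/* inclusive end: 0x7fffffff */

	printf("node ends at %#lx\n", limit);	/* 2GB - 1 */
	return 0;
}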
@@ -17,6 +17,7 @@
#include <asm/segment.h>
#include <asm/io.h>
#include <asm/smp.h>
+#include <asm/cache.h>
#include "pci.h"
@@ -94,6 +95,8 @@ static void __devinit pcibios_fixup_ghosts(struct pci_bus *b)
	}
}

+struct pbus_set_ranges_data;
+
void __devinit
pcibios_fixup_pbus_ranges (struct pci_bus *bus, struct pbus_set_ranges_data *ranges)
{
@@ -129,6 +132,8 @@ struct pci_bus * __devinit pcibios_scan_root(int busnum)
	return pci_scan_bus(busnum, pci_root_ops, NULL);
}

+extern u8 pci_cache_line_size;
+
static int __init pcibios_init(void)
{
	if (!pci_root_ops) {
@@ -136,6 +141,8 @@ static int __init pcibios_init(void)
		return 0;
	}

+	pci_cache_line_size = boot_cpu_data.x86_clflush_size >> 2;
+
	pcibios_resource_survey();
#ifdef CONFIG_GART_IOMMU
...
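A unit note on the last added line, since the shift is easy to misread: x86_clflush_size is the CPU's cache line size in bytes, while the PCI Cache Line Size register is programmed in 32-bit dwords, so the >> 2 converts bytes to dwords (a 64-byte line becomes 16).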
@@ -101,6 +101,7 @@ static inline struct thread_info *stack_thread_info(void)
#define TIF_SIGPENDING		2	/* signal pending */
#define TIF_NEED_RESCHED	3	/* rescheduling necessary */
#define TIF_SINGLESTEP		4	/* reenable singlestep on user return*/
+#define TIF_IRET		5	/* force IRET */
#define TIF_POLLING_NRFLAG	16	/* true if poll_idle() is polling TIF_NEED_RESCHED */
#define TIF_IA32		17	/* 32bit process */
@@ -109,6 +110,7 @@ static inline struct thread_info *stack_thread_info(void)
#define _TIF_SIGPENDING		(1<<TIF_SIGPENDING)
#define _TIF_SINGLESTEP		(1<<TIF_SINGLESTEP)
#define _TIF_NEED_RESCHED	(1<<TIF_NEED_RESCHED)
+#define _TIF_IRET		(1<<TIF_IRET)
#define _TIF_POLLING_NRFLAG	(1<<TIF_POLLING_NRFLAG)
#define _TIF_IA32		(1<<TIF_IA32)
...