Commit b0d24ad7 authored by David Mosberger's avatar David Mosberger Committed by David Mosberger

[PATCH] More 2.5.6 sync up.

Take advantage of new per-CPU scheme.
parent 6b9f9360
#include <asm/asmmacro.h> #include <asm/asmmacro.h>
#include <asm/offsets.h> #include <asm/offsets.h>
#include <asm/signal.h> #include <asm/signal.h>
#include <asm/thread_info.h>
#include "../kernel/minstate.h" #include "../kernel/minstate.h"
@@ -87,18 +88,21 @@ END(sys32_sigsuspend)
GLOBAL_ENTRY(ia32_ret_from_clone) GLOBAL_ENTRY(ia32_ret_from_clone)
PT_REGS_UNWIND_INFO(0) PT_REGS_UNWIND_INFO(0)
#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT)
/* /*
* We need to call schedule_tail() to complete the scheduling process. * We need to call schedule_tail() to complete the scheduling process.
* Called by ia64_switch_to after do_fork()->copy_thread(). r8 contains the * Called by ia64_switch_to after do_fork()->copy_thread(). r8 contains the
* address of the previously executing task. * address of the previously executing task.
*/ */
br.call.sptk.many rp=ia64_invoke_schedule_tail br.call.sptk.many rp=ia64_invoke_schedule_tail
.ret1: adds r2=IA64_TASK_PTRACE_OFFSET,r13 .ret1:
#endif
adds r2=TI_FLAGS+IA64_TASK_SIZE,r13
;; ;;
ld8 r2=[r2] ld4 r2=[r2]
;; ;;
mov r8=0 mov r8=0
tbit.nz p6,p0=r2,PT_SYSCALLTRACE_BIT tbit.nz p6,p0=r2,TIF_SYSCALL_TRACE
(p6) br.cond.spnt .ia32_strace_check_retval (p6) br.cond.spnt .ia32_strace_check_retval
;; // prevent RAW on r8 ;; // prevent RAW on r8
END(ia32_ret_from_clone) END(ia32_ret_from_clone)
......
@@ -622,7 +622,7 @@ GLOBAL_ENTRY(ia64_leave_kernel)
shr.u r18=r19,16 // get byte size of existing "dirty" partition shr.u r18=r19,16 // get byte size of existing "dirty" partition
;; ;;
mov r16=ar.bsp // get existing backing store pointer mov r16=ar.bsp // get existing backing store pointer
movl r17=PERCPU_ADDR+IA64_CPU_PHYS_STACKED_SIZE_P8_OFFSET movl r17=THIS_CPU(ia64_phys_stacked_size_p8)
;; ;;
ld4 r17=[r17] // r17 = cpu_data->phys_stacked_size_p8 ld4 r17=[r17] // r17 = cpu_data->phys_stacked_size_p8
(pKern) br.cond.dpnt skip_rbs_switch (pKern) br.cond.dpnt skip_rbs_switch
@@ -758,6 +758,7 @@ ENTRY(handle_syscall_error)
br.cond.sptk ia64_leave_kernel br.cond.sptk ia64_leave_kernel
END(handle_syscall_error) END(handle_syscall_error)
#ifdef CONFIG_SMP
/* /*
* Invoke schedule_tail(task) while preserving in0-in7, which may be needed * Invoke schedule_tail(task) while preserving in0-in7, which may be needed
* in case a system call gets restarted. * in case a system call gets restarted.
@@ -774,6 +775,8 @@ GLOBAL_ENTRY(ia64_invoke_schedule_tail)
br.ret.sptk.many rp br.ret.sptk.many rp
END(ia64_invoke_schedule_tail) END(ia64_invoke_schedule_tail)
#endif /* CONFIG_SMP */
#if __GNUC__ < 3 #if __GNUC__ < 3
/* /*
......
@@ -59,7 +59,7 @@ EXPORT_SYMBOL(clear_page);
#include <asm/processor.h> #include <asm/processor.h>
# ifndef CONFIG_NUMA # ifndef CONFIG_NUMA
EXPORT_SYMBOL(_cpu_data); EXPORT_SYMBOL(cpu_info);
# endif # endif
EXPORT_SYMBOL(kernel_thread); EXPORT_SYMBOL(kernel_thread);
......
@@ -645,7 +645,6 @@ ENTRY(break_fault)
mov r3=255 mov r3=255
adds r15=-1024,r15 // r15 contains the syscall number---subtract 1024 adds r15=-1024,r15 // r15 contains the syscall number---subtract 1024
adds r2=IA64_TASK_PTRACE_OFFSET,r13 // r2 = &current->ptrace
;; ;;
cmp.geu p6,p7=r3,r15 // (syscall > 0 && syscall <= 1024+255) ? cmp.geu p6,p7=r3,r15 // (syscall > 0 && syscall <= 1024+255) ?
movl r16=sys_call_table movl r16=sys_call_table
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment