Commit eac9ee8e authored by David Mosberger

Merge tiger.hpl.hp.com:/data1/bk/vanilla/linux-2.5

into tiger.hpl.hp.com:/data1/bk/lia64/to-linus-2.5
parents 9fdca734 ab34c74c
@@ -32,7 +32,7 @@ ENTRY(ia32_execve)
END(ia32_execve)
ENTRY(ia32_clone)
.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(2)
.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(5)
alloc r16=ar.pfs,5,2,6,0
DO_SAVE_SWITCH_STACK
mov loc0=rp
@@ -110,7 +110,9 @@ GLOBAL_ENTRY(ia32_ret_from_clone)
ld4 r2=[r2]
;;
mov r8=0
tbit.nz p6,p0=r2,TIF_SYSCALL_TRACE
and r2=_TIF_SYSCALL_TRACEAUDIT,r2
;;
cmp.ne p6,p0=r2,r0
(p6) br.cond.spnt .ia32_strace_check_retval
;; // prevent RAW on r8
END(ia32_ret_from_clone)
@@ -142,7 +144,7 @@ GLOBAL_ENTRY(ia32_trace_syscall)
adds r2=IA64_PT_REGS_R8_OFFSET+16,sp
;;
st8 [r2]=r3 // initialize return code to -ENOSYS
br.call.sptk.few rp=syscall_trace // give parent a chance to catch syscall args
br.call.sptk.few rp=syscall_trace_enter // give parent a chance to catch syscall args
.ret2: // Need to reload arguments (they may be changed by the tracing process)
adds r2=IA64_PT_REGS_R1_OFFSET+16,sp // r2 = &pt_regs.r1
adds r3=IA64_PT_REGS_R13_OFFSET+16,sp // r3 = &pt_regs.r13
@@ -170,7 +172,7 @@ GLOBAL_ENTRY(ia32_trace_syscall)
adds r2=IA64_PT_REGS_R8_OFFSET+16,sp // r2 = &pt_regs.r8
;;
st8.spill [r2]=r8 // store return value in slot for r8
br.call.sptk.few rp=syscall_trace // give parent a chance to catch return value
br.call.sptk.few rp=syscall_trace_leave // give parent a chance to catch return value
.ret4: alloc r2=ar.pfs,0,0,0,0 // drop the syscall argument frame
br.cond.sptk.many ia64_leave_kernel
END(ia32_trace_syscall)
......
@@ -1996,7 +1996,7 @@ sys32_sigaltstack (ia32_stack_t *uss32, ia32_stack_t *uoss32,
int ret;
mm_segment_t old_fs = get_fs();
if (uss32)
if (uss32) {
if (copy_from_user(&buf32, uss32, sizeof(ia32_stack_t)))
return -EFAULT;
uss.ss_sp = (void *) (long) buf32.ss_sp;
@@ -2008,6 +2008,7 @@ sys32_sigaltstack (ia32_stack_t *uss32, ia32_stack_t *uoss32,
goto out;
}
uss.ss_size = MINSIGSTKSZ;
}
set_fs(KERNEL_DS);
ret = do_sigaltstack(uss32 ? &uss : NULL, &uoss, pt->r12);
current->sas_ss_size = buf32.ss_size;
......
@@ -48,11 +48,13 @@ static efi_status_t \
prefix##_get_time (efi_time_t *tm, efi_time_cap_t *tc) \
{ \
struct ia64_fpreg fr[6]; \
efi_time_cap_t *atc = 0; \
efi_status_t ret; \
\
if (tc) \
atc = adjust_arg(tc); \
ia64_save_scratch_fpregs(fr); \
ret = efi_call_##prefix((efi_get_time_t *) __va(runtime->get_time), adjust_arg(tm), \
adjust_arg(tc)); \
ret = efi_call_##prefix((efi_get_time_t *) __va(runtime->get_time), adjust_arg(tm), atc); \
ia64_load_scratch_fpregs(fr); \
return ret; \
}
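The guard added here recurs in the set_wakeup_time, get_variable and reset_system stubs below: an optional pointer argument is only run through adjust_arg() when the caller actually supplied it, so a NULL optional argument reaches the firmware as NULL rather than as a translated garbage address. A minimal, self-contained C sketch of that pattern (the names and the identity adjust_arg() are illustrative stand-ins, not the kernel macros):

#include <stddef.h>
#include <stdio.h>

/* Stand-in for the per-mode conversion: identity here, like the "virt"
 * stubs; the "phys" stubs would translate to a physical address instead. */
static void *adjust_arg(void *p)
{
	return p;
}

static void call_firmware(void *tm, void *tc)
{
	printf("tm=%p tc=%p\n", tm, tc);	/* placeholder for the EFI call */
}

static void example_get_time(void *tm, void *tc)
{
	void *atc = NULL;

	if (tc)					/* only translate a real pointer */
		atc = adjust_arg(tc);
	call_firmware(adjust_arg(tm), atc);	/* a NULL optional arg stays NULL */
}

int main(void)
{
	int t = 0;
	example_get_time(&t, NULL);		/* capability argument omitted */
	return 0;
}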
@@ -89,11 +91,14 @@ static efi_status_t \
prefix##_set_wakeup_time (efi_bool_t enabled, efi_time_t *tm) \
{ \
struct ia64_fpreg fr[6]; \
efi_time_t *atm = 0; \
efi_status_t ret; \
\
if (tm) \
atm = adjust_arg(tm); \
ia64_save_scratch_fpregs(fr); \
ret = efi_call_##prefix((efi_set_wakeup_time_t *) __va(runtime->set_wakeup_time), \
enabled, adjust_arg(tm)); \
enabled, atm); \
ia64_load_scratch_fpregs(fr); \
return ret; \
}
@@ -104,11 +109,14 @@ prefix##_get_variable (efi_char16_t *name, efi_guid_t *vendor, u32 *attr, \
unsigned long *data_size, void *data) \
{ \
struct ia64_fpreg fr[6]; \
u32 *aattr = 0; \
efi_status_t ret; \
\
if (attr) \
aattr = adjust_arg(attr); \
ia64_save_scratch_fpregs(fr); \
ret = efi_call_##prefix((efi_get_variable_t *) __va(runtime->get_variable), \
adjust_arg(name), adjust_arg(vendor), adjust_arg(attr), \
adjust_arg(name), adjust_arg(vendor), aattr, \
adjust_arg(data_size), adjust_arg(data)); \
ia64_load_scratch_fpregs(fr); \
return ret; \
@@ -164,33 +172,41 @@ prefix##_reset_system (int reset_type, efi_status_t status, \
unsigned long data_size, efi_char16_t *data) \
{ \
struct ia64_fpreg fr[6]; \
efi_char16_t *adata = 0; \
\
if (data) \
adata = adjust_arg(data); \
\
ia64_save_scratch_fpregs(fr); \
efi_call_##prefix((efi_reset_system_t *) __va(runtime->reset_system), \
reset_type, status, data_size, adjust_arg(data)); \
reset_type, status, data_size, adata); \
/* should not return, but just in case... */ \
ia64_load_scratch_fpregs(fr); \
}
STUB_GET_TIME(phys, __pa)
STUB_SET_TIME(phys, __pa)
STUB_GET_WAKEUP_TIME(phys, __pa)
STUB_SET_WAKEUP_TIME(phys, __pa)
STUB_GET_VARIABLE(phys, __pa)
STUB_GET_NEXT_VARIABLE(phys, __pa)
STUB_SET_VARIABLE(phys, __pa)
STUB_GET_NEXT_HIGH_MONO_COUNT(phys, __pa)
STUB_RESET_SYSTEM(phys, __pa)
STUB_GET_TIME(virt, )
STUB_SET_TIME(virt, )
STUB_GET_WAKEUP_TIME(virt, )
STUB_SET_WAKEUP_TIME(virt, )
STUB_GET_VARIABLE(virt, )
STUB_GET_NEXT_VARIABLE(virt, )
STUB_SET_VARIABLE(virt, )
STUB_GET_NEXT_HIGH_MONO_COUNT(virt, )
STUB_RESET_SYSTEM(virt, )
#define phys_ptr(arg) ((__typeof__(arg)) ia64_tpa(arg))
STUB_GET_TIME(phys, phys_ptr)
STUB_SET_TIME(phys, phys_ptr)
STUB_GET_WAKEUP_TIME(phys, phys_ptr)
STUB_SET_WAKEUP_TIME(phys, phys_ptr)
STUB_GET_VARIABLE(phys, phys_ptr)
STUB_GET_NEXT_VARIABLE(phys, phys_ptr)
STUB_SET_VARIABLE(phys, phys_ptr)
STUB_GET_NEXT_HIGH_MONO_COUNT(phys, phys_ptr)
STUB_RESET_SYSTEM(phys, phys_ptr)
#define id(arg) arg
STUB_GET_TIME(virt, id)
STUB_SET_TIME(virt, id)
STUB_GET_WAKEUP_TIME(virt, id)
STUB_SET_WAKEUP_TIME(virt, id)
STUB_GET_VARIABLE(virt, id)
STUB_GET_NEXT_VARIABLE(virt, id)
STUB_SET_VARIABLE(virt, id)
STUB_GET_NEXT_HIGH_MONO_COUNT(virt, id)
STUB_RESET_SYSTEM(virt, id)
void
efi_gettimeofday (struct timespec *ts)
......
@@ -508,7 +508,7 @@ GLOBAL_ENTRY(ia64_trace_syscall)
;;
stf.spill [r16]=f10
stf.spill [r17]=f11
br.call.sptk.many rp=syscall_trace // give parent a chance to catch syscall args
br.call.sptk.many rp=syscall_trace_enter // give parent a chance to catch syscall args
adds r16=PT(F6)+16,sp
adds r17=PT(F7)+16,sp
;;
@@ -548,7 +548,7 @@ GLOBAL_ENTRY(ia64_trace_syscall)
.strace_save_retval:
.mem.offset 0,0; st8.spill [r2]=r8 // store return value in slot for r8
.mem.offset 8,0; st8.spill [r3]=r10 // clear error indication in slot for r10
br.call.sptk.many rp=syscall_trace // give parent a chance to catch return value
br.call.sptk.many rp=syscall_trace_leave // give parent a chance to catch return value
.ret3: br.cond.sptk ia64_leave_syscall
strace_error:
@@ -575,7 +575,7 @@ GLOBAL_ENTRY(ia64_strace_leave_kernel)
*/
nop.m 0
nop.i 0
br.call.sptk.many rp=syscall_trace // give parent a chance to catch return value
br.call.sptk.many rp=syscall_trace_leave // give parent a chance to catch return value
}
.ret4: br.cond.sptk ia64_leave_kernel
END(ia64_strace_leave_kernel)
@@ -601,7 +601,9 @@ GLOBAL_ENTRY(ia64_ret_from_clone)
ld4 r2=[r2]
;;
mov r8=0
tbit.nz p6,p0=r2,TIF_SYSCALL_TRACE
and r2=_TIF_SYSCALL_TRACEAUDIT,r2
;;
cmp.ne p6,p0=r2,r0
(p6) br.cond.spnt .strace_check_retval
;; // added stop bits to prevent r8 dependency
END(ia64_ret_from_clone)
@@ -663,25 +665,31 @@ GLOBAL_ENTRY(ia64_leave_syscall)
PT_REGS_UNWIND_INFO(0)
/*
* work.need_resched etc. mustn't get changed by this CPU before it returns to
* user- or fsys-mode, hence we disable interrupts early on:
* user- or fsys-mode, hence we disable interrupts early on.
*
* p6 controls whether current_thread_info()->flags needs to be checked for
* extra work. We always check for extra work when returning to user-level.
* With CONFIG_PREEMPT, we also check for extra work when the preempt_count
* is 0. After extra work processing has been completed, execution
* resumes at .work_processed_syscall with p6 set to 1 if the extra-work-check
* needs to be redone.
*/
#ifdef CONFIG_PREEMPT
rsm psr.i // disable interrupts
#else
(pUStk) rsm psr.i
#endif
cmp.eq pLvSys,p0=r0,r0 // pLvSys=1: leave from syscall
(pUStk) cmp.eq.unc p6,p0=r0,r0 // p6 <- pUStk
.work_processed_syscall:
#ifdef CONFIG_PREEMPT
(pKStk) adds r20=TI_PRE_COUNT+IA64_TASK_SIZE,r13
;;
.pred.rel.mutex pUStk,pKStk
(pKStk) ld4 r21=[r20] // r21 <- preempt_count
(pUStk) mov r21=0 // r21 <- 0
;;
(p6) cmp.eq.unc p6,p0=r21,r0 // p6 <- p6 && (r21 == 0)
#endif /* CONFIG_PREEMPT */
cmp.eq p6,p0=r21,r0 // p6 <- pUStk || (preempt_count == 0)
#else /* !CONFIG_PREEMPT */
(pUStk) rsm psr.i
cmp.eq pLvSys,p0=r0,r0 // pLvSys=1: leave from syscall
(pUStk) cmp.eq.unc p6,p0=r0,r0 // p6 <- pUStk
#endif
.work_processed_syscall:
adds r16=PT(LOADRS)+16,r12
adds r17=PT(AR_BSPSTORE)+16,r12
adds r18=TI_FLAGS+IA64_TASK_SIZE,r13
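Restated in C, the p6 logic described in the comment above amounts to the condition below (a hedged paraphrase of the comment, not code from the patch; the helper name is illustrative):

#include <stdbool.h>

/* p6 == "re-check current_thread_info()->flags for extra work":
 * always when returning to user level (pUStk); with CONFIG_PREEMPT,
 * also when running on the kernel stack with preempt_count == 0. */
static bool need_extra_work_check(bool returning_to_user, bool config_preempt,
				  int preempt_count)
{
	if (returning_to_user)
		return true;
	if (config_preempt && preempt_count == 0)
		return true;
	return false;
}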
@@ -776,26 +784,31 @@ GLOBAL_ENTRY(ia64_leave_kernel)
PT_REGS_UNWIND_INFO(0)
/*
* work.need_resched etc. mustn't get changed by this CPU before it returns to
* user- or fsys-mode, hence we disable interrupts early on:
* user- or fsys-mode, hence we disable interrupts early on.
*
* p6 controls whether current_thread_info()->flags needs to be checked for
* extra work. We always check for extra work when returning to user-level.
* With CONFIG_PREEMPT, we also check for extra work when the preempt_count
* is 0. After extra work processing has been completed, execution
* resumes at .work_processed_syscall with p6 set to 1 if the extra-work-check
* needs to be redone.
*/
#ifdef CONFIG_PREEMPT
rsm psr.i // disable interrupts
#else
(pUStk) rsm psr.i
#endif
cmp.eq p0,pLvSys=r0,r0 // pLvSys=0: leave from kernel
(pUStk) cmp.eq.unc p6,p0=r0,r0 // p6 <- pUStk
;;
.work_processed_kernel:
#ifdef CONFIG_PREEMPT
adds r20=TI_PRE_COUNT+IA64_TASK_SIZE,r13
(pKStk) adds r20=TI_PRE_COUNT+IA64_TASK_SIZE,r13
;;
.pred.rel.mutex pUStk,pKStk
(pKStk) ld4 r21=[r20] // r21 <- preempt_count
(pUStk) mov r21=0 // r21 <- 0
;;
(p6) cmp.eq.unc p6,p0=r21,r0 // p6 <- p6 && (r21 == 0)
#endif /* CONFIG_PREEMPT */
cmp.eq p6,p0=r21,r0 // p6 <- pUStk || (preempt_count == 0)
#else
(pUStk) rsm psr.i
cmp.eq p0,pLvSys=r0,r0 // pLvSys=0: leave from kernel
(pUStk) cmp.eq.unc p6,p0=r0,r0 // p6 <- pUStk
#endif
.work_processed_kernel:
adds r17=TI_FLAGS+IA64_TASK_SIZE,r13
;;
(p6) ld4 r31=[r17] // load current_thread_info()->flags
@@ -1065,7 +1078,7 @@ skip_rbs_switch:
br.cond.sptk.many .work_processed_kernel // re-check
.notify:
br.call.spnt.many rp=notify_resume_user
(pUStk) br.call.spnt.many rp=notify_resume_user
.ret10: cmp.ne p6,p0=r0,r0 // p6 <- 0
(pLvSys)br.cond.sptk.many .work_processed_syscall // don't re-check
br.cond.sptk.many .work_processed_kernel // don't re-check
......
@@ -165,7 +165,6 @@ ENTRY(fsys_gettimeofday)
add r9=TI_FLAGS+IA64_TASK_SIZE,r16
addl r3=THIS_CPU(cpu_info),r0
mov.m r31=ar.itc // put time stamp into r31 (ITC) == now (35 cyc)
#ifdef CONFIG_SMP
movl r10=__per_cpu_offset
movl r2=sal_platform_features
@@ -240,12 +239,13 @@ EX(.fail_efault, probe.w.fault r10, 3) // this must come _after_ NaT-check
;;
ldf8 f8=[r21] // f8 now contains itm_next
mov.m r31=ar.itc // put time stamp into r31 (ITC) == now
sub r28=r29, r28, 1 // r28 now contains "-(lost + 1)"
tbit.nz p9, p10=r23, 0 // p9 <- is_odd(r23), p10 <- is_even(r23)
;;
ld8 r2=[r19] // r2 = sec = xtime.tv_sec
ld8 r29=[r20] // r29 = nsec = xtime.tv_nsec
tbit.nz p9, p10=r23, 0 // p9 <- is_odd(r23), p10 <- is_even(r23)
setf.sig f6=r28 // f6 <- -(lost + 1) (6 cyc)
;;
@@ -260,7 +260,6 @@ EX(.fail_efault, probe.w.fault r10, 3) // this must come _after_ NaT-check
nop 0
;;
mov r31=ar.itc // re-read ITC in case we .retry (35 cyc)
xma.l f8=f11, f8, f12 // f8 (elapsed_cycles) <- (-1*last_tick + now) = (now - last_tick)
nop 0
;;
......
@@ -752,7 +752,9 @@ ENTRY(break_fault)
;;
ld4 r2=[r2] // r2 = current_thread_info()->flags
;;
tbit.z p8,p0=r2,TIF_SYSCALL_TRACE
and r2=_TIF_SYSCALL_TRACEAUDIT,r2 // mask trace or audit
;;
cmp.eq p8,p0=r2,r0
mov b6=r20
;;
(p8) br.call.sptk.many b6=b6 // ignore this return addr
@@ -1573,10 +1575,11 @@ ENTRY(dispatch_to_ia32_handler)
ld4 r2=[r2] // r2 = current_thread_info()->flags
;;
ld8 r16=[r16]
tbit.z p8,p0=r2,TIF_SYSCALL_TRACE
and r2=_TIF_SYSCALL_TRACEAUDIT,r2 // mask trace or audit
;;
mov b6=r16
movl r15=ia32_ret_from_syscall
cmp.eq p8,p0=r2,r0
;;
mov rp=r15
(p8) br.call.sptk.many b6=b6
......
@@ -43,12 +43,6 @@ machvec_init (const char *name)
#endif /* CONFIG_IA64_GENERIC */
void
machvec_noop (void)
{
}
EXPORT_SYMBOL(machvec_noop);
void
machvec_setup (char **arg)
{
......
@@ -247,7 +247,9 @@ ia64_mca_log_sal_error_record(int sal_info_type)
u8 *buffer;
u64 size;
int irq_safe = sal_info_type != SAL_INFO_TYPE_MCA && sal_info_type != SAL_INFO_TYPE_INIT;
#ifdef IA64_MCA_DEBUG_INFO
static const char * const rec_name[] = { "MCA", "INIT", "CMC", "CPE" };
#endif
size = ia64_log_get(sal_info_type, &buffer, irq_safe);
if (!size)
@@ -596,7 +598,7 @@ ia64_mca_cmc_vector_disable (void *dummy)
cmcv = (cmcv_reg_t)ia64_getreg(_IA64_REG_CR_CMCV);
cmcv.cmcv_mask = 1; /* Mask/disable interrupt */
ia64_setreg(_IA64_REG_CR_CMCV, cmcv.cmcv_regval)
ia64_setreg(_IA64_REG_CR_CMCV, cmcv.cmcv_regval);
IA64_MCA_DEBUG("%s: CPU %d corrected "
"machine check vector %#x disabled.\n",
@@ -623,7 +625,7 @@ ia64_mca_cmc_vector_enable (void *dummy)
cmcv = (cmcv_reg_t)ia64_getreg(_IA64_REG_CR_CMCV);
cmcv.cmcv_mask = 0; /* Unmask/enable interrupt */
ia64_setreg(_IA64_REG_CR_CMCV, cmcv.cmcv_regval)
ia64_setreg(_IA64_REG_CR_CMCV, cmcv.cmcv_regval);
IA64_MCA_DEBUG("%s: CPU %d corrected "
"machine check vector %#x enabled.\n",
......
@@ -656,26 +656,18 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
case RV_PCREL:
switch (r_type) {
case R_IA64_PCREL21B:
if (in_init(mod, val)) {
/* Calls to init code from core are bad news */
if (in_core(mod, (uint64_t)location)) {
printk(KERN_ERR "%s: init symbol 0x%lx used in module code at %p\n",
mod->name, val, location);
return -ENOEXEC;
}
} else if (in_core(mod, val)) {
if ((in_init(mod, val) && in_core(mod, (uint64_t)location)) ||
(in_core(mod, val) && in_init(mod, (uint64_t)location))) {
/*
* Init section may have been allocated far away from core,
* if the branch won't reach, then allocate a plt for it.
*/
if (in_init(mod, (uint64_t)location)) {
uint64_t delta = ((int64_t)val - (int64_t)location) / 16;
if (delta + (1 << 20) >= (1 << 21)) {
val = get_fdesc(mod, val, &ok);
val = get_plt(mod, location, val, &ok);
}
}
} else
} else if (!is_internal(mod, val))
val = get_plt(mod, location, val, &ok);
/* FALL THROUGH */
default:
......
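The range test in the module.c hunk above decides whether a direct br can reach its target: the displacement is counted in 16-byte bundles and must fit the signed 21-bit immediate of an IA-64 PC-relative branch, otherwise a PLT entry is allocated. A self-contained restatement of that check (the function name is illustrative):

#include <stdbool.h>
#include <stdint.h>

/* True if the PCREL21B displacement from location to target fits the
 * signed 21-bit branch immediate, i.e. lies in [-2^20, 2^20). */
static bool pcrel21b_in_range(int64_t target, int64_t location)
{
	int64_t delta = (target - location) / 16;	/* in 16-byte bundles */

	return delta >= -(1 << 20) && delta < (1 << 20);
}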
@@ -54,7 +54,7 @@ END(ia64_pal_default_handler)
*
*/
GLOBAL_ENTRY(ia64_pal_call_static)
.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(6)
.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(5)
alloc loc1 = ar.pfs,5,5,0,0
movl loc2 = pal_entry_point
1: {
@@ -100,7 +100,7 @@ END(ia64_pal_call_static)
* in2 - in3 Remaining PAL arguments
*/
GLOBAL_ENTRY(ia64_pal_call_stacked)
.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(5)
.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(4)
alloc loc1 = ar.pfs,4,4,4,0
movl loc2 = pal_entry_point
@@ -147,7 +147,7 @@ END(ia64_pal_call_stacked)
GLOBAL_ENTRY(ia64_pal_call_phys_static)
.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(6)
.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(4)
alloc loc1 = ar.pfs,4,7,0,0
movl loc2 = pal_entry_point
1: {
......
@@ -4702,11 +4702,12 @@ static int
pfm_check_task_state(pfm_context_t *ctx, int cmd, unsigned long flags)
{
struct task_struct *task;
int state;
int state, old_state;
recheck:
state = ctx->ctx_state;
task = ctx->ctx_task;
task = PFM_CTX_TASK(ctx);
if (task == NULL) {
DPRINT(("context %d no task, state=%d\n", ctx->ctx_fd, state));
return 0;
@@ -4728,24 +4729,46 @@ pfm_check_task_state(pfm_context_t *ctx, int cmd, unsigned long flags)
if (task == current || ctx->ctx_fl_system) return 0;
/*
* context is UNLOADED, MASKED we are safe to go
* no command can operate on a zombie context
*/
if (state != PFM_CTX_LOADED) return 0;
if (state == PFM_CTX_ZOMBIE) {
DPRINT(("cmd %d state zombie cannot operate on context\n", cmd));
return -EINVAL;
}
if (state == PFM_CTX_ZOMBIE) return -EINVAL;
/*
* if context is UNLOADED or MASKED, we are safe to go
*/
if (state != PFM_CTX_LOADED) return 0;
/*
* context is loaded, we must make sure the task is stopped
* context is LOADED, we must make sure the task is stopped
* We could lift this restriction for UP but it would mean that
* the user has no guarantee the task would not run between
* two successive calls to perfmonctl(). That's probably OK.
* If this user wants to ensure the task does not run, then
* the task must be stopped.
*/
if (PFM_CMD_STOPPED(cmd) && task->state != TASK_STOPPED) {
if (PFM_CMD_STOPPED(cmd)) {
if (task->state != TASK_STOPPED) {
DPRINT(("[%d] task not in stopped state\n", task->pid));
return -EBUSY;
}
/*
* task is now stopped, wait for ctxsw out
*
* This is an interesting point in the code.
* We need to unprotect the context because
* the pfm_save_regs() routine needs to grab
* the same lock. There is a danger in doing
* this because it leaves a window open for
* another task to get access to the context
* and possibly change its state. The one thing
* that is not possible is for the context to disappear
* because we are protected by the VFS layer, i.e.,
* get_fd()/put_fd().
*/
old_state = state;
UNPROTECT_CTX(ctx, flags);
@@ -4753,6 +4776,14 @@ pfm_check_task_state(pfm_context_t *ctx, int cmd, unsigned long flags)
PROTECT_CTX(ctx, flags);
/*
* we must recheck to verify if state has changed
*/
if (ctx->ctx_state != old_state) {
DPRINT(("old_state=%d new_state=%d\n", old_state, ctx->ctx_state));
goto recheck;
}
}
return 0;
}
......
@@ -1447,9 +1447,8 @@ sys_ptrace (long request, pid_t pid, unsigned long addr, unsigned long data,
return ret;
}
/* "asmlinkage" so the input arguments are preserved... */
asmlinkage void
void
syscall_trace (void)
{
if (!test_thread_flag(TIF_SYSCALL_TRACE))
@@ -1472,3 +1471,38 @@ syscall_trace (void)
current->exit_code = 0;
}
}
/* "asmlinkage" so the input arguments are preserved... */
asmlinkage void
syscall_trace_enter (long arg0, long arg1, long arg2, long arg3,
long arg4, long arg5, long arg6, long arg7, long stack)
{
struct pt_regs *regs = (struct pt_regs *) &stack;
long syscall;
if (unlikely(current->audit_context)) {
if (IS_IA32_PROCESS(regs))
syscall = regs->r1;
else
syscall = regs->r15;
audit_syscall_entry(current, syscall, arg0, arg1, arg2, arg3);
}
if (test_thread_flag(TIF_SYSCALL_TRACE) && (current->ptrace & PT_PTRACED))
syscall_trace();
}
/* "asmlinkage" so the input arguments are preserved... */
asmlinkage void
syscall_trace_leave (long arg0, long arg1, long arg2, long arg3,
long arg4, long arg5, long arg6, long arg7, long stack)
{
if (unlikely(current->audit_context))
audit_syscall_exit(current, ((struct pt_regs *) &stack)->r8);
if (test_thread_flag(TIF_SYSCALL_TRACE) && (current->ptrace & PT_PTRACED))
syscall_trace();
}
@@ -188,6 +188,27 @@ sal_desc_ap_wakeup (void *p)
break;
}
}
static void __init
chk_nointroute_opt(void)
{
char *cp;
extern char saved_command_line[];
for (cp = saved_command_line; *cp; ) {
if (memcmp(cp, "nointroute", 10) == 0) {
no_int_routing = 1;
printk ("no_int_routing on\n");
break;
} else {
while (*cp != ' ' && *cp)
++cp;
while (*cp == ' ')
++cp;
}
}
}
#else
static void __init sal_desc_ap_wakeup(void *p) { }
#endif
@@ -207,6 +228,9 @@ ia64_sal_init (struct ia64_sal_systab *systab)
printk(KERN_ERR "bad signature in system table!");
check_versions(systab);
#ifdef CONFIG_SMP
chk_nointroute_opt();
#endif
/* revisions are coded in BCD, so %x does the job for us */
printk(KERN_INFO "SAL %x.%x: %.32s %.32s%sversion %x.%x\n",
......
@@ -72,7 +72,7 @@ sn_enable_irq(unsigned int irq)
{
}
static inline void move_irq(int irq)
static inline void sn_move_irq(int irq)
{
/* note - we hold desc->lock */
cpumask_t tmp;
@@ -110,7 +110,7 @@ sn_ack_irq(unsigned int irq)
}
HUB_S((unsigned long *)GLOBAL_MMR_ADDR(nasid, SH_EVENT_OCCURRED_ALIAS), mask );
__set_bit(irq, (volatile void *)pda->sn_in_service_ivecs);
move_irq(irq);
sn_move_irq(irq);
}
static void
......
@@ -56,7 +56,7 @@ ia64_atomic64_add (__s64 i, atomic64_t *v)
CMPXCHG_BUGCHECK(v);
old = atomic_read(v);
new = old + i;
} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old);
} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old);
return new;
}
@@ -84,7 +84,7 @@ ia64_atomic64_sub (__s64 i, atomic64_t *v)
CMPXCHG_BUGCHECK(v);
old = atomic_read(v);
new = old - i;
} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old);
} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old);
return new;
}
......
@@ -6,6 +6,8 @@
* David Mosberger-Tang <davidm@hpl.hp.com>
*/
#include <asm/machvec.h>
#define dma_alloc_coherent platform_dma_alloc_coherent
#define dma_alloc_noncoherent platform_dma_alloc_coherent /* coherent mem. is cheap */
#define dma_free_coherent platform_dma_free_coherent
......
@@ -69,7 +69,11 @@ typedef unsigned short ia64_mv_readw_relaxed_t (void *);
typedef unsigned int ia64_mv_readl_relaxed_t (void *);
typedef unsigned long ia64_mv_readq_relaxed_t (void *);
extern void machvec_noop (void);
static inline void
machvec_noop (void)
{
}
extern void machvec_setup (char **);
extern void machvec_timer_interrupt (int, void *, struct pt_regs *);
extern void machvec_dma_sync_single (struct device *, dma_addr_t, size_t, int);
......
@@ -73,12 +73,15 @@ struct thread_info {
#define TIF_SIGPENDING 1 /* signal pending */
#define TIF_NEED_RESCHED 2 /* rescheduling necessary */
#define TIF_SYSCALL_TRACE 3 /* syscall trace active */
#define TIF_SYSCALL_AUDIT 4 /* syscall auditing active */
#define TIF_POLLING_NRFLAG 16 /* true if poll_idle() is polling TIF_NEED_RESCHED */
#define TIF_WORK_MASK 0x7 /* like TIF_ALLWORK_BITS but sans TIF_SYSCALL_TRACE */
#define TIF_ALLWORK_MASK 0xf /* bits 0..3 are "work to do on user-return" bits */
#define TIF_ALLWORK_MASK 0x1f /* bits 0..4 are "work to do on user-return" bits */
#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
#define _TIF_SYSCALL_TRACEAUDIT (_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT)
#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
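The new _TIF_SYSCALL_TRACEAUDIT mask is what the rewritten entry-path tests rely on: instead of probing the single TIF_SYSCALL_TRACE bit with tbit, the assembly now ANDs the flags with the combined mask and branches if any bit is set. A hedged C equivalent of that test, using the flag values from the hunk above (the helper name is illustrative, not kernel code):

#include <stdbool.h>

#define TIF_SYSCALL_TRACE	3
#define TIF_SYSCALL_AUDIT	4
#define _TIF_SYSCALL_TRACE	(1 << TIF_SYSCALL_TRACE)
#define _TIF_SYSCALL_AUDIT	(1 << TIF_SYSCALL_AUDIT)
#define _TIF_SYSCALL_TRACEAUDIT	(_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT)

/* Mirrors "and r2=_TIF_SYSCALL_TRACEAUDIT,r2 ;; cmp.ne p6,p0=r2,r0":
 * take the slow path if either tracing or auditing is active. */
static bool syscall_needs_slow_path(unsigned long flags)
{
	return (flags & _TIF_SYSCALL_TRACEAUDIT) != 0;
}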
......
@@ -162,7 +162,7 @@ config AUDIT
config AUDITSYSCALL
bool "Enable system-call auditing support"
depends on AUDIT && (X86 || PPC64 || ARCH_S390)
depends on AUDIT && (X86 || PPC64 || ARCH_S390 || IA64)
default y if SECURITY_SELINUX
default n
help
......