Commit 2ff9a232 authored by David Mosberger

ia64: Manual merge for 2.5.35+.

parents 6865038a 0b6decd4
@@ -96,6 +96,8 @@ MAKEBOOT = $(MAKE) -C arch/$(ARCH)/boot
vmlinux: arch/$(ARCH)/vmlinux.lds.s
CPPFLAGS_arch/$(ARCH)/vmlinux.lds.s
compressed: vmlinux
$(OBJCOPY) $(OBJCOPYFLAGS) vmlinux vmlinux-tmp
gzip vmlinux-tmp
@@ -104,30 +106,14 @@ compressed: vmlinux
rawboot:
@$(MAKEBOOT) rawboot
#
# My boot writes directly to a specific disk partition, I doubt most
# people will want to do that without changes..
#
msb my-special-boot:
@$(MAKEBOOT) msb
bootimage:
@$(MAKEBOOT) bootimage
srmboot:
@$(MAKEBOOT) srmboot
archclean:
@$(MAKEBOOT) clean
archmrproper:
@$(MAKE) -C arch/$(ARCH)/tools mrproper
bootpfile:
@$(MAKEBOOT) bootpfile
prepare: $(TOPDIR)/include/asm-ia64/offsets.h
$(TOPDIR)/include/asm-ia64/offsets.h: include/asm include/linux/version.h \
include/config/MARKER
@$(MAKE) -C arch/$(ARCH)/tools $@
\ No newline at end of file
@$(MAKE) -C arch/$(ARCH)/tools $@
@@ -2111,8 +2111,8 @@ struct shm_info32 {
};
struct ipc_kludge {
struct msgbuf *msgp;
long msgtyp;
u32 msgp;
s32 msgtyp;
};
#define SEMOP 1
......
@@ -2,7 +2,7 @@
* This file contains the code that gets mapped at the upper end of each task's text
* region. For now, it contains the signal trampoline code only.
*
* Copyright (C) 1999-2001 Hewlett-Packard Co
* Copyright (C) 1999-2002 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
*/
@@ -135,7 +135,7 @@ back_from_setup_rbs:
;;
ld8 r8=[base0] // restore (perhaps modified) CFM0, EC0, and CPL0
cmp.ne p8,p0=r14,r15 // do we need to restore the rbs?
(p8) br.cond.spnt restore_rbs // yup -> (clobbers r14 and r16)
(p8) br.cond.spnt restore_rbs // yup -> (clobbers r14-r18, f6 & f7)
;;
back_from_restore_rbs:
adds base0=(FR6_OFF+SIGCONTEXT_OFF),sp
@@ -189,20 +189,69 @@ setup_rbs:
.spillsp ar.rnat, RNAT_OFF+SIGCONTEXT_OFF
.body
restore_rbs:
// On input:
// r14 = bsp1 (bsp at the time of return from signal handler)
// r15 = bsp0 (bsp at the time the signal occurred)
//
// Here, we need to calculate bspstore0, the value that ar.bspstore needs
// to be set to, based on bsp0 and the size of the dirty partition on
// the alternate stack (sc_loadrs >> 16). This can be done with the
// following algorithm:
//
// bspstore0 = rse_skip_regs(bsp0, -rse_num_regs(bsp1 - (loadrs >> 19), bsp1));
//
// This is what the code below does.
//
alloc r2=ar.pfs,0,0,0,0 // alloc null frame
adds r16=(LOADRS_OFF+SIGCONTEXT_OFF),sp
adds r18=(RNAT_OFF+SIGCONTEXT_OFF),sp
;;
ld8 r14=[r16]
adds r16=(RNAT_OFF+SIGCONTEXT_OFF),sp
ld8 r17=[r16]
ld8 r16=[r18] // get new rnat
extr.u r18=r15,3,6 // r18 <- rse_slot_num(bsp0)
;;
mov ar.rsc=r14 // put RSE into enforced lazy mode
ld8 r14=[r16] // get new rnat
mov ar.rsc=r17 // put RSE into enforced lazy mode
shr.u r17=r17,16
;;
loadrs // restore dirty partition
sub r14=r14,r17 // r14 (bspstore1) <- bsp1 - (sc_loadrs >> 16)
shr.u r17=r17,3 // r17 <- (sc_loadrs >> 19)
;;
loadrs // restore dirty partition
extr.u r14=r14,3,6 // r14 <- rse_slot_num(bspstore1)
;;
add r14=r14,r17 // r14 <- rse_slot_num(bspstore1) + (sc_loadrs >> 19)
;;
shr.u r14=r14,6 // r14 <- (rse_slot_num(bspstore1) + (sc_loadrs >> 19))/0x40
;;
sub r14=r14,r17 // r14 <- -rse_num_regs(bspstore1, bsp1)
movl r17=0x8208208208208209
;;
add r18=r18,r14 // r18 (delta) <- rse_slot_num(bsp0) - rse_num_regs(bspstore1,bsp1)
setf.sig f7=r17
cmp.lt p7,p0=r14,r0 // p7 <- (r14 < 0)?
;;
(p7) adds r18=-62,r18 // delta -= 62
;;
setf.sig f6=r18
;;
xmpy.h f6=f6,f7
;;
getf.sig r17=f6
;;
add r17=r17,r18
shr r18=r18,63
;;
shr r17=r17,5
;;
sub r17=r17,r18 // r17 = delta/63
;;
add r17=r14,r17 // r17 <- delta/63 - rse_num_regs(bspstore1, bsp1)
;;
shladd r15=r17,3,r15 // r15 <- bsp0 + 8*(delta/63 - rse_num_regs(bspstore1, bsp1))
;;
mov ar.bspstore=r15 // switch back to old register backing store area
;;
mov ar.rnat=r14 // restore RNaT
mov ar.rnat=r16 // restore RNaT
mov ar.rsc=0xf // (will be restored later on from sc_ar_rsc)
// invala not necessary as that will happen when returning to user-mode
br.cond.sptk back_from_restore_rbs
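For reference, the arithmetic described in the comment block above can be sketched in C. This mirrors the ia64_rse_num_regs()/ia64_rse_skip_regs() helpers from include/asm-ia64/rse.h rather than transcribing the assembly line by line (the assembly folds the divide-by-63 into a multiply by the constant 0x8208208208208209):

	#include <stdint.h>

	/* Slot number (0..63) of a backing-store address; slot 63 of each
	 * 64-slot group holds the RNaT collection, not a register. */
	static uint64_t rse_slot_num (uint64_t addr)
	{
		return (addr >> 3) & 0x3f;
	}

	/* bspstore0 from bsp0, bsp1, and sc_loadrs, per the algorithm above.
	 * loadrs >> 16 is the dirty-partition size in bytes, loadrs >> 19 the
	 * same in 8-byte slots. */
	static uint64_t compute_bspstore0 (uint64_t bsp0, uint64_t bsp1, uint64_t loadrs)
	{
		uint64_t nslots = loadrs >> 19;
		uint64_t bspstore1 = bsp1 - (loadrs >> 16);
		/* registers between bspstore1 and bsp1, not counting RNaT slots: */
		int64_t nregs = nslots - (rse_slot_num(bspstore1) + nslots) / 0x40;
		/* skip backwards nregs registers from bsp0, re-crossing RNaT slots: */
		int64_t delta = rse_slot_num(bsp0) - nregs;

		if (nregs > 0)
			delta -= 0x3e;			/* the "adds -62" above */
		return bsp0 + 8 * (-nregs + delta / 0x3f);
	}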
......
@@ -127,6 +127,8 @@ EXPORT_SYMBOL(ia64_pal_call_phys_stacked);
EXPORT_SYMBOL(ia64_pal_call_phys_static);
EXPORT_SYMBOL(ia64_pal_call_stacked);
EXPORT_SYMBOL(ia64_pal_call_static);
EXPORT_SYMBOL(ia64_load_scratch_fpregs);
EXPORT_SYMBOL(ia64_save_scratch_fpregs);
extern struct efi efi;
EXPORT_SYMBOL(efi);
......
@@ -403,8 +403,8 @@ unsigned int do_IRQ(unsigned long irq, struct pt_regs *regs)
break;
desc->status &= ~IRQ_PENDING;
}
out:
desc->status &= ~IRQ_INPROGRESS;
out:
/*
* The ->end() handler has to deal with interrupts which got
* disabled while the handler was running.
@@ -788,7 +788,7 @@ int setup_irq(unsigned int irq, struct irqaction * new)
if (!shared) {
desc->depth = 0;
desc->status &= ~(IRQ_DISABLED | IRQ_AUTODETECT | IRQ_WAITING);
desc->status &= ~(IRQ_DISABLED | IRQ_AUTODETECT | IRQ_WAITING | IRQ_INPROGRESS);
desc->handler->startup(irq);
}
spin_unlock_irqrestore(&desc->lock,flags);
......
@@ -245,3 +245,48 @@ GLOBAL_ENTRY(ia64_pal_call_phys_stacked)
br.ret.sptk.many b0
END(ia64_pal_call_phys_stacked)
/*
* Save the fp scratch regs which aren't already saved in pt_regs (f10-f15).
*
* NOTE: We need to do this since firmware (SAL and PAL) may use any of the scratch
* regs in the fp-low partition.
*
* Inputs:
* in0 Address of stack storage for fp regs
*/
GLOBAL_ENTRY(ia64_save_scratch_fpregs)
alloc r3=ar.pfs,1,0,0,0
add r2=16,in0
;;
stf.spill [in0] = f10,32
stf.spill [r2] = f11,32
;;
stf.spill [in0] = f12,32
stf.spill [r2] = f13,32
;;
stf.spill [in0] = f14,32
stf.spill [r2] = f15,32
br.ret.sptk.many rp
END(ia64_save_scratch_fpregs)
/*
* Load the fp scratch regs (f10-f15)
*
* Inputs:
* in0 Address of stack storage for fp regs
*/
GLOBAL_ENTRY(ia64_load_scratch_fpregs)
alloc r3=ar.pfs,1,0,0,0
add r2=16,in0
;;
ldf.fill f10 = [in0],32
ldf.fill f11 = [r2],32
;;
ldf.fill f12 = [in0],32
ldf.fill f13 = [r2],32
;;
ldf.fill f14 = [in0],32
ldf.fill f15 = [r2],32
br.ret.sptk.many rp
END(ia64_load_scratch_fpregs)
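From the caller's side, the save/load pair above is used bracket-style around any firmware call; this is precisely the pattern the PAL_CALL and SAL_CALL wrappers elsewhere in this commit adopt:

	struct ia64_fpreg fr[6];	/* caller-supplied save area */

	ia64_save_scratch_fpregs(fr);
	/* ... SAL or PAL firmware call that may clobber f10-f15 ... */
	ia64_load_scratch_fpregs(fr);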
@@ -15,8 +15,8 @@
* test if they need to do any extra work (up needs to do something
only if count was negative before the increment operation).
*
* "sleepers" and the contention routine ordering is protected by the
* semaphore spinlock.
* "sleeping" and the contention routine ordering is protected
* by the spinlock in the semaphore's waitqueue head.
*
* Note that these functions are only called when there is contention
* on the lock, and as such all this is the "non-critical" part of the
@@ -44,40 +44,42 @@ __up (struct semaphore *sem)
wake_up(&sem->wait);
}
static spinlock_t semaphore_lock = SPIN_LOCK_UNLOCKED;
void
__down (struct semaphore *sem)
{
struct task_struct *tsk = current;
DECLARE_WAITQUEUE(wait, tsk);
unsigned long flags;
tsk->state = TASK_UNINTERRUPTIBLE;
add_wait_queue_exclusive(&sem->wait, &wait);
spin_lock_irqsave(&sem->wait.lock, flags);
add_wait_queue_exclusive_locked(&sem->wait, &wait);
spin_lock_irq(&semaphore_lock);
sem->sleepers++;
for (;;) {
int sleepers = sem->sleepers;
/*
* Add "everybody else" into it. They aren't
* playing, because we own the spinlock.
* playing, because we own the spinlock in
* the wait_queue_head.
*/
if (!atomic_add_negative(sleepers - 1, &sem->count)) {
sem->sleepers = 0;
break;
}
sem->sleepers = 1; /* us - see -1 above */
spin_unlock_irq(&semaphore_lock);
spin_unlock_irqrestore(&sem->wait.lock, flags);
schedule();
spin_lock_irqsave(&sem->wait.lock, flags);
tsk->state = TASK_UNINTERRUPTIBLE;
spin_lock_irq(&semaphore_lock);
}
spin_unlock_irq(&semaphore_lock);
remove_wait_queue(&sem->wait, &wait);
remove_wait_queue_locked(&sem->wait, &wait);
wake_up_locked(&sem->wait);
spin_unlock_irqrestore(&sem->wait.lock, flags);
tsk->state = TASK_RUNNING;
wake_up(&sem->wait);
}
int
@@ -86,10 +88,12 @@ __down_interruptible (struct semaphore * sem)
int retval = 0;
struct task_struct *tsk = current;
DECLARE_WAITQUEUE(wait, tsk);
unsigned long flags;
tsk->state = TASK_INTERRUPTIBLE;
add_wait_queue_exclusive(&sem->wait, &wait);
spin_lock_irqsave(&sem->wait.lock, flags);
add_wait_queue_exclusive_locked(&sem->wait, &wait);
spin_lock_irq(&semaphore_lock);
sem->sleepers ++;
for (;;) {
int sleepers = sem->sleepers;
@@ -110,25 +114,27 @@ __down_interruptible (struct semaphore * sem)
/*
* Add "everybody else" into it. They aren't
* playing, because we own the spinlock. The
* "-1" is because we're still hoping to get
* the lock.
* playing, because we own the spinlock in
* wait_queue_head. The "-1" is because we're
* still hoping to get the semaphore.
*/
if (!atomic_add_negative(sleepers - 1, &sem->count)) {
sem->sleepers = 0;
break;
}
sem->sleepers = 1; /* us - see -1 above */
spin_unlock_irq(&semaphore_lock);
spin_unlock_irqrestore(&sem->wait.lock, flags);
schedule();
spin_lock_irqsave(&sem->wait.lock, flags);
tsk->state = TASK_INTERRUPTIBLE;
spin_lock_irq(&semaphore_lock);
}
spin_unlock_irq(&semaphore_lock);
remove_wait_queue_locked(&sem->wait, &wait);
wake_up_locked(&sem->wait);
spin_unlock_irqrestore(&sem->wait.lock, flags);
tsk->state = TASK_RUNNING;
remove_wait_queue(&sem->wait, &wait);
wake_up(&sem->wait);
return retval;
}
@@ -142,17 +148,19 @@ __down_trylock (struct semaphore *sem)
unsigned long flags;
int sleepers;
spin_lock_irqsave(&semaphore_lock, flags);
spin_lock_irqsave(&sem->wait.lock, flags);
sleepers = sem->sleepers + 1;
sem->sleepers = 0;
/*
* Add "everybody else" and us into it. They aren't
* playing, because we own the spinlock.
* playing, because we own the spinlock in the
* wait_queue_head.
*/
if (!atomic_add_negative(sleepers, &sem->count))
wake_up(&sem->wait);
if (!atomic_add_negative(sleepers, &sem->count)) {
wake_up_locked(&sem->wait);
}
spin_unlock_irqrestore(&semaphore_lock, flags);
spin_unlock_irqrestore(&sem->wait.lock, flags);
return 1;
}
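The "sleepers" bookkeeping above is easy to misread. The following toy, single-threaded C model (illustrative only: atomic_add_negative() is reduced to plain arithmetic, and the real code relies on the waitqueue spinlock for atomicity) shows the two possible outcomes of one pass through the wait loop:

	#include <assert.h>

	static int count, sleepers;

	static int atomic_add_negative (int i, int *v)
	{
		*v += i;
		return *v < 0;
	}

	/* One pass through the __down() wait loop; returns 1 if acquired. */
	static int down_loop_body (void)
	{
		/* Fold back the fast-path decrements of every waiter but us. */
		if (!atomic_add_negative(sleepers - 1, &count)) {
			sleepers = 0;	/* got it; nothing left to track */
			return 1;
		}
		sleepers = 1;		/* only our own -1 stays folded in */
		return 0;
	}

	int main (void)
	{
		/* up() just raised count to 0 while one task was waiting: */
		count = 0; sleepers = 1;
		assert(down_loop_body() == 1 && count == 0 && sleepers == 0);

		/* nobody called up(): the lone waiter keeps sleeping */
		count = -1; sleepers = 1;
		assert(down_loop_body() == 0 && sleepers == 1);
		return 0;
	}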
@@ -354,6 +354,15 @@ setup_sigcontext (struct sigcontext *sc, sigset_t *mask, struct sigscratch *scr)
return err;
}
/*
* Check whether the register-backing store is already on the signal stack.
*/
static inline int
rbs_on_sig_stack (unsigned long bsp)
{
return (bsp - current->sas_ss_sp < current->sas_ss_size);
}
static long
setup_frame (int sig, struct k_sigaction *ka, siginfo_t *info, sigset_t *set,
struct sigscratch *scr)
@@ -366,10 +375,17 @@ setup_frame (int sig, struct k_sigaction *ka, siginfo_t *info, sigset_t *set,
frame = (void *) scr->pt.r12;
tramp_addr = GATE_ADDR + (ia64_sigtramp - __start_gate_section);
if ((ka->sa.sa_flags & SA_ONSTACK) != 0 && !on_sig_stack((unsigned long) frame)) {
new_rbs = (current->sas_ss_sp + sizeof(long) - 1) & ~(sizeof(long) - 1);
frame = (void *) ((current->sas_ss_sp + current->sas_ss_size)
& ~(STACK_ALIGN - 1));
if (ka->sa.sa_flags & SA_ONSTACK) {
/*
* We need to check the memory and register stacks separately, because
* they're switched separately (memory stack is switched in the kernel,
* register stack is switched in the signal trampoline).
*/
if (!on_sig_stack((unsigned long) frame))
frame = (void *) ((current->sas_ss_sp + current->sas_ss_size)
& ~(STACK_ALIGN - 1));
if (!rbs_on_sig_stack(scr->pt.ar_bspstore))
new_rbs = (current->sas_ss_sp + sizeof(long) - 1) & ~(sizeof(long) - 1);
}
frame = (void *) frame - ((sizeof(*frame) + STACK_ALIGN - 1) & ~(STACK_ALIGN - 1));
......
@@ -15,6 +15,7 @@ obj-y := __divsi3.o __udivsi3.o __modsi3.o __umodsi3.o \
obj-$(CONFIG_ITANIUM) += copy_page.o copy_user.o memcpy.o
obj-$(CONFIG_MCKINLEY) += copy_page_mck.o memcpy_mck.o
obj-$(CONFIG_PERFMON) += carta_random.o
IGNORE_FLAGS_OBJS = __divsi3.o __udivsi3.o __modsi3.o __umodsi3.o \
__divdi3.o __udivdi3.o __moddi3.o __umoddi3.o
......
/*
* Fast, simple, yet decent quality random number generator based on
* a paper by David G. Carta ("Two Fast Implementations of the
* `Minimal Standard' Random Number Generator," Communications of the
* ACM, January, 1990).
*
* Copyright (C) 2002 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
*/
#include <asm/asmmacro.h>
#define a r2
#define m r3
#define lo r8
#define hi r9
#define t0 r16
#define t1 r17
#define seed r32
GLOBAL_ENTRY(carta_random32)
movl a = (16807 << 16) | 16807
;;
pmpyshr2.u t0 = a, seed, 0
pmpyshr2.u t1 = a, seed, 16
;;
unpack2.l t0 = t1, t0
dep m = -1, r0, 0, 31
;;
zxt4 lo = t0
shr.u hi = t0, 32
;;
dep t0 = 0, hi, 15, 49 // t0 = (hi & 0x7fff)
;;
shl t0 = t0, 16 // t0 = (hi & 0x7fff) << 16
shr t1 = hi, 15 // t1 = (hi >> 15)
;;
add lo = lo, t0
;;
cmp.gtu p6, p0 = lo, m
;;
(p6) and lo = lo, m
;;
(p6) add lo = 1, lo
;;
add lo = lo, t1
;;
cmp.gtu p6, p0 = lo, m
;;
(p6) and lo = lo, m
;;
(p6) add lo = 1, lo
br.ret.sptk.many rp
END(carta_random32)
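In C, Carta's trick amounts to: form the product 16807 * seed (at most 46 bits), split it at bit 31, and fold the halves together using the fact that 2^31 == 1 (mod 2^31 - 1), so no division is needed. This is an illustrative sketch, not a line-by-line transcription of the assembly, which never forms the full product and instead folds two 32-bit partial products:

	#include <stdint.h>

	/* seed = 16807 * seed mod (2^31 - 1), assuming 1 <= seed <= 2^31 - 2. */
	uint32_t carta_random32 (uint32_t seed)
	{
		const uint32_t m = 0x7fffffff;		/* 2^31 - 1, prime */
		uint64_t p = 16807ULL * seed;		/* at most 46 bits */
		uint32_t lo = p & m;			/* low 31 bits */
		uint32_t hi = p >> 31;			/* at most 15 bits */

		lo += hi;				/* fold: 2^31 == 1 (mod m) */
		if (lo > m)
			lo = (lo & m) + 1;		/* same fixup the assembly uses */
		return lo;
	}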
@@ -78,7 +78,7 @@ ia64_init_addr_space (void)
vma->vm_mm = current->mm;
vma->vm_start = IA64_RBS_BOT;
vma->vm_end = vma->vm_start + PAGE_SIZE;
vma->vm_page_prot = PAGE_COPY;
vma->vm_page_prot = protection_map[VM_READ | VM_WRITE];
vma->vm_flags = VM_READ|VM_WRITE|VM_MAYREAD|VM_MAYWRITE|VM_GROWSUP;
vma->vm_ops = NULL;
vma->vm_pgoff = 0;
......
@@ -78,6 +78,7 @@
#ifndef __ASSEMBLY__
#include <linux/types.h>
#include <asm/fpu.h>
/*
* Data types needed to pass information into PAL procedures and
@@ -649,12 +650,43 @@ extern struct ia64_pal_retval ia64_pal_call_static (u64, u64, u64, u64, u64);
extern struct ia64_pal_retval ia64_pal_call_stacked (u64, u64, u64, u64);
extern struct ia64_pal_retval ia64_pal_call_phys_static (u64, u64, u64, u64);
extern struct ia64_pal_retval ia64_pal_call_phys_stacked (u64, u64, u64, u64);
#define PAL_CALL(iprv,a0,a1,a2,a3) iprv = ia64_pal_call_static(a0, a1, a2, a3, 0)
#define PAL_CALL_IC_OFF(iprv,a0,a1,a2,a3) iprv = ia64_pal_call_static(a0, a1, a2, a3, 1)
#define PAL_CALL_STK(iprv,a0,a1,a2,a3) iprv = ia64_pal_call_stacked(a0, a1, a2, a3)
#define PAL_CALL_PHYS(iprv,a0,a1,a2,a3) iprv = ia64_pal_call_phys_static(a0, a1, a2, a3)
#define PAL_CALL_PHYS_STK(iprv,a0,a1,a2,a3) iprv = ia64_pal_call_phys_stacked(a0, a1, a2, a3)
extern void ia64_save_scratch_fpregs (struct ia64_fpreg *);
extern void ia64_load_scratch_fpregs (struct ia64_fpreg *);
#define PAL_CALL(iprv,a0,a1,a2,a3) do { \
struct ia64_fpreg fr[6]; \
ia64_save_scratch_fpregs(fr); \
iprv = ia64_pal_call_static(a0, a1, a2, a3, 0); \
ia64_load_scratch_fpregs(fr); \
} while (0)
#define PAL_CALL_IC_OFF(iprv,a0,a1,a2,a3) do { \
struct ia64_fpreg fr[6]; \
ia64_save_scratch_fpregs(fr); \
iprv = ia64_pal_call_static(a0, a1, a2, a3, 1); \
ia64_load_scratch_fpregs(fr); \
} while (0)
#define PAL_CALL_STK(iprv,a0,a1,a2,a3) do { \
struct ia64_fpreg fr[6]; \
ia64_save_scratch_fpregs(fr); \
iprv = ia64_pal_call_stacked(a0, a1, a2, a3); \
ia64_load_scratch_fpregs(fr); \
} while (0)
#define PAL_CALL_PHYS(iprv,a0,a1,a2,a3) do { \
struct ia64_fpreg fr[6]; \
ia64_save_scratch_fpregs(fr); \
iprv = ia64_pal_call_phys_static(a0, a1, a2, a3); \
ia64_load_scratch_fpregs(fr); \
} while (0)
#define PAL_CALL_PHYS_STK(iprv,a0,a1,a2,a3) do { \
struct ia64_fpreg fr[6]; \
ia64_save_scratch_fpregs(fr); \
iprv = ia64_pal_call_phys_stacked(a0, a1, a2, a3); \
ia64_load_scratch_fpregs(fr); \
} while (0)
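A hedged usage sketch (probe_vm_summary() is a hypothetical caller; PAL_VM_SUMMARY is one of the static PAL calls): with the save/load bracket built into the macro, the caller no longer has to care about the fp scratch partition:

	long probe_vm_summary (void)
	{
		struct ia64_pal_retval iprv;

		PAL_CALL(iprv, PAL_VM_SUMMARY, 0, 0, 0);	/* f10-f15 preserved */
		return iprv.status;				/* 0 on success */
	}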
typedef int (*ia64_pal_handler) (u64, ...);
extern ia64_pal_handler ia64_pal;
......
@@ -45,6 +45,7 @@
* PMC flags
*/
#define PFM_REGFL_OVFL_NOTIFY 0x1 /* send notification on overflow */
#define PFM_REGFL_RANDOM 0x2 /* randomize sampling periods */
/*
* PMD/PMC/IBR/DBR return flags (ignored on input)
@@ -86,8 +87,10 @@ typedef struct {
unsigned long reg_short_reset;/* reset after counter overflow (small) */
unsigned long reg_reset_pmds[4]; /* which other counters to reset on overflow */
unsigned long reg_random_seed; /* seed value when randomization is used */
unsigned long reg_random_mask; /* bitmask used to limit random value */
unsigned long reserved[16]; /* for future use */
unsigned long reserved[14]; /* for future use */
} pfarg_reg_t;
typedef struct {
@@ -132,28 +135,28 @@ typedef struct {
#define PFM_VERSION_MINOR(x) ((x) & 0xffff)
/*
* Entry header in the sampling buffer.
* The header is directly followed with the PMDS saved in increasing index
* order: PMD4, PMD5, .... How many PMDs are present is determined by the
* user program during context creation.
* Entry header in the sampling buffer. The header is directly followed
* with the PMDs saved in increasing index order: PMD4, PMD5, .... How
* many PMDs are present is determined by the user program during
* context creation.
*
* XXX: in this version of the entry, only up to 64 registers can be recorded
* This should be enough for quite some time. Always check sampling format
* before parsing entries!
* XXX: in this version of the entry, only up to 64 registers can be
* recorded. This should be enough for quite some time. Always check
* sampling format before parsing entries!
*
* Inn the case where multiple counters have overflowed at the same time, the
* rate field indicate the initial value of the first PMD, based on the index.
* For instance, if PMD2 and PMD5 have ovewrflowed for this entry, the rate field
* will show the initial value of PMD2.
* In the case where multiple counters overflow at the same time, the
* last_reset_value member indicates the initial value of the PMD with
* the smallest index. For instance, if PMD2 and PMD5 have overflowed,
* the last_reset_value member contains the initial value of PMD2.
*/
typedef struct {
int pid; /* identification of process */
int cpu; /* which cpu was used */
unsigned long rate; /* initial value of overflowed counter */
unsigned long stamp; /* timestamp */
unsigned long ip; /* where did the overflow interrupt happened */
unsigned long regs; /* bitmask of which registers overflowed */
unsigned long period; /* sampling period used by overflowed counter (smallest pmd index) */
int pid; /* identification of process */
int cpu; /* which cpu was used */
unsigned long last_reset_value; /* initial value of counter that overflowed */
unsigned long stamp; /* timestamp */
unsigned long last_reset_value; /* initial value of counter that overflowed */
unsigned long stamp; /* timestamp */
unsigned long ip; /* where the overflow interrupt happened */
unsigned long regs; /* bitmask of which registers overflowed */
unsigned long period; /* unused */
} perfmon_smpl_entry_t;
extern int perfmonctl(pid_t pid, int cmd, void *arg, int narg);
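A hypothetical user-level consumer of the layout described above (buf, nentries, and npmds are assumptions of this sketch; the monitoring program fixed the number of recorded PMDs at context-creation time):

	#include <stdio.h>

	void dump_samples (void *buf, unsigned long nentries, unsigned long npmds)
	{
		perfmon_smpl_entry_t *ent = buf;
		unsigned long i, j, *pmds;

		for (i = 0; i < nentries; ++i) {
			pmds = (unsigned long *) (ent + 1);	/* PMD4, PMD5, ... */
			printf("pid=%d cpu=%d ip=%#lx last_reset=%#lx\n",
			       ent->pid, ent->cpu, ent->ip, ent->last_reset_value);
			for (j = 0; j < npmds; ++j)
				printf("\tpmd[%lu]=%#lx\n", j, pmds[j]);
			ent = (perfmon_smpl_entry_t *) (pmds + npmds);
		}
	}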
......
@@ -37,9 +37,9 @@ ia64_rse_rnat_addr (unsigned long *slot_addr)
}
/*
* Calcuate the number of registers in the dirty partition starting at
* BSPSTORE with a size of DIRTY bytes. This isn't simply DIRTY
* divided by eight because the 64th slot is used to store ar.rnat.
* Calculate the number of registers in the dirty partition starting at BSPSTORE and
* ending at BSP. This isn't simply (BSP-BSPSTORE)/8 because every 64th slot stores
* ar.rnat.
*/
static __inline__ unsigned long
ia64_rse_num_regs (unsigned long *bspstore, unsigned long *bsp)
......
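The hunk is truncated here; for reference, a body consistent with the rewritten comment (using the ia64_rse_slot_num() helper defined earlier in the same header) reads essentially as follows in rse.h of this vintage: subtract from the raw slot count however many of those slots are RNaT collections:

	static __inline__ unsigned long
	ia64_rse_num_regs (unsigned long *bspstore, unsigned long *bsp)
	{
		unsigned long slots = (bsp - bspstore);

		return slots - (ia64_rse_slot_num(bspstore) + slots)/0x40;
	}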
@@ -38,9 +38,12 @@ extern spinlock_t sal_lock;
# define SAL_CALL(result,args...) do { \
unsigned long flags; \
struct ia64_fpreg fr[6]; \
ia64_save_scratch_fpregs(fr); \
spin_lock_irqsave(&sal_lock, flags); \
__SAL_CALL(result,args); \
spin_unlock_irqrestore(&sal_lock, flags); \
ia64_load_scratch_fpregs(fr); \
} while (0)
#define SAL_SET_VECTORS 0x01000000
......
@@ -66,6 +66,7 @@ typedef struct siginfo {
long _band; /* POLL_IN, POLL_OUT, POLL_MSG (XPG requires a "long") */
int _fd;
} _sigpoll;
/* SIGPROF */
struct {
pid_t _pid; /* which child */
......
@@ -148,7 +148,7 @@ do { \
"cmp.ne p6,p7=%1,r0;;" \
"(p6) ssm psr.i;" \
"(p7) rsm psr.i;;" \
"srlz.d" \
"(p6) srlz.d" \
: "=&r" (old_psr) : "r"((psr) & IA64_PSR_I) \
: "p6", "p7", "memory"); \
if ((old_psr & IA64_PSR_I) && !(psr & IA64_PSR_I)) { \
......