Commit 58cb4a77 authored by David Mosberger's avatar David Mosberger Committed by David Mosberger

ia64: Fix bug in ia64_get_scratch_nat_bits()/ia64_put_scratch_nat_bits().

parent 7f2ca985
...@@ -75,12 +75,25 @@ ia64_get_scratch_nat_bits (struct pt_regs *pt, unsigned long scratch_unat) ...@@ -75,12 +75,25 @@ ia64_get_scratch_nat_bits (struct pt_regs *pt, unsigned long scratch_unat)
({ \ ({ \
unsigned long bit = ia64_unat_pos(&pt->r##first); \ unsigned long bit = ia64_unat_pos(&pt->r##first); \
unsigned long mask = ((1UL << (last - first + 1)) - 1) << first; \ unsigned long mask = ((1UL << (last - first + 1)) - 1) << first; \
(ia64_rotl(unat, first) >> bit) & mask; \ unsigned long dist; \
if (bit < first) \
dist = 64 + bit - first; \
else \
dist = bit - first; \
ia64_rotr(unat, dist) & mask; \
}) })
unsigned long val; unsigned long val;
val = GET_BITS( 1, 3, scratch_unat); /*
val |= GET_BITS(12, 15, scratch_unat); * Registers that are stored consecutively in struct pt_regs can be handled in
* parallel. If the register order in struct_pt_regs changes, this code MUST be
* updated.
*/
val = GET_BITS( 1, 1, scratch_unat);
val |= GET_BITS( 2, 3, scratch_unat);
val |= GET_BITS(12, 13, scratch_unat);
val |= GET_BITS(14, 14, scratch_unat);
val |= GET_BITS(15, 15, scratch_unat);
val |= GET_BITS( 8, 11, scratch_unat); val |= GET_BITS( 8, 11, scratch_unat);
val |= GET_BITS(16, 31, scratch_unat); val |= GET_BITS(16, 31, scratch_unat);
return val; return val;
...@@ -96,16 +109,29 @@ ia64_get_scratch_nat_bits (struct pt_regs *pt, unsigned long scratch_unat) ...@@ -96,16 +109,29 @@ ia64_get_scratch_nat_bits (struct pt_regs *pt, unsigned long scratch_unat)
unsigned long unsigned long
ia64_put_scratch_nat_bits (struct pt_regs *pt, unsigned long nat) ia64_put_scratch_nat_bits (struct pt_regs *pt, unsigned long nat)
{ {
# define PUT_BITS(first, last, nat) \
({ \
unsigned long bit = ia64_unat_pos(&pt->r##first); \
unsigned long mask = ((1UL << (last - first + 1)) - 1) << first; \
long dist; \
if (bit < first) \
dist = 64 + bit - first; \
else \
dist = bit - first; \
ia64_rotl(nat & mask, dist); \
})
unsigned long scratch_unat; unsigned long scratch_unat;
# define PUT_BITS(first, last, nat) \ /*
({ \ * Registers that are stored consecutively in struct pt_regs can be handled in
unsigned long bit = ia64_unat_pos(&pt->r##first); \ * parallel. If the register order in struct_pt_regs changes, this code MUST be
unsigned long mask = ((1UL << (last - first + 1)) - 1) << bit; \ * updated.
(ia64_rotr(nat, first) << bit) & mask; \ */
}) scratch_unat = PUT_BITS( 1, 1, nat);
scratch_unat = PUT_BITS( 1, 3, nat); scratch_unat |= PUT_BITS( 2, 3, nat);
scratch_unat |= PUT_BITS(12, 15, nat); scratch_unat |= PUT_BITS(12, 13, nat);
scratch_unat |= PUT_BITS(14, 14, nat);
scratch_unat |= PUT_BITS(15, 15, nat);
scratch_unat |= PUT_BITS( 8, 11, nat); scratch_unat |= PUT_BITS( 8, 11, nat);
scratch_unat |= PUT_BITS(16, 31, nat); scratch_unat |= PUT_BITS(16, 31, nat);
......
...@@ -347,12 +347,6 @@ setup_sigcontext (struct sigcontext *sc, sigset_t *mask, struct sigscratch *scr) ...@@ -347,12 +347,6 @@ setup_sigcontext (struct sigcontext *sc, sigset_t *mask, struct sigscratch *scr)
__copy_to_user(&sc->sc_fr[32], current->thread.fph, 96*16); __copy_to_user(&sc->sc_fr[32], current->thread.fph, 96*16);
} }
/*
* Note: sw->ar_unat is UNDEFINED unless the process is being
* PTRACED. However, this is OK because the NaT bits of the
* preserved registers (r4-r7) are never being looked at by
* the signal handler (registers r4-r7 are used instead).
*/
nat = ia64_get_scratch_nat_bits(&scr->pt, scr->scratch_unat); nat = ia64_get_scratch_nat_bits(&scr->pt, scr->scratch_unat);
err = __put_user(flags, &sc->sc_flags); err = __put_user(flags, &sc->sc_flags);
......
...@@ -655,24 +655,13 @@ ia64_get_dbr (__u64 regnum) ...@@ -655,24 +655,13 @@ ia64_get_dbr (__u64 regnum)
return retval; return retval;
} }
/* XXX remove the handcoded version once we have a sufficiently clever compiler... */ static inline __u64
#ifdef SMART_COMPILER ia64_rotr (__u64 w, __u64 n)
# define ia64_rotr(w,n) \ {
({ \ return (w >> n) | (w << (64 - n));
__u64 __ia64_rotr_w = (w), _n = (n); \ }
\
(__ia64_rotr_w >> _n) | (__ia64_rotr_w << (64 - _n)); \
})
#else
# define ia64_rotr(w,n) \
({ \
__u64 __ia64_rotr_w; \
__ia64_rotr_w = ia64_shrp((w), (w), (n)); \
__ia64_rotr_w; \
})
#endif
#define ia64_rotl(w,n) ia64_rotr((w),(64)-(n)) #define ia64_rotl(w,n) ia64_rotr((w), (64) - (n))
/* /*
* Take a mapped kernel address and return the equivalent address * Take a mapped kernel address and return the equivalent address
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment