Commit 4dbfeac2 authored by David Mosberger

ia64: Add unwcheck.sh script contributed by Harish Patil. It checks

	the unwind info for consistency (well, just the obvious
	stuff, but it's a start).
	Fix the couple of bugs that this script uncovered (and work
	around one false positive).
parent cc735c78
@@ -58,6 +58,9 @@ all compressed: vmlinux.gz
 vmlinux.gz: vmlinux
 	$(call makeboot,vmlinux.gz)
 
+check: vmlinux
+	arch/ia64/scripts/unwcheck.sh vmlinux
+
 archmrproper:
 archclean:
 	$(Q)$(MAKE) -f scripts/Makefile.clean obj=arch/ia64/boot
...
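With this target in place the checker can also be run by hand. A minimal sketch of a typical invocation, assuming a freshly built kernel image at the top of the tree:

	$ sh arch/ia64/scripts/unwcheck.sh vmlinux

The check: rule above runs the same command, so "make check" on an ia64 build should produce the same report.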
@@ -95,12 +95,19 @@ END(sys32_sigsuspend)
 GLOBAL_ENTRY(ia32_ret_from_clone)
 	PT_REGS_UNWIND_INFO(0)
 #if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT)
+{	/*
+	 * Some versions of gas generate bad unwind info if the first instruction of a
+	 * procedure doesn't go into the first slot of a bundle. This is a workaround.
+	 */
+	nop.m 0
+	nop.i 0
 	/*
 	 * We need to call schedule_tail() to complete the scheduling process.
 	 * Called by ia64_switch_to after do_fork()->copy_thread(). r8 contains the
 	 * address of the previously executing task.
 	 */
 	br.call.sptk.many rp=ia64_invoke_schedule_tail
+}
 .ret1:
 #endif
 	adds r2=TI_FLAGS+IA64_TASK_SIZE,r13
...
@@ -507,7 +507,14 @@ END(invoke_syscall_trace)
 GLOBAL_ENTRY(ia64_trace_syscall)
 	PT_REGS_UNWIND_INFO(0)
+{	/*
+	 * Some versions of gas generate bad unwind info if the first instruction of a
+	 * procedure doesn't go into the first slot of a bundle. This is a workaround.
+	 */
+	nop.m 0
+	nop.i 0
 	br.call.sptk.many rp=invoke_syscall_trace	// give parent a chance to catch syscall args
+}
 .ret6:	br.call.sptk.many rp=b6			// do the syscall
 strace_check_retval:
 	cmp.lt p6,p0=r8,r0			// syscall failed?
@@ -537,12 +544,19 @@ END(ia64_trace_syscall)
 GLOBAL_ENTRY(ia64_ret_from_clone)
 	PT_REGS_UNWIND_INFO(0)
+{	/*
+	 * Some versions of gas generate bad unwind info if the first instruction of a
+	 * procedure doesn't go into the first slot of a bundle. This is a workaround.
+	 */
+	nop.m 0
+	nop.i 0
 	/*
 	 * We need to call schedule_tail() to complete the scheduling process.
 	 * Called by ia64_switch_to() after do_fork()->copy_thread(). r8 contains the
 	 * address of the previously executing task.
 	 */
 	br.call.sptk.many rp=ia64_invoke_schedule_tail
+}
 .ret8:
 	adds r2=TI_FLAGS+IA64_TASK_SIZE,r13
 	;;
@@ -716,21 +730,9 @@ dont_preserve_current_frame:
 	shr.u loc1=r18,9		// RNaTslots <= dirtySize / (64*8) + 1
 	sub r17=r17,r18			// r17 = (physStackedSize + 8) - dirtySize
 	;;
-#if 1
-	.align 32		// see comment below about gas bug...
-#endif
 	mov ar.rsc=r19			// load ar.rsc to be used for "loadrs"
 	shladd in0=loc1,3,r17
 	mov in1=0
-#if 0
-	// gas-2.12.90 is unable to generate a stop bit after .align, which is bad,
-	// because alloc must be at the beginning of an insn-group.
-	.align 32
-#else
-	nop 0
-	nop 0
-	nop 0
-#endif
 	;;
 rse_clear_invalid:
 #ifdef CONFIG_ITANIUM
@@ -969,12 +971,11 @@ END(sys_rt_sigreturn)
 END(sys_rt_sigreturn)
 
 GLOBAL_ENTRY(ia64_prepare_handle_unaligned)
-	//
-	// r16 = fake ar.pfs, we simply need to make sure
-	// privilege is still 0
-	//
-	mov r16=r0
 	.prologue
+	/*
+	 * r16 = fake ar.pfs, we simply need to make sure privilege is still 0
+	 */
+	mov r16=r0
 	DO_SAVE_SWITCH_STACK
 	br.call.sptk.many rp=ia64_handle_unaligned	// stack frame setup in ivt
 .ret21:	.body
...
@@ -144,15 +144,18 @@ END(fsys_fallback_syscall)
  * call stack.
  */
 
+#define SIGTRAMP_SAVES \
+	.unwabi @svr4, 's'	// mark this as a sigtramp handler (saves scratch regs) \
+	.savesp ar.unat, UNAT_OFF+SIGCONTEXT_OFF \
+	.savesp ar.fpsr, FPSR_OFF+SIGCONTEXT_OFF \
+	.savesp pr, PR_OFF+SIGCONTEXT_OFF \
+	.savesp rp, RP_OFF+SIGCONTEXT_OFF \
+	.vframesp SP_OFF+SIGCONTEXT_OFF
+
 GLOBAL_ENTRY(ia64_sigtramp)
 	// describe the state that is active when we get here:
 	.prologue
-	.unwabi @svr4, 's'	// mark this as a sigtramp handler (saves scratch regs)
-	.savesp ar.unat, UNAT_OFF+SIGCONTEXT_OFF
-	.savesp ar.fpsr, FPSR_OFF+SIGCONTEXT_OFF
-	.savesp pr, PR_OFF+SIGCONTEXT_OFF
-	.savesp rp, RP_OFF+SIGCONTEXT_OFF
-	.vframesp SP_OFF+SIGCONTEXT_OFF
+	SIGTRAMP_SAVES
 	.body
 	.label_state 1
@@ -237,10 +240,11 @@ back_from_restore_rbs:
 	ldf.fill f14=[base0],32
 	ldf.fill f15=[base1],32
 	mov r15=__NR_rt_sigreturn
+	.restore sp			// pop .prologue
 	break __BREAK_SYSCALL
 
-	.body
-	.copy_state 1
+	.prologue
+	SIGTRAMP_SAVES
 setup_rbs:
 	mov ar.rsc=0			// put RSE into enforced lazy mode
 	;;
@@ -252,6 +256,7 @@ setup_rbs:
 	;;
 	.spillsp ar.rnat, RNAT_OFF+SIGCONTEXT_OFF
 	st8 [r14]=r16			// save sc_ar_rnat
+	.body
 	adds r14=(LOADRS_OFF+SIGCONTEXT_OFF),sp
 	mov.m r16=ar.bsp		// sc_loadrs <- (new bsp - new bspstore) << 16
@@ -263,10 +268,11 @@ setup_rbs:
 	;;
 	st8 [r14]=r15			// save sc_loadrs
 	mov ar.rsc=0xf			// set RSE into eager mode, pl 3
+	.restore sp			// pop .prologue
 	br.cond.sptk back_from_setup_rbs
 
 	.prologue
-	.copy_state 1
+	SIGTRAMP_SAVES
 	.spillsp ar.rnat, RNAT_OFF+SIGCONTEXT_OFF
 	.body
 restore_rbs:
...
@@ -159,7 +159,7 @@ GLOBAL_ENTRY(__copy_user)
 	mov ar.ec=2
 (p10)	br.dpnt.few .aligned_src_tail
 	;;
-	.align 32
+//	.align 32
 1:
 EX(.ex_handler, (p16)	ld8	r34=[src0],16)
 EK(.ex_handler, (p16)	ld8	r38=[src1],16)
@@ -316,7 +316,7 @@ EK(.ex_handler, (p[D])	st8 [dst1] = t15, 4*8)
 (p7)	mov ar.lc = r21
 (p8)	mov ar.lc = r0
 	;;
-	.align 32
+//	.align 32
 1:	lfetch.fault [src_pre_mem], 128
 	lfetch.fault.excl [dst_pre_mem], 128
 	br.cloop.dptk.few 1b
@@ -522,7 +522,7 @@ EK(.ex_handler, (p17)	st8 [dst1]=r39,8); \
 	shrp r21=r22,r38,shift;	/* speculative work */ \
 	br.sptk.few .unaligned_src_tail	/* branch out of jump table */ \
 	;;
-	.align 32
+//	.align 32
 .jump_table:
 	COPYU(8)	// unaligned cases
 .jmp1:
...
@@ -125,7 +125,7 @@ GLOBAL_ENTRY(memset)
 (p_zr)	br.cond.dptk.many .l1b		// Jump to use stf.spill
 ;; }
-	.align 32 // -------------------------- // L1A: store ahead into cache lines; fill later
+//	.align 32
 { .mmi
 	and	tmp = -(LINE_SIZE), cnt	// compute end of range
 	mov	ptr9 = ptr1		// used for prefetching
@@ -194,7 +194,7 @@ GLOBAL_ENTRY(memset)
 	br.cond.dpnt.many .move_bytes_from_alignment	// Branch no. 3
 ;; }
-	.align 32
+//	.align 32
 .l1b:	// ------------------------------------ // L1B: store ahead into cache lines; fill later
 { .mmi
 	and	tmp = -(LINE_SIZE), cnt	// compute end of range
@@ -261,7 +261,7 @@ GLOBAL_ENTRY(memset)
 	and	cnt = 0x1f, cnt		// compute the remaining cnt
 	mov.i	ar.lc = loopcnt
 ;; }
-	.align 32
+//	.align 32
 .l2:	// ------------------------------------ // L2A: store 32B in 2 cycles
 { .mmb
 	stf8	[ptr1] = fvalue, 8
...
#!/bin/sh
# Usage: unwcheck.sh <executable_file_name>
# Pre-requisite: readelf [from Gnu binutils package]
# Purpose: Check the following invariant
# For each code range in the input binary:
# Sum[ lengths of unwind regions] = Number of slots in code range.
# Author : Harish Patil
# First version: January 2002
# Modified : 2/13/2002
# Modified : 3/15/2002: duplicate detection
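# Note: ia64 code is laid out in 16-byte bundles of three instruction slots,
# so the slot count of a range [lo, hi) is (hi - lo) / 16 * 3; the awk program
# below compares that number against the summed rlen of the unwind regions.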
readelf -u $1 | gawk '\
function todec(hexstr){
dec = 0;
l = length(hexstr);
for (i = 1; i <= l; i++)
{
c = substr(hexstr, i, 1);
if (c == "A")
dec = dec*16 + 10;
else if (c == "B")
dec = dec*16 + 11;
else if (c == "C")
dec = dec*16 + 12;
else if (c == "D")
dec = dec*16 + 13;
else if (c == "E")
dec = dec*16 + 14;
else if (c == "F")
dec = dec*16 + 15;
else
dec = dec*16 + c;
}
return dec;
}
BEGIN { first = 1; sum_rlen = 0; no_slots = 0; errors=0; no_code_ranges=0; }
{
if (NF==5 && $3=="info")
{
no_code_ranges += 1;
if (first == 0)
{
if (sum_rlen != no_slots)
{
print full_code_range;
print " ", "lo = ", lo, " hi =", hi;
print " ", "sum_rlen = ", sum_rlen, "no_slots = " no_slots;
print " "," ", "*******ERROR ***********";
print " "," ", "sum_rlen:", sum_rlen, " != no_slots:" no_slots;
errors += 1;
}
sum_rlen = 0;
}
full_code_range = $0;
code_range = $2;
gsub("..$", "", code_range);
gsub("^.", "", code_range);
split(code_range, addr, "-");
lo = toupper(addr[1]);
code_range_lo[no_code_ranges] = addr[1];
occurs[addr[1]] += 1;
full_range[addr[1]] = $0;
gsub("0X.[0]*", "", lo);
hi = toupper(addr[2]);
gsub("0X.[0]*", "", hi);
no_slots = (todec(hi) - todec(lo))/ 16*3
first = 0;
}
if (index($0,"rlen") > 0 )
{
rlen_str = substr($0, index($0,"rlen"));
rlen = rlen_str;
gsub("rlen=", "", rlen);
gsub(")", "", rlen);
sum_rlen = sum_rlen + rlen;
}
}
END {
if (first == 0)
{
if (sum_rlen != no_slots)
{
print "code_range=", code_range;
print " ", "lo = ", lo, " hi =", hi;
print " ", "sum_rlen = ", sum_rlen, "no_slots = " no_slots;
print " "," ", "*******ERROR ***********";
print " "," ", "sum_rlen:", sum_rlen, " != no_slots:" no_slots;
errors += 1;
}
}
no_duplicates = 0;
for (i=1; i<=no_code_ranges; i++)
{
cr = code_range_lo[i];
if (reported_cr[cr]==1) continue;
if ( occurs[cr] > 1)
{
reported_cr[cr] = 1;
print "Code range low ", code_range_lo[i], ":", full_range[cr], " occurs: ", occurs[cr], " times.";
print " ";
no_duplicates++;
}
}
print "======================================"
print "Total errors:", errors, "/", no_code_ranges, " duplicates:", no_duplicates;
print "======================================"
}
'
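For reference, a clean run ends with the summary printed by the awk END block, along these lines (the range count varies with the kernel configuration; any inconsistent code range is reported above the summary together with its lo/hi addresses, sum_rlen, and no_slots):

	======================================
	Total errors: 0 / <number of code ranges>  duplicates: 0
	======================================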