Commit 397a7ba6 authored by David S. Miller

[SPARC64]: Always record actual PC when kernel profiling.

We used to play games reporting the caller's
PC in certain functions such as the rwlock
and atomic_t routines.  If anything, something
like this should be optional, not the default.
Signed-off-by: David S. Miller <davem@redhat.com>
parent 240bc998
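For context, the behavior being removed worked roughly as follows: the assembly helpers were bracketed by begin/end symbols, and when a profiling sample's PC fell inside one of those ranges the sample was charged to the caller's return address (%o7) instead of to the helper itself. A minimal C sketch of that idea, using an illustrative struct and a single range rather than the kernel's actual symbols:

/* Hypothetical sketch of the old caller-substitution logic; the names
 * sample, helper_begin and helper_end are illustrative, not the
 * kernel's actual definitions. */
struct sample {
	unsigned long pc;	/* trap PC (regs->tpc)             */
	unsigned long o7;	/* caller's return address (%o7)   */
};

static unsigned long old_profile_pc(const struct sample *s,
				    unsigned long helper_begin,
				    unsigned long helper_end)
{
	/* If the PC landed inside a bracketed helper (atomic, rwlock,
	 * memcpy, bzero, bitops), charge the sample to the caller. */
	if (s->pc >= helper_begin && s->pc < helper_end)
		return s->o7;

	/* Otherwise (and always, after this patch) record the real PC. */
	return s->pc;
}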
@@ -443,8 +443,7 @@ static inline void timer_check_rtc(void)
 
 void sparc64_do_profile(struct pt_regs *regs)
 {
-	unsigned long pc = regs->tpc;
-	unsigned long o7 = regs->u_regs[UREG_RETPC];
+	unsigned long pc;
 
 	profile_hook(regs);
 
@@ -454,32 +453,14 @@ void sparc64_do_profile(struct pt_regs *regs)
 	if (!prof_buffer)
 		return;
 
-	{
-		extern int rwlock_impl_begin, rwlock_impl_end;
-		extern int atomic_impl_begin, atomic_impl_end;
-		extern int __memcpy_begin, __memcpy_end;
-		extern int __bzero_begin, __bzero_end;
-		extern int __bitops_begin, __bitops_end;
-
-		if ((pc >= (unsigned long) &atomic_impl_begin &&
-		     pc < (unsigned long) &atomic_impl_end) ||
-		    (pc >= (unsigned long) &rwlock_impl_begin &&
-		     pc < (unsigned long) &rwlock_impl_end) ||
-		    (pc >= (unsigned long) &__memcpy_begin &&
-		     pc < (unsigned long) &__memcpy_end) ||
-		    (pc >= (unsigned long) &__bzero_begin &&
-		     pc < (unsigned long) &__bzero_end) ||
-		    (pc >= (unsigned long) &__bitops_begin &&
-		     pc < (unsigned long) &__bitops_end))
-			pc = o7;
-
-		pc -= (unsigned long) _stext;
-		pc >>= prof_shift;
-
-		if(pc >= prof_len)
-			pc = prof_len - 1;
-		atomic_inc((atomic_t *)&prof_buffer[pc]);
-	}
+	pc = regs->tpc;
+	pc -= (unsigned long) _stext;
+	pc >>= prof_shift;
+
+	if(pc >= prof_len)
+		pc = prof_len - 1;
+	atomic_inc((atomic_t *)&prof_buffer[pc]);
 }
 
 static irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs * regs)
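With the special cases gone, sparc64_do_profile() maps the trap PC straight into a profile-buffer slot. A self-contained sketch of that index computation, using made-up stand-ins for _stext, prof_shift and prof_len (the real values come from the kernel image and the boot-time profiling setup):

#include <stdio.h>

/* Made-up stand-ins for the kernel's _stext, prof_shift and prof_len;
 * they only serve to make the index computation runnable here. */
#define STEXT      0x400000UL
#define PROF_SHIFT 2UL
#define PROF_LEN   4096UL

static unsigned long pc_to_bucket(unsigned long pc)
{
	pc -= STEXT;		/* offset from the start of kernel text */
	pc >>= PROF_SHIFT;	/* 2^PROF_SHIFT bytes share one bucket  */
	if (pc >= PROF_LEN)	/* clamp out-of-range PCs into the last */
		pc = PROF_LEN - 1;	/* bucket                        */
	return pc;
}

int main(void)
{
	/* A PC 0x1230 bytes into the text maps to bucket 0x48c. */
	printf("bucket = %#lx\n", pc_to_bucket(STEXT + 0x1230));
	return 0;
}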
@@ -83,8 +83,6 @@
 	.text
 	.align	32
 #ifdef __KERNEL__
-	.globl	__bzero_begin
-__bzero_begin:
 	.globl	__bzero, __bzero_noasi
 __bzero_noasi:
 	rd	%asi, %g5
@@ -274,5 +272,3 @@ VISbzerofixup_zb:
 	ba,pt	%xcc, VISbzerofixup_ret0
 	 sub	%o1, %g2, %o0
 #endif
-	.globl	__bzero_end
-__bzero_end:
@@ -303,9 +303,6 @@
 	.type	bcopy,@function
 
 #ifdef __KERNEL__
-	.globl	__memcpy_begin
-__memcpy_begin:
-
 memcpy_private:
 memcpy:		mov	ASI_P, asi_src		! IEU0	Group
 	brnz,pt	%o2, __memcpy_entry		! CTI
@@ -1055,9 +1052,6 @@ fpu_retl:
 	FPU_RETL
 
 #ifdef __KERNEL__
-	.globl	__memcpy_end
-__memcpy_end:
-
 	.section	.fixup
 	.align	4
 VIScopyfixup_reto2:
@@ -9,10 +9,7 @@
 	.text
 	.align	64
 
-	.globl	atomic_impl_begin, atomic_impl_end
-
 	.globl	__atomic_add
-atomic_impl_begin:
 __atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
 	lduw	[%o1], %g5
 	add	%g5, %o0, %g7
@@ -56,4 +53,3 @@ __atomic64_sub: /* %o0 = increment, %o1 = atomic_ptr */
 	retl
 	 sub	%g7, %o0, %o0
-atomic_impl_end:
@@ -8,9 +8,6 @@
 	.text
 	.align	64
 
-	.globl	__bitops_begin
-__bitops_begin:
-
 	.globl	___test_and_set_bit
 ___test_and_set_bit:	/* %o0=nr, %o1=addr */
 	srlx	%o0, 6, %g1
@@ -105,6 +102,3 @@ ___test_and_clear_le_bit: /* %o0=nr, %o1=addr */
 	lduwa	[%o1] ASI_PL, %g7
 2:	retl
 	 membar	#StoreLoad | #StoreStore
-
-	.globl	__bitops_end
-__bitops_end:
@@ -7,12 +7,9 @@
 	.text
 	.align	64
 
-	.globl	rwlock_impl_begin, rwlock_impl_end
-
 	/* The non-contention read lock usage is 2 cache lines. */
 	.globl	__read_lock, __read_unlock
-rwlock_impl_begin:
 __read_lock:	/* %o0 = lock_ptr */
 	ldsw	[%o0], %g5
 	brlz,pn	%g5, __read_wait_for_writer
@@ -85,5 +82,4 @@ __write_trylock_succeed:
 __write_trylock_fail:
 	retl
 	 mov	0, %o0
-rwlock_impl_end: