Commit 081129e2 authored by Dmitriy Vyukov

runtime: allocate internal symbol table eagerly

We need it for GC anyway.

R=golang-dev, khr, dave, khr
CC=golang-dev
https://golang.org/cl/9728044
parent 4d6bfcf2
@@ -128,10 +128,6 @@ runtime·SetCPUProfileRate(intgo hz)
 	uintptr *p;
 	uintptr n;
 
-	// Call findfunc now so that it won't have to
-	// build tables during the signal handler.
-	runtime·findfunc(0);
-
 	// Clamp hz to something reasonable.
 	if(hz < 0)
 		hz = 0;
@@ -133,10 +133,8 @@ runtime·schedinit(void)
 	runtime·goargs();
 	runtime·goenvs();
 
-	// For debugging:
-	// Allocate internal symbol table representation now,
-	// so that we don't need to call malloc when we crash.
-	// runtime·findfunc(0);
+	// Allocate internal symbol table representation now, we need it for GC anyway.
+	runtime·symtabinit();
 
 	runtime·sched.lastpoll = runtime·nanotime();
 	procs = 1;
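The hunk above is the heart of the change: instead of building the symbol table lazily the first time runtime·findfunc needs it, runtime·schedinit now builds it once, eagerly, during startup, before the GC or the profiling signal handler can ever look at it. Below is a minimal standalone C sketch of that eager-initialization pattern, not the runtime's actual code; all names (symtab_init, symtab_lookup, g_entries) are hypothetical stand-ins.

/* Eager init: build the table once at startup so later readers
 * (GC, signal handlers) need no locks and no allocation. */
#include <stdint.h>
#include <stdio.h>

static const uintptr_t *g_entries;   /* sorted function entry addresses */
static int              g_nentries;

/* Called exactly once, early in startup (cf. runtime·schedinit above). */
static void symtab_init(void)
{
	static const uintptr_t table[] = { 0x1000, 0x2000, 0x3000 };  /* placeholder data */
	g_entries  = table;
	g_nentries = sizeof table / sizeof table[0];
}

/* Safe to call later from a signal handler or the GC: read-only, lock-free. */
static int symtab_lookup(uintptr_t addr)
{
	int lo = 0, hi = g_nentries;
	while (lo < hi) {                 /* upper-bound binary search */
		int mid = lo + (hi - lo) / 2;
		if (g_entries[mid] <= addr)
			lo = mid + 1;
		else
			hi = mid;
	}
	return lo - 1;                    /* index of containing entry, or -1 if below all */
}

int main(void)
{
	symtab_init();                    /* eager: done before any lookup can happen */
	printf("0x2500 falls in entry %d\n", symtab_lookup(0x2500));
	return 0;
}

The design trade-off is paying the table-construction cost unconditionally at startup in exchange for readers that are trivially signal- and GC-safe.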
@@ -749,6 +749,7 @@ void	runtime·mpreinit(M*);
 void	runtime·minit(void);
 void	runtime·unminit(void);
 void	runtime·signalstack(byte*, int32);
+void	runtime·symtabinit(void);
 Func*	runtime·findfunc(uintptr);
 int32	runtime·funcline(Func*, uintptr);
 void*	runtime·stackalloc(uint32);
@@ -193,8 +193,6 @@ static int32 nfunc;
 static byte **fname;
 static int32 nfname;
 
-static uint32 funcinit;
-static Lock funclock;
 static uintptr lastvalue;
 
 static void
@@ -539,8 +537,8 @@ runtime·funcline_go(Func *f, uintptr targetpc, String retfile, intgo retline)
 	FLUSH(&retline);
 }
 
-static void
-buildfuncs(void)
+void
+runtime·symtabinit(void)
 {
 	extern byte etext[];
 
@@ -591,26 +589,6 @@ runtime·findfunc(uintptr addr)
 	Func *f;
 	int32 nf, n;
 
-	// Use atomic double-checked locking,
-	// because when called from pprof signal
-	// handler, findfunc must run without
-	// grabbing any locks.
-	// (Before enabling the signal handler,
-	// SetCPUProfileRate calls findfunc to trigger
-	// the initialization outside the handler.)
-	// Avoid deadlock on fault during malloc
-	// by not calling buildfuncs if we're already in malloc.
-	if(!m->mallocing && !m->gcing) {
-		if(runtime·atomicload(&funcinit) == 0) {
-			runtime·lock(&funclock);
-			if(funcinit == 0) {
-				buildfuncs();
-				runtime·atomicstore(&funcinit, 1);
-			}
-			runtime·unlock(&funclock);
-		}
-	}
-
 	if(nfunc == 0)
 		return nil;
 	if(addr < func[0].entry || addr >= func[nfunc].entry)
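The code deleted from runtime·findfunc above was a lazy initializer guarded by atomic double-checked locking, with extra care to stay lock-free in the pprof signal handler and to avoid deadlocking on a fault during malloc. For reference, here is a minimal standalone C11 sketch of that pattern, not the runtime's code: stdatomic and pthreads stand in for the runtime's atomicload/atomicstore and lock/unlock, and build_tables/ensure_tables are hypothetical names.

/* Double-checked locking: cheap atomic fast path, mutex-protected slow path. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_uint     initialized;                       /* 0 until tables exist */
static pthread_mutex_t initlock = PTHREAD_MUTEX_INITIALIZER;

static void build_tables(void)
{
	/* ... expensive one-time setup would go here ... */
	puts("tables built");
}

void ensure_tables(void)
{
	/* Fast path: an acquire load skips the lock once initialization is done. */
	if (atomic_load_explicit(&initialized, memory_order_acquire) == 0) {
		pthread_mutex_lock(&initlock);
		if (atomic_load_explicit(&initialized, memory_order_relaxed) == 0) {
			build_tables();
			/* Release store publishes the tables before the flag flips to 1. */
			atomic_store_explicit(&initialized, 1, memory_order_release);
		}
		pthread_mutex_unlock(&initlock);
	}
}

int main(void)
{
	ensure_tables();   /* first call builds */
	ensure_tables();   /* second call hits the lock-free fast path */
	return 0;
}

After this CL none of that machinery is needed in findfunc: runtime·schedinit builds the table via runtime·symtabinit before profiling or the GC can observe it, so findfunc can assume the table already exists.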