Commit 32efa16c authored by David Chase

cmd/compile: added stats printing to stackalloc

This is controlled by the "regalloc" stats flag, since regalloc
calls stackalloc.  The plan is for this to allow comparison
of cheaper stack allocation algorithms with what we have now.

Change-Id: Ibf64a780344c69babfcbb328fd6d053ea2e02cfc
Reviewed-on: https://go-review.googlesource.com/21393
Run-TryBot: David Chase <drchase@google.com>
Reviewed-by: Keith Randall <khr@golang.org>
parent 7e40627a
...@@ -22,6 +22,13 @@ type stackAllocState struct { ...@@ -22,6 +22,13 @@ type stackAllocState struct {
names []LocalSlot names []LocalSlot
slots []int slots []int
used []bool used []bool
nArgSlot, // Number of Values sourced to arg slot
nNotNeed, // Number of Values not needing a stack slot
nNamedSlot, // Number of Values using a named stack slot
nReuse, // Number of values reusing a stack slot
nAuto, // Number of autos allocated for stack slots.
nSelfInterfere int32 // Number of self-interferences
} }
func newStackAllocState(f *Func) *stackAllocState { func newStackAllocState(f *Func) *stackAllocState {
...@@ -54,6 +61,7 @@ func putStackAllocState(s *stackAllocState) { ...@@ -54,6 +61,7 @@ func putStackAllocState(s *stackAllocState) {
s.f.Config.stackAllocState = s s.f.Config.stackAllocState = s
s.f = nil s.f = nil
s.live = nil s.live = nil
s.nArgSlot, s.nNotNeed, s.nNamedSlot, s.nReuse, s.nAuto, s.nSelfInterfere = 0, 0, 0, 0, 0, 0
} }
type stackValState struct { type stackValState struct {
...@@ -75,6 +83,13 @@ func stackalloc(f *Func, spillLive [][]ID) [][]ID { ...@@ -75,6 +83,13 @@ func stackalloc(f *Func, spillLive [][]ID) [][]ID {
defer putStackAllocState(s) defer putStackAllocState(s)
s.stackalloc() s.stackalloc()
if f.pass.stats > 0 {
f.logStat("stack_alloc_stats",
s.nArgSlot, "arg_slots", s.nNotNeed, "slot_not_needed",
s.nNamedSlot, "named_slots", s.nAuto, "auto_slots",
s.nReuse, "reused_slots", s.nSelfInterfere, "self_interfering")
}
return s.live return s.live
} }
...@@ -170,9 +185,11 @@ func (s *stackAllocState) stackalloc() { ...@@ -170,9 +185,11 @@ func (s *stackAllocState) stackalloc() {
for _, b := range f.Blocks { for _, b := range f.Blocks {
for _, v := range b.Values { for _, v := range b.Values {
if !s.values[v.ID].needSlot { if !s.values[v.ID].needSlot {
s.nNotNeed++
continue continue
} }
if v.Op == OpArg { if v.Op == OpArg {
s.nArgSlot++
continue // already picked continue // already picked
} }
...@@ -190,12 +207,14 @@ func (s *stackAllocState) stackalloc() { ...@@ -190,12 +207,14 @@ func (s *stackAllocState) stackalloc() {
if h != nil && h.(LocalSlot).N == name.N && h.(LocalSlot).Off == name.Off { if h != nil && h.(LocalSlot).N == name.N && h.(LocalSlot).Off == name.Off {
// A variable can interfere with itself. // A variable can interfere with itself.
// It is rare, but it can happen. // It is rare, but it can happen.
s.nSelfInterfere++
goto noname goto noname
} }
} }
if f.pass.debug > stackDebug { if f.pass.debug > stackDebug {
fmt.Printf("stackalloc %s to %s\n", v, name.Name()) fmt.Printf("stackalloc %s to %s\n", v, name.Name())
} }
s.nNamedSlot++
f.setHome(v, name) f.setHome(v, name)
continue continue
} }
...@@ -217,11 +236,13 @@ func (s *stackAllocState) stackalloc() { ...@@ -217,11 +236,13 @@ func (s *stackAllocState) stackalloc() {
var i int var i int
for i = 0; i < len(locs); i++ { for i = 0; i < len(locs); i++ {
if !used[i] { if !used[i] {
s.nReuse++
break break
} }
} }
// If there is no unused stack slot, allocate a new one. // If there is no unused stack slot, allocate a new one.
if i == len(locs) { if i == len(locs) {
s.nAuto++
locs = append(locs, LocalSlot{N: f.Config.fe.Auto(v.Type), Type: v.Type, Off: 0}) locs = append(locs, LocalSlot{N: f.Config.fe.Auto(v.Type), Type: v.Type, Off: 0})
locations[v.Type] = locs locations[v.Type] = locs
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment