Commit 4492811c authored by Russ Cox

cmd/internal/gc: manual goto removal + grind to move var decls

Also change gc.Naddr to return the Addr instead of filling it in.

Change-Id: I98a86705d23bee49626a12a042a4d51cabe290ea
Reviewed-on: https://go-review.googlesource.com/6601
Reviewed-by: Rob Pike <r@golang.org>
parent d0b59deb
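
The Naddr API change is easiest to see in isolation. A minimal before/after sketch of the call-site rewrite applied throughout the back ends (illustrative only; `n` and `p` stand for a *Node and an *obj.Prog already in scope at a real call site):

    // Before: Naddr filled in a caller-supplied *obj.Addr.
    var a obj.Addr
    gc.Naddr(n, &a, 1)
    p.From = a

    // After: Naddr returns the obj.Addr by value.
    p.From = gc.Naddr(n, 1)

Inside package gc itself the calls drop the package qualifier, e.g. pcdata.From = Naddr(&from, 0) in the plive.go hunk below.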
@@ -78,7 +78,7 @@ func zerorange(p *obj.Prog, frame int64, lo int64, hi int64, r0 *uint32) *obj.Pr
 p.Reg = arm.REGSP
 p = appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0)
 f := gc.Sysfunc("duffzero")
-gc.Naddr(f, &p.To, 1)
+p.To = gc.Naddr(f, 1)
 gc.Afunclit(&p.To, f)
 p.To.Offset = 4 * (128 - cnt/int64(gc.Widthptr))
 } else {
......
@@ -766,12 +766,14 @@ func gmove(f *gc.Node, t *gc.Node) {
 // removed.
 // requires register destination
 rdst:
-regalloc(&r1, t.Type, t)
+{
+regalloc(&r1, t.Type, t)
-gins(a, f, &r1)
-gmove(&r1, t)
-regfree(&r1)
-return
+gins(a, f, &r1)
+gmove(&r1, t)
+regfree(&r1)
+return
+}
 // requires register intermediate
 hard:
@@ -844,10 +846,10 @@ func gins(as int, f *gc.Node, t *gc.Node) *obj.Prog {
 var at obj.Addr
 if f != nil {
-gc.Naddr(f, &af, 1)
+af = gc.Naddr(f, 1)
 }
 if t != nil {
-gc.Naddr(t, &at, 1)
+at = gc.Naddr(t, 1)
 }
 p := gc.Prog(as)
 if f != nil {
@@ -868,7 +870,7 @@ func gins(as int, f *gc.Node, t *gc.Node) *obj.Prog {
 func raddr(n *gc.Node, p *obj.Prog) {
 var a obj.Addr
-gc.Naddr(n, &a, 1)
+a = gc.Naddr(n, 1)
 if a.Type != obj.TYPE_REG {
 if n != nil {
 gc.Fatal("bad in raddr: %v", gc.Oconv(int(n.Op), 0))
@@ -1304,7 +1306,7 @@ func sudoaddable(as int, n *gc.Node, a *obj.Addr, w *int) bool {
 reg1 := &clean[cleani-2]
 reg.Op = gc.OEMPTY
 reg1.Op = gc.OEMPTY
-gc.Naddr(n, a, 1)
+*a = gc.Naddr(n, 1)
 return true
 case gc.ODOT,
@@ -1328,7 +1330,7 @@ func sudoaddable(as int, n *gc.Node, a *obj.Addr, w *int) bool {
 n1.Type = n.Type
 n1.Xoffset += oary[0]
-gc.Naddr(&n1, a, 1)
+*a = gc.Naddr(&n1, 1)
 return true
 }
@@ -1356,7 +1358,7 @@ func sudoaddable(as int, n *gc.Node, a *obj.Addr, w *int) bool {
 a.Type = obj.TYPE_NONE
 a.Name = obj.NAME_NONE
 n1.Type = n.Type
-gc.Naddr(&n1, a, 1)
+*a = gc.Naddr(&n1, 1)
 return true
 case gc.OINDEX:
......
@@ -1115,8 +1115,6 @@ func bgen(n *gc.Node, true_ bool, likely int, to *obj.Prog) {
 return
 }
-var nr *gc.Node
 for n.Op == gc.OCONVNOP {
 n = n.Left
 if n.Ninit != nil {
@@ -1125,6 +1123,7 @@ func bgen(n *gc.Node, true_ bool, likely int, to *obj.Prog) {
 }
 var nl *gc.Node
+var nr *gc.Node
 switch n.Op {
 default:
 goto def
......
@@ -503,7 +503,6 @@ func dodiv(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
 gmove(&n31, &n3)
 }
-var p2 *obj.Prog
 var n4 gc.Node
 if gc.Nacl {
 // Native Client does not relay the divide-by-zero trap
@@ -520,6 +519,7 @@ func dodiv(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
 gc.Patch(p1, gc.Pc)
 }
+var p2 *obj.Prog
 if check != 0 {
 gc.Nodconst(&n4, t, -1)
 gins(optoas(gc.OCMP, t), &n3, &n4)
......
@@ -314,7 +314,6 @@ func gmove(f *gc.Node, t *gc.Node) {
 }
 // cannot have two memory operands
-var r1 gc.Node
 var a int
 if gc.Ismem(f) && gc.Ismem(t) {
 goto hard
@@ -669,15 +668,19 @@ func gmove(f *gc.Node, t *gc.Node) {
 // requires register destination
 rdst:
-regalloc(&r1, t.Type, t)
+{
+var r1 gc.Node
+regalloc(&r1, t.Type, t)
-gins(a, f, &r1)
-gmove(&r1, t)
-regfree(&r1)
-return
+gins(a, f, &r1)
+gmove(&r1, t)
+regfree(&r1)
+return
+}
 // requires register intermediate
 hard:
+var r1 gc.Node
 regalloc(&r1, cvt, t)
 gmove(f, &r1)
@@ -744,12 +747,12 @@ func gins(as int, f *gc.Node, t *gc.Node) *obj.Prog {
 }
 var af obj.Addr
-var at obj.Addr
 if f != nil {
-gc.Naddr(f, &af, 1)
+af = gc.Naddr(f, 1)
 }
+var at obj.Addr
 if t != nil {
-gc.Naddr(t, &at, 1)
+at = gc.Naddr(t, 1)
 }
 p := gc.Prog(as)
 if f != nil {
@@ -1402,7 +1405,7 @@ func sudoaddable(as int, n *gc.Node, a *obj.Addr) bool {
 reg1 := &clean[cleani-2]
 reg.Op = gc.OEMPTY
 reg1.Op = gc.OEMPTY
-gc.Naddr(n, a, 1)
+*a = gc.Naddr(n, 1)
 return true
 case gc.ODOT,
@@ -1426,7 +1429,7 @@ func sudoaddable(as int, n *gc.Node, a *obj.Addr) bool {
 n1.Type = n.Type
 n1.Xoffset += oary[0]
-gc.Naddr(&n1, a, 1)
+*a = gc.Naddr(&n1, 1)
 return true
 }
@@ -1454,7 +1457,7 @@ func sudoaddable(as int, n *gc.Node, a *obj.Addr) bool {
 a.Type = obj.TYPE_NONE
 a.Index = obj.TYPE_NONE
 fixlargeoffset(&n1)
-gc.Naddr(&n1, a, 1)
+*a = gc.Naddr(&n1, 1)
 return true
 case gc.OINDEX:
......
@@ -825,8 +825,7 @@ func copyu(p *obj.Prog, v *obj.Addr, s *obj.Addr) int {
 if p.As == obj.AVARDEF || p.As == obj.AVARKILL {
 return 0
 }
-var info gc.ProgInfo
-info = proginfo(p)
+info := proginfo(p)
 if (info.Reguse|info.Regset)&RtoB(int(v.Reg)) != 0 {
 return 2
......
@@ -1148,12 +1148,14 @@ rsrc:
 // requires register destination
 rdst:
-regalloc(&r1, t.Type, t)
+{
+regalloc(&r1, t.Type, t)
-gins(a, f, &r1)
-gmove(&r1, t)
-regfree(&r1)
-return
+gins(a, f, &r1)
+gmove(&r1, t)
+regfree(&r1)
+return
+}
 // requires register intermediate
 hard:
@@ -1845,10 +1847,10 @@ func gins(as int, f *gc.Node, t *gc.Node) *obj.Prog {
 var af obj.Addr
 var at obj.Addr
 if f != nil {
-gc.Naddr(f, &af, 1)
+af = gc.Naddr(f, 1)
 }
 if t != nil {
-gc.Naddr(t, &at, 1)
+at = gc.Naddr(t, 1)
 }
 p := gc.Prog(as)
 if f != nil {
......
@@ -76,7 +76,7 @@ func zerorange(p *obj.Prog, frame int64, lo int64, hi int64) *obj.Prog {
 p.Reg = ppc64.REGSP
 p = appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0)
 f := gc.Sysfunc("duffzero")
-gc.Naddr(f, &p.To, 1)
+p.To = gc.Naddr(f, 1)
 gc.Afunclit(&p.To, f)
 p.To.Offset = 4 * (128 - cnt/int64(gc.Widthptr))
 } else {
......
@@ -669,12 +669,14 @@ func gmove(f *gc.Node, t *gc.Node) {
 // requires register destination
 rdst:
-regalloc(&r1, t.Type, t)
+{
+regalloc(&r1, t.Type, t)
-gins(a, f, &r1)
-gmove(&r1, t)
-regfree(&r1)
-return
+gins(a, f, &r1)
+gmove(&r1, t)
+regfree(&r1)
+return
+}
 // requires register intermediate
 hard:
@@ -698,10 +700,10 @@ func gins(as int, f *gc.Node, t *gc.Node) *obj.Prog {
 at := obj.Addr(obj.Addr{})
 if f != nil {
-gc.Naddr(f, &af, 1)
+af = gc.Naddr(f, 1)
 }
 if t != nil {
-gc.Naddr(t, &at, 1)
+at = gc.Naddr(t, 1)
 }
 p := (*obj.Prog)(gc.Prog(as))
 if f != nil {
......
@@ -596,7 +596,6 @@ func makepartialcall(fn *Node, t0 *Type, meth *Node) *Node {
 declare(xfunc.Nname, PFUNC)
 // Declare and initialize variable holding receiver.
-var body *NodeList
 xfunc.Needctxt = true
 cv := Nod(OCLOSUREVAR, nil, nil)
@@ -613,6 +612,7 @@ func makepartialcall(fn *Node, t0 *Type, meth *Node) *Node {
 ptr.Used = 1
 ptr.Curfn = xfunc
 xfunc.Dcl = list(xfunc.Dcl, ptr)
+var body *NodeList
 if Isptr[rcvrtype.Etype] || Isinter(rcvrtype) {
 ptr.Ntype = typenod(rcvrtype)
 body = list(body, Nod(OAS, ptr, cv))
......
@@ -313,7 +313,6 @@ func variter(vl *NodeList, t *Node, el *NodeList) *NodeList {
 * new_name_list [[type] = expr_list]
 */
 func constiter(vl *NodeList, t *Node, cl *NodeList) *NodeList {
-var vv *NodeList
 if cl == nil {
 if t != nil {
 Yyerror("const declaration cannot have type without expression")
@@ -329,6 +328,7 @@ func constiter(vl *NodeList, t *Node, cl *NodeList) *NodeList {
 var v *Node
 var c *Node
+var vv *NodeList
 for ; vl != nil; vl = vl.Next {
 if cl == nil {
 Yyerror("missing value in const declaration")
......
@@ -274,8 +274,7 @@ func markautoused(p *obj.Prog) {
 }
 }
-func Naddr(n *Node, a *obj.Addr, canemitcode int) {
-*a = obj.Addr{}
+func Naddr(n *Node, canemitcode int) (a obj.Addr) {
 if n == nil {
 return
 }
@@ -294,7 +293,8 @@ func Naddr(n *Node, a *obj.Addr, canemitcode int) {
 switch n.Op {
 default:
-Fatal("naddr: bad %v %v", Oconv(int(n.Op), 0), Ctxt.Dconv(a))
+a := a // copy to let escape into Ctxt.Dconv
+Fatal("naddr: bad %v %v", Oconv(int(n.Op), 0), Ctxt.Dconv(&a))
 case OREGISTER:
 a.Type = obj.TYPE_REG
@@ -338,7 +338,7 @@ func Naddr(n *Node, a *obj.Addr, canemitcode int) {
 a.Offset = n.Xoffset
 case OCFUNC:
-Naddr(n.Left, a, canemitcode)
+a = Naddr(n.Left, canemitcode)
 a.Sym = Linksym(n.Left.Sym)
 case ONAME:
@@ -408,7 +408,7 @@ func Naddr(n *Node, a *obj.Addr, canemitcode int) {
 a.Offset = Mpgetfix(n.Val.U.Xval)
 case CTSTR:
-datagostring(n.Val.U.Sval, a)
+datagostring(n.Val.U.Sval, &a)
 case CTBOOL:
 a.Sym = nil
@@ -422,19 +422,20 @@ func Naddr(n *Node, a *obj.Addr, canemitcode int) {
 }
 case OADDR:
-Naddr(n.Left, a, canemitcode)
+a = Naddr(n.Left, canemitcode)
 a.Etype = uint8(Tptr)
 if Thearch.Thechar != '5' && Thearch.Thechar != '9' { // TODO(rsc): Do this even for arm, ppc64.
 a.Width = int64(Widthptr)
 }
 if a.Type != obj.TYPE_MEM {
-Fatal("naddr: OADDR %v (from %v)", Ctxt.Dconv(a), Oconv(int(n.Left.Op), 0))
+a := a // copy to let escape into Ctxt.Dconv
+Fatal("naddr: OADDR %v (from %v)", Ctxt.Dconv(&a), Oconv(int(n.Left.Op), 0))
 }
 a.Type = obj.TYPE_ADDR
 // itable of interface value
 case OITAB:
-Naddr(n.Left, a, canemitcode)
+a = Naddr(n.Left, canemitcode)
 if a.Type == obj.TYPE_CONST && a.Offset == 0 {
 break // itab(nil)
@@ -444,7 +445,7 @@ func Naddr(n *Node, a *obj.Addr, canemitcode int) {
 // pointer in a string or slice
 case OSPTR:
-Naddr(n.Left, a, canemitcode)
+a = Naddr(n.Left, canemitcode)
 if a.Type == obj.TYPE_CONST && a.Offset == 0 {
 break // ptr(nil)
@@ -455,7 +456,7 @@ func Naddr(n *Node, a *obj.Addr, canemitcode int) {
 // len of string or slice
 case OLEN:
-Naddr(n.Left, a, canemitcode)
+a = Naddr(n.Left, canemitcode)
 if a.Type == obj.TYPE_CONST && a.Offset == 0 {
 break // len(nil)
@@ -471,7 +472,7 @@ func Naddr(n *Node, a *obj.Addr, canemitcode int) {
 // cap of string or slice
 case OCAP:
-Naddr(n.Left, a, canemitcode)
+a = Naddr(n.Left, canemitcode)
 if a.Type == obj.TYPE_CONST && a.Offset == 0 {
 break // cap(nil)
@@ -485,6 +486,7 @@ func Naddr(n *Node, a *obj.Addr, canemitcode int) {
 a.Width = int64(Widthint)
 }
 }
+return
 }
 func newplist() *obj.Plist {
......
@@ -556,13 +556,11 @@ func isfunny(n *Node) bool {
 // initialized, because any use of a variable must come after its
 // initialization.
 func progeffects(prog *obj.Prog, vars []*Node, uevar *Bvec, varkill *Bvec, avarinit *Bvec) {
-var info ProgInfo
 bvresetall(uevar)
 bvresetall(varkill)
 bvresetall(avarinit)
-info = Thearch.Proginfo(prog)
+info := Thearch.Proginfo(prog)
 if prog.As == obj.ARET {
 // Return instructions implicitly read all the arguments. For
 // the sake of correctness, out arguments must be read. For the
@@ -1087,8 +1085,8 @@ func newpcdataprog(prog *obj.Prog, index int32) *obj.Prog {
 Nodconst(&to, Types[TINT32], int64(index))
 pcdata := unlinkedprog(obj.APCDATA)
 pcdata.Lineno = prog.Lineno
-Naddr(&from, &pcdata.From, 0)
-Naddr(&to, &pcdata.To, 0)
+pcdata.From = Naddr(&from, 0)
+pcdata.To = Naddr(&to, 0)
 return pcdata
 }
@@ -1296,7 +1294,6 @@ func livenessepilogue(lv *Liveness) {
 any := bvalloc(nvars)
 all := bvalloc(nvars)
 ambig := bvalloc(localswords() * obj.BitsPerPointer)
-var msg []string
 nmsg := int32(0)
 startmsg := int32(0)
@@ -1392,6 +1389,7 @@ func livenessepilogue(lv *Liveness) {
 var fmt_ string
 var next *obj.Prog
 var numlive int32
+var msg []string
 for i := int32(0); i < int32(len(lv.cfg)); i++ {
 bb = lv.cfg[i]
......
@@ -136,7 +136,6 @@ out:
 func walkrange(n *Node) {
 t := n.Type
-var init *NodeList
 a := n.Right
 lno := int(setlineno(a))
@@ -154,9 +153,8 @@ func walkrange(n *Node) {
 // to avoid erroneous processing by racewalk.
 n.List = nil
-var hv2 *Node
 var body *NodeList
+var init *NodeList
 switch t.Etype {
 default:
 Fatal("walkrange")
@@ -366,6 +364,7 @@ func walkrange(n *Node) {
 init = list(init, Nod(OAS, hv1, nil))
 var a *Node
+var hv2 *Node
 if v2 == nil {
 a = Nod(OAS, hv1, mkcall("stringiter", Types[TINT], nil, ha, hv1))
 } else {
......
@@ -3235,9 +3235,6 @@ func sliceany(n *Node, init **NodeList) *Node {
 // Checking src[lb:hb:cb] or src[lb:hb].
 // if chk0 || chk1 || chk2 { panicslice() }
-var chk0 *Node // cap(src) < cb
-var chk1 *Node // cb < hb for src[lb:hb:cb]; cap(src) < hb for src[lb:hb]
-var chk2 *Node // hb < lb
 // All comparisons are unsigned to avoid testing < 0.
 bt := Types[Simtype[TUINT]]
@@ -3254,6 +3251,7 @@ func sliceany(n *Node, init **NodeList) *Node {
 bound = cheapexpr(conv(bound, bt), init)
+var chk0 *Node // cap(src) < cb
 if cb != nil {
 cb = cheapexpr(conv(cb, bt), init)
 if bounded == 0 {
@@ -3264,6 +3262,7 @@ func sliceany(n *Node, init **NodeList) *Node {
 Fatal("slice3 with cb == N") // rejected by parser
 }
+var chk1 *Node // cb < hb for src[lb:hb:cb]; cap(src) < hb for src[lb:hb]
 if hb != nil {
 hb = cheapexpr(conv(hb, bt), init)
 if bounded == 0 {
@@ -3285,6 +3284,7 @@ func sliceany(n *Node, init **NodeList) *Node {
 hb = cheapexpr(conv(hb, bt), init)
 }
+var chk2 *Node // hb < lb
 if lb != nil {
 lb = cheapexpr(conv(lb, bt), init)
 if bounded == 0 {
@@ -3432,14 +3432,6 @@ func walkcompare(np **Node, init **NodeList) {
 r = n.Left
 }
-var call *Node
-var a *Node
-var cmpl *Node
-var cmpr *Node
-var andor int
-var expr *Node
-var needsize int
-var t *Type
 if l != nil {
 x := temp(r.Type)
 ok := temp(Types[TBOOL])
@@ -3464,12 +3456,13 @@ func walkcompare(np **Node, init **NodeList) {
 r = Nod(OOROR, Nod(ONOT, ok, nil), Nod(ONE, x, r))
 }
 *init = list(*init, expr)
-goto ret
+finishcompare(np, n, r, init)
+return
 }
 // Must be comparison of array or struct.
 // Otherwise back end handles it.
-t = n.Left.Type
+t := n.Left.Type
 switch t.Etype {
 default:
@@ -3484,11 +3477,11 @@ func walkcompare(np **Node, init **NodeList) {
 break
 }
-cmpl = n.Left
+cmpl := n.Left
 for cmpl != nil && cmpl.Op == OCONVNOP {
 cmpl = cmpl.Left
 }
-cmpr = n.Right
+cmpr := n.Right
 for cmpr != nil && cmpr.Op == OCONVNOP {
 cmpr = cmpr.Left
 }
@@ -3498,7 +3491,7 @@ func walkcompare(np **Node, init **NodeList) {
 }
 l = temp(Ptrto(t))
-a = Nod(OAS, l, Nod(OADDR, cmpl, nil))
+a := Nod(OAS, l, Nod(OADDR, cmpl, nil))
 a.Right.Etype = 1 // addr does not escape
 typecheck(&a, Etop)
 *init = list(*init, a)
@@ -3509,12 +3502,12 @@ func walkcompare(np **Node, init **NodeList) {
 typecheck(&a, Etop)
 *init = list(*init, a)
-expr = nil
-andor = OANDAND
+andor := OANDAND
 if n.Op == ONE {
 andor = OOROR
 }
+var expr *Node
 if t.Etype == TARRAY && t.Bound <= 4 && issimple[t.Type.Etype] {
 // Four or fewer elements of a basic type.
 // Unroll comparisons.
@@ -3534,8 +3527,8 @@ func walkcompare(np **Node, init **NodeList) {
 if expr == nil {
 expr = Nodbool(n.Op == OEQ)
 }
-r = expr
-goto ret
+finishcompare(np, n, expr, init)
+return
 }
 if t.Etype == TSTRUCT && countfield(t) <= 4 {
@@ -3560,12 +3553,13 @@ func walkcompare(np **Node, init **NodeList) {
 if expr == nil {
 expr = Nodbool(n.Op == OEQ)
 }
-r = expr
-goto ret
+finishcompare(np, n, expr, init)
+return
 }
 // Chose not to inline. Call equality function directly.
-call = Nod(OCALL, eqfor(t, &needsize), nil)
+var needsize int
+call := Nod(OCALL, eqfor(t, &needsize), nil)
 call.List = list(call.List, l)
 call.List = list(call.List, r)
@@ -3576,19 +3570,23 @@ func walkcompare(np **Node, init **NodeList) {
 if n.Op != OEQ {
 r = Nod(ONOT, r, nil)
 }
-goto ret
-ret:
-typecheck(&r, Erv)
-walkexpr(&r, init)
+finishcompare(np, n, r, init)
+return
+}
+func finishcompare(np **Node, n, r *Node, init **NodeList) {
+// Using np here to avoid passing &r to typecheck.
+*np = r
+typecheck(np, Erv)
+walkexpr(np, init)
+r = *np
 if r.Type != n.Type {
 r = Nod(OCONVNOP, r, nil)
 r.Type = n.Type
 r.Typecheck = 1
+*np = r
 }
-*np = r
-return
 }
 func samecheap(a *Node, b *Node) bool {
......