Commit e7cc9c4f authored by Keith Randall

cmd/compile: delete lots of the legacy backend

It's not everything, but it is a good start.

I tried to make the CL delete only.  goimports forced
a few exceptions to that rule.

Update #16357

Change-Id: I041925cb2fe68bb7ae1617af862b22c48da649c1
Reviewed-on: https://go-review.googlesource.com/29168
Run-TryBot: Keith Randall <khr@golang.org>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Brad Fitzpatrick <bradfitz@golang.org>
Reviewed-by: Josh Bleecher Snyder <josharian@gmail.com>
Reviewed-by: Martin Möhrmann <martisch@uos.de>
parent 3134ab3c
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package amd64

import (
    "cmd/compile/internal/gc"
    "cmd/internal/obj"
    "cmd/internal/obj/x86"
)

func blockcopy(n, ns *gc.Node, osrc, odst, w int64) {
    var noddi gc.Node
    gc.Nodreg(&noddi, gc.Types[gc.Tptr], x86.REG_DI)
    var nodsi gc.Node
    gc.Nodreg(&nodsi, gc.Types[gc.Tptr], x86.REG_SI)

    var nodl gc.Node
    var nodr gc.Node
    if n.Ullman >= ns.Ullman {
        gc.Agenr(n, &nodr, &nodsi)
        if ns.Op == gc.ONAME {
            gc.Gvardef(ns)
        }
        gc.Agenr(ns, &nodl, &noddi)
    } else {
        if ns.Op == gc.ONAME {
            gc.Gvardef(ns)
        }
        gc.Agenr(ns, &nodl, &noddi)
        gc.Agenr(n, &nodr, &nodsi)
    }

    if nodl.Reg != x86.REG_DI {
        gmove(&nodl, &noddi)
    }
    if nodr.Reg != x86.REG_SI {
        gmove(&nodr, &nodsi)
    }
    gc.Regfree(&nodl)
    gc.Regfree(&nodr)

    c := w % 8 // bytes
    q := w / 8 // quads

    var oldcx gc.Node
    var cx gc.Node
    savex(x86.REG_CX, &cx, &oldcx, nil, gc.Types[gc.TINT64])

    // if we are copying forward on the stack and
    // the src and dst overlap, then reverse direction
    if osrc < odst && odst < osrc+w {
        // reverse direction
        gins(x86.ASTD, nil, nil) // set direction flag
        if c > 0 {
            gconreg(addptr, w-1, x86.REG_SI)
            gconreg(addptr, w-1, x86.REG_DI)

            gconreg(movptr, c, x86.REG_CX)
            gins(x86.AREP, nil, nil)   // repeat
            gins(x86.AMOVSB, nil, nil) // MOVB *(SI)-,*(DI)-
        }

        if q > 0 {
            if c > 0 {
                gconreg(addptr, -7, x86.REG_SI)
                gconreg(addptr, -7, x86.REG_DI)
            } else {
                gconreg(addptr, w-8, x86.REG_SI)
                gconreg(addptr, w-8, x86.REG_DI)
            }

            gconreg(movptr, q, x86.REG_CX)
            gins(x86.AREP, nil, nil)   // repeat
            gins(x86.AMOVSQ, nil, nil) // MOVQ *(SI)-,*(DI)-
        }

        // we leave with the flag clear
        gins(x86.ACLD, nil, nil)
    } else {
        // normal direction
        if q > 128 || (gc.Nacl && q >= 4) || (obj.GOOS == "plan9" && q >= 4) {
            gconreg(movptr, q, x86.REG_CX)
            gins(x86.AREP, nil, nil)   // repeat
            gins(x86.AMOVSQ, nil, nil) // MOVQ *(SI)+,*(DI)+
        } else if q >= 4 {
            var oldx0 gc.Node
            var x0 gc.Node
            savex(x86.REG_X0, &x0, &oldx0, nil, gc.Types[gc.TFLOAT64])

            p := gins(obj.ADUFFCOPY, nil, nil)
            p.To.Type = obj.TYPE_ADDR
            p.To.Sym = gc.Linksym(gc.Pkglookup("duffcopy", gc.Runtimepkg))

            // 64 blocks taking 14 bytes each
            // see ../../../../runtime/mkduff.go
            p.To.Offset = 14 * (64 - q/2)
            restx(&x0, &oldx0)

            if q%2 != 0 {
                gins(x86.AMOVSQ, nil, nil) // MOVQ *(SI)+,*(DI)+
            }
        } else if !gc.Nacl && c == 0 {
            // We don't need the MOVSQ side-effect of updating SI and DI,
            // and issuing a sequence of MOVQs directly is faster.
            nodsi.Op = gc.OINDREG
            noddi.Op = gc.OINDREG

            for q > 0 {
                gmove(&nodsi, &cx) // MOVQ x+(SI),CX
                gmove(&cx, &noddi) // MOVQ CX,x+(DI)
                nodsi.Xoffset += 8
                noddi.Xoffset += 8
                q--
            }
        } else {
            for q > 0 {
                gins(x86.AMOVSQ, nil, nil) // MOVQ *(SI)+,*(DI)+
                q--
            }
        }

        // copy the remaining c bytes
        if w < 4 || c <= 1 || (odst < osrc && osrc < odst+w) {
            for c > 0 {
                gins(x86.AMOVSB, nil, nil) // MOVB *(SI)+,*(DI)+
                c--
            }
        } else if w < 8 || c <= 4 {
            nodsi.Op = gc.OINDREG
            noddi.Op = gc.OINDREG
            cx.Type = gc.Types[gc.TINT32]
            nodsi.Type = gc.Types[gc.TINT32]
            noddi.Type = gc.Types[gc.TINT32]
            if c > 4 {
                nodsi.Xoffset = 0
                noddi.Xoffset = 0
                gmove(&nodsi, &cx)
                gmove(&cx, &noddi)
            }

            nodsi.Xoffset = c - 4
            noddi.Xoffset = c - 4
            gmove(&nodsi, &cx)
            gmove(&cx, &noddi)
        } else {
            nodsi.Op = gc.OINDREG
            noddi.Op = gc.OINDREG
            cx.Type = gc.Types[gc.TINT64]
            nodsi.Type = gc.Types[gc.TINT64]
            noddi.Type = gc.Types[gc.TINT64]
            nodsi.Xoffset = c - 8
            noddi.Xoffset = c - 8
            gmove(&nodsi, &cx)
            gmove(&cx, &noddi)
        }
    }

    restx(&cx, &oldcx)
}
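Two of the decisions blockcopy makes above are easy to check in isolation: the overlap test that forces a backward copy, and the entry offset into runtime duffcopy. The sketch below is ordinary Go, not compiler code; the 14-byte block size comes from the "64 blocks taking 14 bytes each" comment above, and the function names are made up for illustration.

package main

import "fmt"

// mustCopyBackward reports whether copying w bytes forward from osrc
// to odst would overwrite source bytes before they are read -- the
// same test blockcopy uses before emitting STD.
func mustCopyBackward(osrc, odst, w int64) bool {
    return osrc < odst && odst < osrc+w
}

// duffcopyOffset mirrors p.To.Offset = 14 * (64 - q/2): each duffcopy
// block moves two 8-byte words in 14 bytes of code, so jumping in at
// this offset executes exactly q/2 blocks; an odd word is finished
// with one trailing MOVSQ.
func duffcopyOffset(q int64) (offset int64, trailingMOVSQ bool) {
    return 14 * (64 - q/2), q%2 != 0
}

func main() {
    fmt.Println(mustCopyBackward(0, 8, 16)) // true: dst overlaps the tail of src
    fmt.Println(mustCopyBackward(8, 0, 16)) // false: forward copy is safe
    fmt.Println(duffcopyOffset(9))          // 840 true: run the last 4 blocks, then one MOVSQ
}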
@@ -11,20 +11,13 @@ import (
 )

 var (
-    addptr = x86.AADDQ
-    movptr = x86.AMOVQ
     leaptr = x86.ALEAQ
-    cmpptr = x86.ACMPQ
 )

 func betypeinit() {
     if obj.GOARCH == "amd64p32" {
-        addptr = x86.AADDL
-        movptr = x86.AMOVL
         leaptr = x86.ALEAL
-        cmpptr = x86.ACMPL
     }

     if gc.Ctxt.Flag_dynlink || obj.GOOS == "nacl" {
         resvd = append(resvd, x86.REG_R15)
     }
@@ -50,40 +43,10 @@ func Main() {
     gc.Thearch.FREGMAX = x86.REG_X15
     gc.Thearch.MAXWIDTH = 1 << 50

-    gc.Thearch.AddIndex = addindex
     gc.Thearch.Betypeinit = betypeinit
-    gc.Thearch.Cgen_bmul = cgen_bmul
-    gc.Thearch.Cgen_hmul = cgen_hmul
-    gc.Thearch.Cgen_shift = cgen_shift
-    gc.Thearch.Clearfat = clearfat
     gc.Thearch.Defframe = defframe
-    gc.Thearch.Dodiv = dodiv
-    gc.Thearch.Excise = excise
-    gc.Thearch.Expandchecks = expandchecks
-    gc.Thearch.Getg = getg
     gc.Thearch.Gins = gins
-    gc.Thearch.Ginsboolval = ginsboolval
-    gc.Thearch.Ginscmp = ginscmp
-    gc.Thearch.Ginscon = ginscon
-    gc.Thearch.Ginsnop = ginsnop
-    gc.Thearch.Gmove = gmove
-    gc.Thearch.Peep = peep
     gc.Thearch.Proginfo = proginfo
-    gc.Thearch.Regtyp = regtyp
-    gc.Thearch.Sameaddr = sameaddr
-    gc.Thearch.Smallindir = smallindir
-    gc.Thearch.Stackaddr = stackaddr
-    gc.Thearch.Blockcopy = blockcopy
-    gc.Thearch.Sudoaddable = sudoaddable
-    gc.Thearch.Sudoclean = sudoclean
-    gc.Thearch.Excludedregs = excludedregs
-    gc.Thearch.RtoB = RtoB
-    gc.Thearch.FtoB = FtoB
-    gc.Thearch.BtoR = BtoR
-    gc.Thearch.BtoF = BtoF
-    gc.Thearch.Optoas = optoas
-    gc.Thearch.Doregbits = doregbits
-    gc.Thearch.Regnames = regnames
     gc.Thearch.SSARegToReg = ssaRegToReg
     gc.Thearch.SSAMarkMoves = ssaMarkMoves
...
This diff is collapsed.
@@ -30,72 +30,7 @@
 package amd64

-import (
-    "cmd/compile/internal/gc"
-    "cmd/internal/obj/x86"
-)
+import "cmd/internal/obj/x86"

-const (
-    NREGVAR = 32
-)
-
-var regname = []string{
-    ".AX",
-    ".CX",
-    ".DX",
-    ".BX",
-    ".SP",
-    ".BP",
-    ".SI",
-    ".DI",
-    ".R8",
-    ".R9",
-    ".R10",
-    ".R11",
-    ".R12",
-    ".R13",
-    ".R14",
-    ".R15",
-    ".X0",
-    ".X1",
-    ".X2",
-    ".X3",
-    ".X4",
-    ".X5",
-    ".X6",
-    ".X7",
-    ".X8",
-    ".X9",
-    ".X10",
-    ".X11",
-    ".X12",
-    ".X13",
-    ".X14",
-    ".X15",
-}
-
-func regnames(n *int) []string {
-    *n = NREGVAR
-    return regname
-}
-
-func excludedregs() uint64 {
-    return RtoB(x86.REG_SP)
-}
-
-func doregbits(r int) uint64 {
-    b := uint64(0)
-    if r >= x86.REG_AX && r <= x86.REG_R15 {
-        b |= RtoB(r)
-    } else if r >= x86.REG_AL && r <= x86.REG_R15B {
-        b |= RtoB(r - x86.REG_AL + x86.REG_AX)
-    } else if r >= x86.REG_AH && r <= x86.REG_BH {
-        b |= RtoB(r - x86.REG_AH + x86.REG_AX)
-    } else if r >= x86.REG_X0 && r <= x86.REG_X0+15 {
-        b |= FtoB(r)
-    }
-    return b
-}
-
 // For ProgInfo.
 const (

@@ -115,38 +50,3 @@ func RtoB(r int) uint64 {
     }
     return 1 << uint(r-x86.REG_AX)
 }
-
-func BtoR(b uint64) int {
-    b &= 0xffff
-    if gc.Nacl {
-        b &^= (1<<(x86.REG_BP-x86.REG_AX) | 1<<(x86.REG_R15-x86.REG_AX))
-    } else if gc.Ctxt.Framepointer_enabled {
-        // BP is part of the calling convention if framepointer_enabled.
-        b &^= (1 << (x86.REG_BP - x86.REG_AX))
-    }
-    if b == 0 {
-        return 0
-    }
-    return gc.Bitno(b) + x86.REG_AX
-}
-
-/*
- * bit reg
- * 16 X0
- * ...
- * 31 X15
- */
-func FtoB(f int) uint64 {
-    if f < x86.REG_X0 || f > x86.REG_X15 {
-        return 0
-    }
-    return 1 << uint(f-x86.REG_X0+16)
-}
-
-func BtoF(b uint64) int {
-    b &= 0xFFFF0000
-    if b == 0 {
-        return 0
-    }
-    return gc.Bitno(b) - 16 + x86.REG_X0
-}
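The deleted RtoB/FtoB/BtoR/BtoF quartet maps registers to bits of a uint64 mask: integer registers in bits 0-15, SSE registers in bits 16-31. A standalone sketch of the round trip, using math/bits in place of gc.Bitno; the regAX and regX0 base values are hypothetical stand-ins for x86.REG_AX and x86.REG_X0:

package main

import (
    "fmt"
    "math/bits"
)

const (
    regAX = 100 // hypothetical numeric id of AX
    regX0 = 200 // hypothetical numeric id of X0
)

func rtoB(r int) uint64 { return 1 << uint(r-regAX) }      // integer reg -> bits 0-15
func ftoB(f int) uint64 { return 1 << uint(f-regX0+16) }   // SSE reg -> bits 16-31

func btoR(b uint64) int {
    b &= 0xffff
    if b == 0 {
        return 0
    }
    return bits.TrailingZeros64(b) + regAX // lowest set bit, like gc.Bitno
}

func btoF(b uint64) int {
    b &= 0xffff0000
    if b == 0 {
        return 0
    }
    return bits.TrailingZeros64(b) - 16 + regX0
}

func main() {
    fmt.Println(btoR(rtoB(regAX+3)) == regAX+3) // true: integer round trip
    fmt.Println(btoF(ftoB(regX0+5)) == regX0+5) // true: SSE round trip
}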
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package arm

import (
    "cmd/compile/internal/gc"
    "cmd/internal/obj"
    "cmd/internal/obj/arm"
)

/*
 * generate array index into res.
 * n might be any size; res is 32-bit.
 * returns Prog* to patch to panic call.
 */
func cgenindex(n *gc.Node, res *gc.Node, bounded bool) *obj.Prog {
    if !gc.Is64(n.Type) {
        gc.Cgen(n, res)
        return nil
    }

    var tmp gc.Node
    gc.Tempname(&tmp, gc.Types[gc.TINT64])
    gc.Cgen(n, &tmp)
    var lo gc.Node
    var hi gc.Node
    split64(&tmp, &lo, &hi)
    gmove(&lo, res)
    if bounded {
        splitclean()
        return nil
    }

    var n1 gc.Node
    gc.Regalloc(&n1, gc.Types[gc.TINT32], nil)
    var n2 gc.Node
    gc.Regalloc(&n2, gc.Types[gc.TINT32], nil)
    var zero gc.Node
    gc.Nodconst(&zero, gc.Types[gc.TINT32], 0)
    gmove(&hi, &n1)
    gmove(&zero, &n2)
    gins(arm.ACMP, &n1, &n2)
    gc.Regfree(&n2)
    gc.Regfree(&n1)
    splitclean()
    return gc.Gbranch(arm.ABNE, nil, -1)
}

func igenindex(n *gc.Node, res *gc.Node, bounded bool) *obj.Prog {
    gc.Tempname(res, n.Type)
    return cgenindex(n, res, bounded)
}
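cgenindex above narrows a 64-bit index to the 32 bits ARM can address with and, unless the access is provably bounded, branches to the panic path when the high word is nonzero (which also catches negative indexes). The same check restated as ordinary Go, purely for illustration:

package main

import "fmt"

// index32 mirrors what cgenindex arranges in generated code: take the
// low word, and fail when the high word is nonzero and the access is
// not already known to be in bounds.
func index32(n int64, bounded bool) (idx uint32, ok bool) {
    lo := uint32(uint64(n))
    hi := uint32(uint64(n) >> 32)
    if !bounded && hi != 0 {
        return 0, false // would branch to the out-of-range panic
    }
    return lo, true
}

func main() {
    fmt.Println(index32(7, false))     // 7 true
    fmt.Println(index32(1<<33, false)) // 0 false: high word nonzero
    fmt.Println(index32(-1, false))    // 0 false: hi = 0xffffffff
}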
func blockcopy(n, res *gc.Node, osrc, odst, w int64) {
    // determine alignment.
    // want to avoid unaligned access, so have to use
    // smaller operations for less aligned types.
    // for example moving [4]byte must use 4 MOVB not 1 MOVW.
    align := int(n.Type.Align)

    var op obj.As
    switch align {
    default:
        gc.Fatalf("sgen: invalid alignment %d for %v", align, n.Type)

    case 1:
        op = arm.AMOVB

    case 2:
        op = arm.AMOVH

    case 4:
        op = arm.AMOVW
    }

    if w%int64(align) != 0 {
        gc.Fatalf("sgen: unaligned size %d (align=%d) for %v", w, align, n.Type)
    }
    c := int32(w / int64(align))

    if osrc%int64(align) != 0 || odst%int64(align) != 0 {
        gc.Fatalf("sgen: unaligned offset src %d or dst %d (align %d)", osrc, odst, align)
    }

    // if we are copying forward on the stack and
    // the src and dst overlap, then reverse direction
    dir := align

    if osrc < odst && odst < osrc+w {
        dir = -dir
    }

    if op == arm.AMOVW && !gc.Nacl && dir > 0 && c >= 4 && c <= 128 {
        var r0 gc.Node
        r0.Op = gc.OREGISTER
        r0.Reg = arm.REG_R0
        var r1 gc.Node
        r1.Op = gc.OREGISTER
        r1.Reg = arm.REG_R0 + 1
        var r2 gc.Node
        r2.Op = gc.OREGISTER
        r2.Reg = arm.REG_R0 + 2

        var src gc.Node
        gc.Regalloc(&src, gc.Types[gc.Tptr], &r1)
        var dst gc.Node
        gc.Regalloc(&dst, gc.Types[gc.Tptr], &r2)
        if n.Ullman >= res.Ullman {
            // eval n first
            gc.Agen(n, &src)

            if res.Op == gc.ONAME {
                gc.Gvardef(res)
            }
            gc.Agen(res, &dst)
        } else {
            // eval res first
            if res.Op == gc.ONAME {
                gc.Gvardef(res)
            }
            gc.Agen(res, &dst)
            gc.Agen(n, &src)
        }

        var tmp gc.Node
        gc.Regalloc(&tmp, gc.Types[gc.Tptr], &r0)
        f := gc.Sysfunc("duffcopy")
        p := gins(obj.ADUFFCOPY, nil, f)
        gc.Afunclit(&p.To, f)

        // 8 and 128 = magic constants: see ../../runtime/asm_arm.s
        p.To.Offset = 8 * (128 - int64(c))

        gc.Regfree(&tmp)
        gc.Regfree(&src)
        gc.Regfree(&dst)
        return
    }

    var dst gc.Node
    var src gc.Node
    if n.Ullman >= res.Ullman {
        gc.Agenr(n, &dst, res) // temporarily use dst
        gc.Regalloc(&src, gc.Types[gc.Tptr], nil)
        gins(arm.AMOVW, &dst, &src)
        if res.Op == gc.ONAME {
            gc.Gvardef(res)
        }
        gc.Agen(res, &dst)
    } else {
        if res.Op == gc.ONAME {
            gc.Gvardef(res)
        }
        gc.Agenr(res, &dst, res)
        gc.Agenr(n, &src, nil)
    }

    var tmp gc.Node
    gc.Regalloc(&tmp, gc.Types[gc.TUINT32], nil)

    // set up end marker
    var nend gc.Node

    if c >= 4 {
        gc.Regalloc(&nend, gc.Types[gc.TUINT32], nil)

        p := gins(arm.AMOVW, &src, &nend)
        p.From.Type = obj.TYPE_ADDR
        if dir < 0 {
            p.From.Offset = int64(dir)
        } else {
            p.From.Offset = w
        }
    }

    // move src and dest to the end of block if necessary
    if dir < 0 {
        p := gins(arm.AMOVW, &src, &src)
        p.From.Type = obj.TYPE_ADDR
        p.From.Offset = w + int64(dir)

        p = gins(arm.AMOVW, &dst, &dst)
        p.From.Type = obj.TYPE_ADDR
        p.From.Offset = w + int64(dir)
    }

    // move
    if c >= 4 {
        p := gins(op, &src, &tmp)
        p.From.Type = obj.TYPE_MEM
        p.From.Offset = int64(dir)
        p.Scond |= arm.C_PBIT
        ploop := p

        p = gins(op, &tmp, &dst)
        p.To.Type = obj.TYPE_MEM
        p.To.Offset = int64(dir)
        p.Scond |= arm.C_PBIT

        p = gins(arm.ACMP, &src, nil)
        raddr(&nend, p)

        gc.Patch(gc.Gbranch(arm.ABNE, nil, 0), ploop)
        gc.Regfree(&nend)
    } else {
        var p *obj.Prog
        for ; c > 0; c-- {
            p = gins(op, &src, &tmp)
            p.From.Type = obj.TYPE_MEM
            p.From.Offset = int64(dir)
            p.Scond |= arm.C_PBIT

            p = gins(op, &tmp, &dst)
            p.To.Type = obj.TYPE_MEM
            p.To.Offset = int64(dir)
            p.Scond |= arm.C_PBIT
        }
    }

    gc.Regfree(&dst)
    gc.Regfree(&src)
    gc.Regfree(&tmp)
}
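Before emitting a single instruction, the ARM blockcopy above makes three setup decisions: pick the widest move the type's alignment allows, compute the element count, and flip direction when a forward copy would clobber the source. A sketch of just that planning step in plain Go; the op strings stand in for arm.AMOVB/AMOVH/AMOVW and nothing here is compiler code:

package main

import "fmt"

func planCopy(align, osrc, odst, w int64) (op string, count, dir int64, err error) {
    switch align {
    case 1:
        op = "MOVB"
    case 2:
        op = "MOVH"
    case 4:
        op = "MOVW"
    default:
        return "", 0, 0, fmt.Errorf("invalid alignment %d", align)
    }
    if w%align != 0 || osrc%align != 0 || odst%align != 0 {
        return "", 0, 0, fmt.Errorf("unaligned copy")
    }
    count = w / align
    dir = align // post-increment by one element per iteration
    if osrc < odst && odst < osrc+w {
        dir = -dir // copy backward so src bytes are read before overwritten
    }
    return op, count, dir, nil
}

func main() {
    fmt.Println(planCopy(4, 0, 8, 32)) // MOVW 8 -4 <nil>: overlapping, so backward
}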
This diff is collapsed.
@@ -28,38 +28,9 @@ func Main() {
     gc.Thearch.ReservedRegs = resvd

     gc.Thearch.Betypeinit = betypeinit
-    gc.Thearch.Cgen64 = cgen64
-    gc.Thearch.Cgen_hmul = cgen_hmul
-    gc.Thearch.Cgen_shift = cgen_shift
-    gc.Thearch.Clearfat = clearfat
-    gc.Thearch.Cmp64 = cmp64
     gc.Thearch.Defframe = defframe
-    gc.Thearch.Excise = excise
-    gc.Thearch.Expandchecks = expandchecks
-    gc.Thearch.Getg = getg
     gc.Thearch.Gins = gins
-    gc.Thearch.Ginscmp = ginscmp
-    gc.Thearch.Ginscon = ginscon
-    gc.Thearch.Ginsnop = ginsnop
-    gc.Thearch.Gmove = gmove
-    gc.Thearch.Cgenindex = cgenindex
-    gc.Thearch.Peep = peep
     gc.Thearch.Proginfo = proginfo
-    gc.Thearch.Regtyp = regtyp
-    gc.Thearch.Sameaddr = sameaddr
-    gc.Thearch.Smallindir = smallindir
-    gc.Thearch.Stackaddr = stackaddr
-    gc.Thearch.Blockcopy = blockcopy
-    gc.Thearch.Sudoaddable = sudoaddable
-    gc.Thearch.Sudoclean = sudoclean
-    gc.Thearch.Excludedregs = excludedregs
-    gc.Thearch.RtoB = RtoB
-    gc.Thearch.FtoB = RtoB
-    gc.Thearch.BtoR = BtoR
-    gc.Thearch.BtoF = BtoF
-    gc.Thearch.Optoas = optoas
-    gc.Thearch.Doregbits = doregbits
-    gc.Thearch.Regnames = regnames
     gc.Thearch.SSARegToReg = ssaRegToReg
     gc.Thearch.SSAMarkMoves = func(s *gc.SSAGenState, b *ssa.Block) {}
...
@@ -111,440 +111,9 @@ func appendpp(p *obj.Prog, as obj.As, ftype obj.AddrType, freg int, foffset int3
     return q
 }
-
-/*
- * generate high multiply
- * res = (nl * nr) >> wordsize
- */
-func cgen_hmul(nl *gc.Node, nr *gc.Node, res *gc.Node) {
-    if nl.Ullman < nr.Ullman {
-        nl, nr = nr, nl
-    }
-
-    t := nl.Type
-    w := t.Width * 8
-    var n1 gc.Node
-    gc.Regalloc(&n1, t, res)
-    gc.Cgen(nl, &n1)
-    var n2 gc.Node
-    gc.Regalloc(&n2, t, nil)
-    gc.Cgen(nr, &n2)
-    switch gc.Simtype[t.Etype] {
-    case gc.TINT8,
-        gc.TINT16:
-        gins(optoas(gc.OMUL, t), &n2, &n1)
-        gshift(arm.AMOVW, &n1, arm.SHIFT_AR, int32(w), &n1)
-
-    case gc.TUINT8,
-        gc.TUINT16:
-        gins(optoas(gc.OMUL, t), &n2, &n1)
-        gshift(arm.AMOVW, &n1, arm.SHIFT_LR, int32(w), &n1)
-
-    // perform a long multiplication.
-    case gc.TINT32,
-        gc.TUINT32:
-        var p *obj.Prog
-        if t.IsSigned() {
-            p = gins(arm.AMULL, &n2, nil)
-        } else {
-            p = gins(arm.AMULLU, &n2, nil)
-        }
-
-        // n2 * n1 -> (n1 n2)
-        p.Reg = n1.Reg
-        p.To.Type = obj.TYPE_REGREG
-        p.To.Reg = n1.Reg
-        p.To.Offset = int64(n2.Reg)
-
-    default:
-        gc.Fatalf("cgen_hmul %v", t)
-    }
-
-    gc.Cgen(&n1, res)
-    gc.Regfree(&n1)
-    gc.Regfree(&n2)
-}
-
-/*
- * generate shift according to op, one of:
- * res = nl << nr
- * res = nl >> nr
- */
-func cgen_shift(op gc.Op, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
-    if nl.Type.Width > 4 {
-        gc.Fatalf("cgen_shift %v", nl.Type)
-    }
-
-    w := int(nl.Type.Width * 8)
-
-    if op == gc.OLROT {
-        v := nr.Int64()
-        var n1 gc.Node
-        gc.Regalloc(&n1, nl.Type, res)
-        if w == 32 {
-            gc.Cgen(nl, &n1)
-            gshift(arm.AMOVW, &n1, arm.SHIFT_RR, int32(w)-int32(v), &n1)
-        } else {
-            var n2 gc.Node
-            gc.Regalloc(&n2, nl.Type, nil)
-            gc.Cgen(nl, &n2)
-            gshift(arm.AMOVW, &n2, arm.SHIFT_LL, int32(v), &n1)
-            gshift(arm.AORR, &n2, arm.SHIFT_LR, int32(w)-int32(v), &n1)
-            gc.Regfree(&n2)
-
-            // Ensure sign/zero-extended result.
-            gins(optoas(gc.OAS, nl.Type), &n1, &n1)
-        }
-
-        gmove(&n1, res)
-        gc.Regfree(&n1)
-        return
-    }
-
-    if nr.Op == gc.OLITERAL {
-        var n1 gc.Node
-        gc.Regalloc(&n1, nl.Type, res)
-        gc.Cgen(nl, &n1)
-        sc := uint64(nr.Int64())
-        if sc == 0 {
-        } else // nothing to do
-        if sc >= uint64(nl.Type.Width*8) {
-            if op == gc.ORSH && nl.Type.IsSigned() {
-                gshift(arm.AMOVW, &n1, arm.SHIFT_AR, int32(w), &n1)
-            } else {
-                gins(arm.AEOR, &n1, &n1)
-            }
-        } else {
-            if op == gc.ORSH && nl.Type.IsSigned() {
-                gshift(arm.AMOVW, &n1, arm.SHIFT_AR, int32(sc), &n1)
-            } else if op == gc.ORSH {
-                gshift(arm.AMOVW, &n1, arm.SHIFT_LR, int32(sc), &n1) // OLSH
-            } else {
-                gshift(arm.AMOVW, &n1, arm.SHIFT_LL, int32(sc), &n1)
-            }
-        }
-
-        if w < 32 && op == gc.OLSH {
-            gins(optoas(gc.OAS, nl.Type), &n1, &n1)
-        }
-        gmove(&n1, res)
-        gc.Regfree(&n1)
-        return
-    }
-
-    tr := nr.Type
-    var t gc.Node
-    var n1 gc.Node
-    var n2 gc.Node
-    var n3 gc.Node
-    if tr.Width > 4 {
-        var nt gc.Node
-        gc.Tempname(&nt, nr.Type)
-        if nl.Ullman >= nr.Ullman {
-            gc.Regalloc(&n2, nl.Type, res)
-            gc.Cgen(nl, &n2)
-            gc.Cgen(nr, &nt)
-            n1 = nt
-        } else {
-            gc.Cgen(nr, &nt)
-            gc.Regalloc(&n2, nl.Type, res)
-            gc.Cgen(nl, &n2)
-        }
-
-        var hi gc.Node
-        var lo gc.Node
-        split64(&nt, &lo, &hi)
-        gc.Regalloc(&n1, gc.Types[gc.TUINT32], nil)
-        gc.Regalloc(&n3, gc.Types[gc.TUINT32], nil)
-        gmove(&lo, &n1)
-        gmove(&hi, &n3)
-        splitclean()
-        gins(arm.ATST, &n3, nil)
-        gc.Nodconst(&t, gc.Types[gc.TUINT32], int64(w))
-        p1 := gins(arm.AMOVW, &t, &n1)
-        p1.Scond = arm.C_SCOND_NE
-        tr = gc.Types[gc.TUINT32]
-        gc.Regfree(&n3)
-    } else {
-        if nl.Ullman >= nr.Ullman {
-            gc.Regalloc(&n2, nl.Type, res)
-            gc.Cgen(nl, &n2)
-            gc.Regalloc(&n1, nr.Type, nil)
-            gc.Cgen(nr, &n1)
-        } else {
-            gc.Regalloc(&n1, nr.Type, nil)
-            gc.Cgen(nr, &n1)
-            gc.Regalloc(&n2, nl.Type, res)
-            gc.Cgen(nl, &n2)
-        }
-    }
-
-    // test for shift being 0
-    gins(arm.ATST, &n1, nil)
-    p3 := gc.Gbranch(arm.ABEQ, nil, -1)
-
-    // test and fix up large shifts
-    // TODO: if(!bounded), don't emit some of this.
-    gc.Regalloc(&n3, tr, nil)
-    gc.Nodconst(&t, gc.Types[gc.TUINT32], int64(w))
-    gmove(&t, &n3)
-    gins(arm.ACMP, &n1, &n3)
-    if op == gc.ORSH {
-        var p1 *obj.Prog
-        var p2 *obj.Prog
-        if nl.Type.IsSigned() {
-            p1 = gshift(arm.AMOVW, &n2, arm.SHIFT_AR, int32(w)-1, &n2)
-            p2 = gregshift(arm.AMOVW, &n2, arm.SHIFT_AR, &n1, &n2)
-        } else {
-            p1 = gins(arm.AEOR, &n2, &n2)
-            p2 = gregshift(arm.AMOVW, &n2, arm.SHIFT_LR, &n1, &n2)
-        }
-
-        p1.Scond = arm.C_SCOND_HS
-        p2.Scond = arm.C_SCOND_LO
-    } else {
-        p1 := gins(arm.AEOR, &n2, &n2)
-        p2 := gregshift(arm.AMOVW, &n2, arm.SHIFT_LL, &n1, &n2)
-        p1.Scond = arm.C_SCOND_HS
-        p2.Scond = arm.C_SCOND_LO
-    }
-
-    gc.Regfree(&n3)
-
-    gc.Patch(p3, gc.Pc)
-
-    // Left-shift of smaller word must be sign/zero-extended.
-    if w < 32 && op == gc.OLSH {
-        gins(optoas(gc.OAS, nl.Type), &n2, &n2)
-    }
-    gmove(&n2, res)
-
-    gc.Regfree(&n1)
-    gc.Regfree(&n2)
-}
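The constant-shift and large-shift fixup branches above encode the language rule the backend must preserve: a shift count at or beyond the operand width yields 0, except a signed right shift, which fills with the sign bit (the SHIFT_AR-by-w-1 trick). Go itself guarantees exactly these results for non-constant shifts, so the rule can be demonstrated directly:

package main

import "fmt"

func main() {
    var s uint = 40 // shift count >= the 32-bit operand width

    var x uint32 = 1
    fmt.Println(x << s) // 0: oversized left shift produces 0 (the AEOR case)

    var y int32 = -8
    fmt.Println(y >> s) // -1: signed right shift fills with the sign bit (the SHIFT_AR case)
}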
-
-func clearfat(nl *gc.Node) {
-    /* clear a fat object */
-    if gc.Debug['g'] != 0 {
-        gc.Dump("\nclearfat", nl)
-    }
-
-    w := uint32(nl.Type.Width)
-
-    // Avoid taking the address for simple enough types.
-    if gc.Componentgen(nil, nl) {
-        return
-    }
-
-    c := w % 4 // bytes
-    q := w / 4 // quads
-
-    if nl.Type.Align < 4 {
-        q = 0
-        c = w
-    }
-
-    var r0 gc.Node
-    r0.Op = gc.OREGISTER
-    r0.Reg = arm.REG_R0
-    var r1 gc.Node
-    r1.Op = gc.OREGISTER
-    r1.Reg = arm.REG_R1
-    var dst gc.Node
-    gc.Regalloc(&dst, gc.Types[gc.Tptr], &r1)
-    gc.Agen(nl, &dst)
-    var nc gc.Node
-    gc.Nodconst(&nc, gc.Types[gc.TUINT32], 0)
-    var nz gc.Node
-    gc.Regalloc(&nz, gc.Types[gc.TUINT32], &r0)
-    gc.Cgen(&nc, &nz)
-    if q > 128 {
-        var end gc.Node
-        gc.Regalloc(&end, gc.Types[gc.Tptr], nil)
-        p := gins(arm.AMOVW, &dst, &end)
-        p.From.Type = obj.TYPE_ADDR
-        p.From.Offset = int64(q) * 4
-
-        p = gins(arm.AMOVW, &nz, &dst)
-        p.To.Type = obj.TYPE_MEM
-        p.To.Offset = 4
-        p.Scond |= arm.C_PBIT
-        pl := p
-
-        p = gins(arm.ACMP, &dst, nil)
-        raddr(&end, p)
-        gc.Patch(gc.Gbranch(arm.ABNE, nil, 0), pl)
-        gc.Regfree(&end)
-    } else if q >= 4 && !gc.Nacl {
-        f := gc.Sysfunc("duffzero")
-        p := gins(obj.ADUFFZERO, nil, f)
-        gc.Afunclit(&p.To, f)
-
-        // 4 and 128 = magic constants: see ../../runtime/asm_arm.s
-        p.To.Offset = 4 * (128 - int64(q))
-    } else {
-        var p *obj.Prog
-        for q > 0 {
-            p = gins(arm.AMOVW, &nz, &dst)
-            p.To.Type = obj.TYPE_MEM
-            p.To.Offset = 4
-            p.Scond |= arm.C_PBIT
-
-            //print("1. %v\n", p);
-            q--
-        }
-    }
-
-    if c > 4 {
-        // Loop to zero unaligned memory.
-        var end gc.Node
-        gc.Regalloc(&end, gc.Types[gc.Tptr], nil)
-        p := gins(arm.AMOVW, &dst, &end)
-        p.From.Type = obj.TYPE_ADDR
-        p.From.Offset = int64(c)
-
-        p = gins(arm.AMOVB, &nz, &dst)
-        p.To.Type = obj.TYPE_MEM
-        p.To.Offset = 1
-        p.Scond |= arm.C_PBIT
-        pl := p
-
-        p = gins(arm.ACMP, &dst, nil)
-        raddr(&end, p)
-        gc.Patch(gc.Gbranch(arm.ABNE, nil, 0), pl)
-        gc.Regfree(&end)
-        c = 0
-    }
-
-    var p *obj.Prog
-    for c > 0 {
-        p = gins(arm.AMOVB, &nz, &dst)
-        p.To.Type = obj.TYPE_MEM
-        p.To.Offset = 1
-        p.Scond |= arm.C_PBIT
-
-        //print("2. %v\n", p);
-        c--
-    }
-
-    gc.Regfree(&dst)
-    gc.Regfree(&nz)
-}
-
-// Called after regopt and peep have run.
-// Expand CHECKNIL pseudo-op into actual nil pointer check.
-func expandchecks(firstp *obj.Prog) {
-    var reg int
-    var p1 *obj.Prog
-
-    for p := firstp; p != nil; p = p.Link {
-        if p.As != obj.ACHECKNIL {
-            continue
-        }
-        if gc.Debug_checknil != 0 && p.Lineno > 1 { // p->lineno==1 in generated wrappers
-            gc.Warnl(p.Lineno, "generated nil check")
-        }
-        if p.From.Type != obj.TYPE_REG {
-            gc.Fatalf("invalid nil check %v", p)
-        }
-        reg = int(p.From.Reg)
-
-        // check is
-        //	CMP arg, $0
-        //	MOV.EQ arg, 0(arg)
-        p1 = gc.Ctxt.NewProg()
-
-        gc.Clearp(p1)
-        p1.Link = p.Link
-        p.Link = p1
-        p1.Lineno = p.Lineno
-        p1.Pc = 9999
-        p1.As = arm.AMOVW
-        p1.From.Type = obj.TYPE_REG
-        p1.From.Reg = int16(reg)
-        p1.To.Type = obj.TYPE_MEM
-        p1.To.Reg = int16(reg)
-        p1.To.Offset = 0
-        p1.Scond = arm.C_SCOND_EQ
-        p.As = arm.ACMP
-        p.From.Type = obj.TYPE_CONST
-        p.From.Reg = 0
-        p.From.Offset = 0
-        p.Reg = int16(reg)
-    }
-}
 func ginsnop() {
     var r gc.Node
     gc.Nodreg(&r, gc.Types[gc.TINT], arm.REG_R0)
     p := gins(arm.AAND, &r, &r)
     p.Scond = arm.C_SCOND_EQ
 }
-
-/*
- * generate
- *	as $c, n
- */
-func ginscon(as obj.As, c int64, n *gc.Node) {
-    var n1 gc.Node
-    gc.Nodconst(&n1, gc.Types[gc.TINT32], c)
-    var n2 gc.Node
-    gc.Regalloc(&n2, gc.Types[gc.TINT32], nil)
-    gmove(&n1, &n2)
-    gins(as, &n2, n)
-    gc.Regfree(&n2)
-}
-
-func ginscmp(op gc.Op, t *gc.Type, n1, n2 *gc.Node, likely int) *obj.Prog {
-    if t.IsInteger() && n1.Op == gc.OLITERAL && n1.Int64() == 0 && n2.Op != gc.OLITERAL {
-        op = gc.Brrev(op)
-        n1, n2 = n2, n1
-    }
-
-    var r1, r2, g1, g2 gc.Node
-    gc.Regalloc(&r1, t, n1)
-    gc.Regalloc(&g1, n1.Type, &r1)
-    gc.Cgen(n1, &g1)
-    gmove(&g1, &r1)
-    if t.IsInteger() && n2.Op == gc.OLITERAL && n2.Int64() == 0 {
-        gins(arm.ACMP, &r1, n2)
-    } else {
-        gc.Regalloc(&r2, t, n2)
-        gc.Regalloc(&g2, n1.Type, &r2)
-        gc.Cgen(n2, &g2)
-        gmove(&g2, &r2)
-        gins(optoas(gc.OCMP, t), &r1, &r2)
-        gc.Regfree(&g2)
-        gc.Regfree(&r2)
-    }
-
-    gc.Regfree(&g1)
-    gc.Regfree(&r1)
-    return gc.Gbranch(optoas(op, t), nil, likely)
-}
-
-// addr += index*width if possible.
-func addindex(index *gc.Node, width int64, addr *gc.Node) bool {
-    switch width {
-    case 2:
-        gshift(arm.AADD, index, arm.SHIFT_LL, 1, addr)
-        return true
-    case 4:
-        gshift(arm.AADD, index, arm.SHIFT_LL, 2, addr)
-        return true
-    case 8:
-        gshift(arm.AADD, index, arm.SHIFT_LL, 3, addr)
-        return true
-    }
-    return false
-}
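addindex above folds the element-size multiply into ARM's barrel shifter: for widths 2, 4, and 8 it emits a single ADD with the index shifted left by log2(width). The arithmetic it relies on, in plain Go (illustrative only):

package main

import "fmt"

func addIndex(addr, index, width uint32) (uint32, bool) {
    switch width {
    case 2:
        return addr + index<<1, true
    case 4:
        return addr + index<<2, true
    case 8:
        return addr + index<<3, true
    }
    return addr, false // other widths need a real multiply
}

func main() {
    fmt.Println(addIndex(1000, 7, 4)) // 1028 true: 1000 + 7*4 in one shifted add
}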
-
-// res = runtime.getg()
-func getg(res *gc.Node) {
-    var n1 gc.Node
-    gc.Nodreg(&n1, res.Type, arm.REGG)
-    gmove(&n1, res)
-}
This diff is collapsed.
@@ -31,59 +31,6 @@
 package arm

 import "cmd/internal/obj/arm"
-import "cmd/compile/internal/gc"
-
-const (
-    NREGVAR = 32
-)
-
-var regname = []string{
-    ".R0",
-    ".R1",
-    ".R2",
-    ".R3",
-    ".R4",
-    ".R5",
-    ".R6",
-    ".R7",
-    ".R8",
-    ".R9",
-    ".R10",
-    ".R11",
-    ".R12",
-    ".R13",
-    ".R14",
-    ".R15",
-    ".F0",
-    ".F1",
-    ".F2",
-    ".F3",
-    ".F4",
-    ".F5",
-    ".F6",
-    ".F7",
-    ".F8",
-    ".F9",
-    ".F10",
-    ".F11",
-    ".F12",
-    ".F13",
-    ".F14",
-    ".F15",
-}
-
-func regnames(n *int) []string {
-    *n = NREGVAR
-    return regname
-}
-
-func excludedregs() uint64 {
-    return RtoB(arm.REGSP) | RtoB(arm.REGLINK) | RtoB(arm.REGPC)
-}
-
-func doregbits(r int) uint64 {
-    return 0
-}

 /*
  * bit reg

@@ -116,21 +63,3 @@ func RtoB(r int) uint64 {
     return 0
 }
-
-func BtoR(b uint64) int {
-    // TODO Allow R0 and R1, but be careful with a 0 return
-    // TODO Allow R9. Only R10 is reserved now (just g, not m).
-    b &= 0x11fc // excluded R9 and R10 for m and g, but not R12
-    if b == 0 {
-        return 0
-    }
-    return gc.Bitno(b) + arm.REG_R0
-}
-
-func BtoF(b uint64) int {
-    b &= 0xfffc0000
-    if b == 0 {
-        return 0
-    }
-    return gc.Bitno(b) - 16 + arm.REG_F0
-}
This diff is collapsed.
@@ -29,38 +29,9 @@ func Main() {
     gc.Thearch.ReservedRegs = resvd

     gc.Thearch.Betypeinit = betypeinit
-    gc.Thearch.Cgen_hmul = cgen_hmul
-    gc.Thearch.AddSetCarry = AddSetCarry
-    gc.Thearch.RightShiftWithCarry = RightShiftWithCarry
-    gc.Thearch.Cgen_shift = cgen_shift
-    gc.Thearch.Clearfat = clearfat
     gc.Thearch.Defframe = defframe
-    gc.Thearch.Dodiv = dodiv
-    gc.Thearch.Excise = excise
-    gc.Thearch.Expandchecks = expandchecks
-    gc.Thearch.Getg = getg
     gc.Thearch.Gins = gins
-    gc.Thearch.Ginscmp = ginscmp
-    gc.Thearch.Ginscon = ginscon
-    gc.Thearch.Ginsnop = ginsnop
-    gc.Thearch.Gmove = gmove
-    gc.Thearch.Peep = peep
     gc.Thearch.Proginfo = proginfo
-    gc.Thearch.Regtyp = regtyp
-    gc.Thearch.Sameaddr = sameaddr
-    gc.Thearch.Smallindir = smallindir
-    gc.Thearch.Stackaddr = stackaddr
-    gc.Thearch.Blockcopy = blockcopy
-    gc.Thearch.Sudoaddable = sudoaddable
-    gc.Thearch.Sudoclean = sudoclean
-    gc.Thearch.Excludedregs = excludedregs
-    gc.Thearch.RtoB = RtoB
-    gc.Thearch.FtoB = RtoB
-    gc.Thearch.BtoR = BtoR
-    gc.Thearch.BtoF = BtoF
-    gc.Thearch.Optoas = optoas
-    gc.Thearch.Doregbits = doregbits
-    gc.Thearch.Regnames = regnames
     gc.Thearch.SSARegToReg = ssaRegToReg
     gc.Thearch.SSAMarkMoves = func(s *gc.SSAGenState, b *ssa.Block) {}
...
This diff is collapsed.
@@ -302,34 +302,6 @@ func allocauto(ptxt *obj.Prog) {
     }
 }

-func Cgen_checknil(n *Node) {
-    if Disable_checknil != 0 {
-        return
-    }
-
-    // Ideally we wouldn't see any integer types here, but we do.
-    if n.Type == nil || (!n.Type.IsPtr() && !n.Type.IsInteger() && n.Type.Etype != TUNSAFEPTR) {
-        Dump("checknil", n)
-        Fatalf("bad checknil")
-    }
-
-    // Most architectures require that the address to be checked is
-    // in a register (it could be in memory).
-    needsReg := !Thearch.LinkArch.InFamily(sys.AMD64, sys.I386)
-
-    // Move the address to be checked into a register if necessary.
-    if (needsReg && n.Op != OREGISTER) || !n.Addable || n.Op == OLITERAL {
-        var reg Node
-        Regalloc(&reg, Types[Tptr], n)
-        Cgen(n, &reg)
-        Thearch.Gins(obj.ACHECKNIL, &reg, nil)
-        Regfree(&reg)
-        return
-    }
-
-    Thearch.Gins(obj.ACHECKNIL, n, nil)
-}
-
 func compile(fn *Node) {
     if Newproc == nil {
         Newproc = Sysfunc("newproc")
...
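The deleted Cgen_checknil hinges on one architecture-dependent decision: x86 can fault on a memory operand directly, so the nil check can name memory, while other architectures must first load the address into a register. A plain-Go restatement of that predicate; the family strings stand in for sys.AMD64 and sys.I386 and the function name is made up:

package main

import "fmt"

// needsRegister follows the condition in Cgen_checknil above:
// (needsReg && n.Op != OREGISTER) || !n.Addable || n.Op == OLITERAL.
func needsRegister(family string, inRegister, addressable, isLiteral bool) bool {
    needsReg := family != "amd64" && family != "386"
    return (needsReg && !inRegister) || !addressable || isLiteral
}

func main() {
    fmt.Println(needsRegister("arm", false, true, false))   // true: load address into a register first
    fmt.Println(needsRegister("amd64", false, true, false)) // false: CHECKNIL can take memory
}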
This diff is collapsed.
@@ -22,7 +22,6 @@ func TestSizeof(t *testing.T) {
     _32bit uintptr // size on 32bit platforms
     _64bit uintptr // size on 64bit platforms
 }{
-    {Flow{}, 52, 88},
     {Func{}, 96, 168},
     {Name{}, 52, 80},
     {Node{}, 92, 144},
...
This diff is collapsed.