Commit 0a94daa3 authored by Josh Bleecher Snyder

cmd/compile: funnel SSA Prog creation through SSAGenState

Step one in eliminating Prog-related globals.

Passes toolstash-check -all.

Updates #15756

Change-Id: I3b777fb5a7716f2d9da3067fbd94c28ca894a465
Reviewed-on: https://go-review.googlesource.com/38450
Run-TryBot: Josh Bleecher Snyder <josharian@gmail.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Matthew Dempsky <mdempsky@google.com>
parent 3b39f523
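
The mechanical pattern in the diff below is that the per-architecture back ends stop calling the package-level constructor gc.Prog and instead call the Prog method on the *gc.SSAGenState already passed to ssaGenValue and ssaGenBlock; helpers such as opregreg, genshift, and genregshift now take that state as their first argument. The following sketch only illustrates the idea; the SSAGenState fields and the body of Prog are assumptions (as step one of removing Prog-related globals, and given the toolstash-clean result, the method presumably still forwards to the existing global machinery), not the actual cmd/compile source.

// Sketch only: assumed shape of the new API, not the real gc package code.
package gc

import "cmd/internal/obj"

// SSAGenState carries per-function state for SSA -> Prog generation.
// (Real fields such as Branches are elided here.)
type SSAGenState struct {
	// ...
}

// Prog creates and returns a new instruction for the function being
// compiled. Funneling every Prog through this method is what allows the
// globals behind the old gc.Prog to be removed in later steps; for now it
// is assumed to simply delegate, keeping the generated code identical.
func (s *SSAGenState) Prog(as obj.As) *obj.Prog {
	return Prog(as) // old package-level constructor, unchanged behavior
}

// Call sites change mechanically, e.g. in amd64's ssaGenValue:
//   before: p := gc.Prog(x86.AMOVQ)
//   after:  p := s.Prog(x86.AMOVQ)
// and helpers thread the state through:
//   before: func opregreg(op obj.As, dest, src int16) *obj.Prog
//   after:  func opregreg(s *gc.SSAGenState, op obj.As, dest, src int16) *obj.Prog
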
...@@ -107,8 +107,8 @@ func moveByType(t ssa.Type) obj.As { ...@@ -107,8 +107,8 @@ func moveByType(t ssa.Type) obj.As {
// dest := dest(To) op src(From) // dest := dest(To) op src(From)
// and also returns the created obj.Prog so it // and also returns the created obj.Prog so it
// may be further adjusted (offset, scale, etc). // may be further adjusted (offset, scale, etc).
func opregreg(op obj.As, dest, src int16) *obj.Prog { func opregreg(s *gc.SSAGenState, op obj.As, dest, src int16) *obj.Prog {
p := gc.Prog(op) p := s.Prog(op)
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
p.To.Reg = dest p.To.Reg = dest
...@@ -154,13 +154,13 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -154,13 +154,13 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
r2 := v.Args[1].Reg() r2 := v.Args[1].Reg()
switch { switch {
case r == r1: case r == r1:
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = r2 p.From.Reg = r2
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
p.To.Reg = r p.To.Reg = r
case r == r2: case r == r2:
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = r1 p.From.Reg = r1
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
...@@ -172,7 +172,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -172,7 +172,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
} else { } else {
asm = x86.ALEAL asm = x86.ALEAL
} }
p := gc.Prog(asm) p := s.Prog(asm)
p.From.Type = obj.TYPE_MEM p.From.Type = obj.TYPE_MEM
p.From.Reg = r1 p.From.Reg = r1
p.From.Scale = 1 p.From.Scale = 1
...@@ -196,7 +196,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -196,7 +196,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
if r != v.Args[0].Reg() { if r != v.Args[0].Reg() {
v.Fatalf("input[0] and output not in same register %s", v.LongString()) v.Fatalf("input[0] and output not in same register %s", v.LongString())
} }
opregreg(v.Op.Asm(), r, v.Args[1].Reg()) opregreg(s, v.Op.Asm(), r, v.Args[1].Reg())
case ssa.OpAMD64DIVQU, ssa.OpAMD64DIVLU, ssa.OpAMD64DIVWU: case ssa.OpAMD64DIVQU, ssa.OpAMD64DIVLU, ssa.OpAMD64DIVWU:
// Arg[0] (the dividend) is in AX. // Arg[0] (the dividend) is in AX.
...@@ -206,14 +206,14 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -206,14 +206,14 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
r := v.Args[1].Reg() r := v.Args[1].Reg()
// Zero extend dividend. // Zero extend dividend.
c := gc.Prog(x86.AXORL) c := s.Prog(x86.AXORL)
c.From.Type = obj.TYPE_REG c.From.Type = obj.TYPE_REG
c.From.Reg = x86.REG_DX c.From.Reg = x86.REG_DX
c.To.Type = obj.TYPE_REG c.To.Type = obj.TYPE_REG
c.To.Reg = x86.REG_DX c.To.Reg = x86.REG_DX
// Issue divide. // Issue divide.
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = r p.From.Reg = r
...@@ -229,46 +229,46 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -229,46 +229,46 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
var c *obj.Prog var c *obj.Prog
switch v.Op { switch v.Op {
case ssa.OpAMD64DIVQ: case ssa.OpAMD64DIVQ:
c = gc.Prog(x86.ACMPQ) c = s.Prog(x86.ACMPQ)
case ssa.OpAMD64DIVL: case ssa.OpAMD64DIVL:
c = gc.Prog(x86.ACMPL) c = s.Prog(x86.ACMPL)
case ssa.OpAMD64DIVW: case ssa.OpAMD64DIVW:
c = gc.Prog(x86.ACMPW) c = s.Prog(x86.ACMPW)
} }
c.From.Type = obj.TYPE_REG c.From.Type = obj.TYPE_REG
c.From.Reg = r c.From.Reg = r
c.To.Type = obj.TYPE_CONST c.To.Type = obj.TYPE_CONST
c.To.Offset = -1 c.To.Offset = -1
j1 := gc.Prog(x86.AJEQ) j1 := s.Prog(x86.AJEQ)
j1.To.Type = obj.TYPE_BRANCH j1.To.Type = obj.TYPE_BRANCH
// Sign extend dividend. // Sign extend dividend.
switch v.Op { switch v.Op {
case ssa.OpAMD64DIVQ: case ssa.OpAMD64DIVQ:
gc.Prog(x86.ACQO) s.Prog(x86.ACQO)
case ssa.OpAMD64DIVL: case ssa.OpAMD64DIVL:
gc.Prog(x86.ACDQ) s.Prog(x86.ACDQ)
case ssa.OpAMD64DIVW: case ssa.OpAMD64DIVW:
gc.Prog(x86.ACWD) s.Prog(x86.ACWD)
} }
// Issue divide. // Issue divide.
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = r p.From.Reg = r
// Skip over -1 fixup code. // Skip over -1 fixup code.
j2 := gc.Prog(obj.AJMP) j2 := s.Prog(obj.AJMP)
j2.To.Type = obj.TYPE_BRANCH j2.To.Type = obj.TYPE_BRANCH
// Issue -1 fixup code. // Issue -1 fixup code.
// n / -1 = -n // n / -1 = -n
n1 := gc.Prog(x86.ANEGQ) n1 := s.Prog(x86.ANEGQ)
n1.To.Type = obj.TYPE_REG n1.To.Type = obj.TYPE_REG
n1.To.Reg = x86.REG_AX n1.To.Reg = x86.REG_AX
// n % -1 == 0 // n % -1 == 0
n2 := gc.Prog(x86.AXORL) n2 := s.Prog(x86.AXORL)
n2.From.Type = obj.TYPE_REG n2.From.Type = obj.TYPE_REG
n2.From.Reg = x86.REG_DX n2.From.Reg = x86.REG_DX
n2.To.Type = obj.TYPE_REG n2.To.Type = obj.TYPE_REG
...@@ -287,14 +287,14 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -287,14 +287,14 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
// Arg[0] is already in AX as it's the only register we allow // Arg[0] is already in AX as it's the only register we allow
// and DX is the only output we care about (the high bits) // and DX is the only output we care about (the high bits)
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[1].Reg() p.From.Reg = v.Args[1].Reg()
// IMULB puts the high portion in AH instead of DL, // IMULB puts the high portion in AH instead of DL,
// so move it to DL for consistency // so move it to DL for consistency
if v.Type.Size() == 1 { if v.Type.Size() == 1 {
m := gc.Prog(x86.AMOVB) m := s.Prog(x86.AMOVB)
m.From.Type = obj.TYPE_REG m.From.Type = obj.TYPE_REG
m.From.Reg = x86.REG_AH m.From.Reg = x86.REG_AH
m.To.Type = obj.TYPE_REG m.To.Type = obj.TYPE_REG
...@@ -304,14 +304,14 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -304,14 +304,14 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
case ssa.OpAMD64MULQU2: case ssa.OpAMD64MULQU2:
// Arg[0] is already in AX as it's the only register we allow // Arg[0] is already in AX as it's the only register we allow
// results hi in DX, lo in AX // results hi in DX, lo in AX
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[1].Reg() p.From.Reg = v.Args[1].Reg()
case ssa.OpAMD64DIVQU2: case ssa.OpAMD64DIVQU2:
// Arg[0], Arg[1] are already in Dx, AX, as they're the only registers we allow // Arg[0], Arg[1] are already in Dx, AX, as they're the only registers we allow
// results q in AX, r in DX // results q in AX, r in DX
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[2].Reg() p.From.Reg = v.Args[2].Reg()
...@@ -323,12 +323,12 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -323,12 +323,12 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
if r != v.Args[0].Reg() { if r != v.Args[0].Reg() {
v.Fatalf("input[0] and output not in same register %s", v.LongString()) v.Fatalf("input[0] and output not in same register %s", v.LongString())
} }
p := gc.Prog(x86.AADDQ) p := s.Prog(x86.AADDQ)
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
p.To.Reg = r p.To.Reg = r
p.From.Reg = v.Args[1].Reg() p.From.Reg = v.Args[1].Reg()
p = gc.Prog(x86.ARCRQ) p = s.Prog(x86.ARCRQ)
p.From.Type = obj.TYPE_CONST p.From.Type = obj.TYPE_CONST
p.From.Offset = 1 p.From.Offset = 1
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
...@@ -350,7 +350,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -350,7 +350,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
} else { } else {
asm = x86.AINCL asm = x86.AINCL
} }
p := gc.Prog(asm) p := s.Prog(asm)
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
p.To.Reg = r p.To.Reg = r
return return
...@@ -362,12 +362,12 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -362,12 +362,12 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
} else { } else {
asm = x86.ADECL asm = x86.ADECL
} }
p := gc.Prog(asm) p := s.Prog(asm)
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
p.To.Reg = r p.To.Reg = r
return return
} }
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_CONST p.From.Type = obj.TYPE_CONST
p.From.Offset = v.AuxInt p.From.Offset = v.AuxInt
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
...@@ -380,7 +380,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -380,7 +380,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
} else { } else {
asm = x86.ALEAL asm = x86.ALEAL
} }
p := gc.Prog(asm) p := s.Prog(asm)
p.From.Type = obj.TYPE_MEM p.From.Type = obj.TYPE_MEM
p.From.Reg = a p.From.Reg = a
p.From.Offset = v.AuxInt p.From.Offset = v.AuxInt
...@@ -392,7 +392,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -392,7 +392,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
if r != v.Args[0].Reg() { if r != v.Args[0].Reg() {
v.Fatalf("input[0] and output not in same register %s", v.LongString()) v.Fatalf("input[0] and output not in same register %s", v.LongString())
} }
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[1].Reg() p.From.Reg = v.Args[1].Reg()
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
...@@ -403,7 +403,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -403,7 +403,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
if r != v.Args[0].Reg() { if r != v.Args[0].Reg() {
v.Fatalf("input[0] and output not in same register %s", v.LongString()) v.Fatalf("input[0] and output not in same register %s", v.LongString())
} }
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_CONST p.From.Type = obj.TYPE_CONST
p.From.Offset = v.AuxInt p.From.Offset = v.AuxInt
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
...@@ -426,14 +426,14 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -426,14 +426,14 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
if r != v.Args[0].Reg() { if r != v.Args[0].Reg() {
v.Fatalf("input[0] and output not in same register %s", v.LongString()) v.Fatalf("input[0] and output not in same register %s", v.LongString())
} }
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_CONST p.From.Type = obj.TYPE_CONST
p.From.Offset = v.AuxInt p.From.Offset = v.AuxInt
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
p.To.Reg = r p.To.Reg = r
case ssa.OpAMD64SBBQcarrymask, ssa.OpAMD64SBBLcarrymask: case ssa.OpAMD64SBBQcarrymask, ssa.OpAMD64SBBLcarrymask:
r := v.Reg() r := v.Reg()
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = r p.From.Reg = r
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
...@@ -441,7 +441,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -441,7 +441,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
case ssa.OpAMD64LEAQ1, ssa.OpAMD64LEAQ2, ssa.OpAMD64LEAQ4, ssa.OpAMD64LEAQ8: case ssa.OpAMD64LEAQ1, ssa.OpAMD64LEAQ2, ssa.OpAMD64LEAQ4, ssa.OpAMD64LEAQ8:
r := v.Args[0].Reg() r := v.Args[0].Reg()
i := v.Args[1].Reg() i := v.Args[1].Reg()
p := gc.Prog(x86.ALEAQ) p := s.Prog(x86.ALEAQ)
switch v.Op { switch v.Op {
case ssa.OpAMD64LEAQ1: case ssa.OpAMD64LEAQ1:
p.From.Scale = 1 p.From.Scale = 1
...@@ -462,7 +462,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -462,7 +462,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg() p.To.Reg = v.Reg()
case ssa.OpAMD64LEAQ, ssa.OpAMD64LEAL: case ssa.OpAMD64LEAQ, ssa.OpAMD64LEAL:
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg() p.From.Reg = v.Args[0].Reg()
gc.AddAux(&p.From, v) gc.AddAux(&p.From, v)
...@@ -471,27 +471,27 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -471,27 +471,27 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
case ssa.OpAMD64CMPQ, ssa.OpAMD64CMPL, ssa.OpAMD64CMPW, ssa.OpAMD64CMPB, case ssa.OpAMD64CMPQ, ssa.OpAMD64CMPL, ssa.OpAMD64CMPW, ssa.OpAMD64CMPB,
ssa.OpAMD64TESTQ, ssa.OpAMD64TESTL, ssa.OpAMD64TESTW, ssa.OpAMD64TESTB, ssa.OpAMD64TESTQ, ssa.OpAMD64TESTL, ssa.OpAMD64TESTW, ssa.OpAMD64TESTB,
ssa.OpAMD64BTL, ssa.OpAMD64BTQ: ssa.OpAMD64BTL, ssa.OpAMD64BTQ:
opregreg(v.Op.Asm(), v.Args[1].Reg(), v.Args[0].Reg()) opregreg(s, v.Op.Asm(), v.Args[1].Reg(), v.Args[0].Reg())
case ssa.OpAMD64UCOMISS, ssa.OpAMD64UCOMISD: case ssa.OpAMD64UCOMISS, ssa.OpAMD64UCOMISD:
// Go assembler has swapped operands for UCOMISx relative to CMP, // Go assembler has swapped operands for UCOMISx relative to CMP,
// must account for that right here. // must account for that right here.
opregreg(v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg()) opregreg(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg())
case ssa.OpAMD64CMPQconst, ssa.OpAMD64CMPLconst, ssa.OpAMD64CMPWconst, ssa.OpAMD64CMPBconst: case ssa.OpAMD64CMPQconst, ssa.OpAMD64CMPLconst, ssa.OpAMD64CMPWconst, ssa.OpAMD64CMPBconst:
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg() p.From.Reg = v.Args[0].Reg()
p.To.Type = obj.TYPE_CONST p.To.Type = obj.TYPE_CONST
p.To.Offset = v.AuxInt p.To.Offset = v.AuxInt
case ssa.OpAMD64TESTQconst, ssa.OpAMD64TESTLconst, ssa.OpAMD64TESTWconst, ssa.OpAMD64TESTBconst, case ssa.OpAMD64TESTQconst, ssa.OpAMD64TESTLconst, ssa.OpAMD64TESTWconst, ssa.OpAMD64TESTBconst,
ssa.OpAMD64BTLconst, ssa.OpAMD64BTQconst: ssa.OpAMD64BTLconst, ssa.OpAMD64BTQconst:
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_CONST p.From.Type = obj.TYPE_CONST
p.From.Offset = v.AuxInt p.From.Offset = v.AuxInt
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
p.To.Reg = v.Args[0].Reg() p.To.Reg = v.Args[0].Reg()
case ssa.OpAMD64MOVLconst, ssa.OpAMD64MOVQconst: case ssa.OpAMD64MOVLconst, ssa.OpAMD64MOVQconst:
x := v.Reg() x := v.Reg()
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_CONST p.From.Type = obj.TYPE_CONST
p.From.Offset = v.AuxInt p.From.Offset = v.AuxInt
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
...@@ -503,20 +503,20 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -503,20 +503,20 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
} }
case ssa.OpAMD64MOVSSconst, ssa.OpAMD64MOVSDconst: case ssa.OpAMD64MOVSSconst, ssa.OpAMD64MOVSDconst:
x := v.Reg() x := v.Reg()
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_FCONST p.From.Type = obj.TYPE_FCONST
p.From.Val = math.Float64frombits(uint64(v.AuxInt)) p.From.Val = math.Float64frombits(uint64(v.AuxInt))
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
p.To.Reg = x p.To.Reg = x
case ssa.OpAMD64MOVQload, ssa.OpAMD64MOVSSload, ssa.OpAMD64MOVSDload, ssa.OpAMD64MOVLload, ssa.OpAMD64MOVWload, ssa.OpAMD64MOVBload, ssa.OpAMD64MOVBQSXload, ssa.OpAMD64MOVWQSXload, ssa.OpAMD64MOVLQSXload, ssa.OpAMD64MOVOload: case ssa.OpAMD64MOVQload, ssa.OpAMD64MOVSSload, ssa.OpAMD64MOVSDload, ssa.OpAMD64MOVLload, ssa.OpAMD64MOVWload, ssa.OpAMD64MOVBload, ssa.OpAMD64MOVBQSXload, ssa.OpAMD64MOVWQSXload, ssa.OpAMD64MOVLQSXload, ssa.OpAMD64MOVOload:
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg() p.From.Reg = v.Args[0].Reg()
gc.AddAux(&p.From, v) gc.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg() p.To.Reg = v.Reg()
case ssa.OpAMD64MOVQloadidx8, ssa.OpAMD64MOVSDloadidx8: case ssa.OpAMD64MOVQloadidx8, ssa.OpAMD64MOVSDloadidx8:
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg() p.From.Reg = v.Args[0].Reg()
gc.AddAux(&p.From, v) gc.AddAux(&p.From, v)
...@@ -525,7 +525,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -525,7 +525,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg() p.To.Reg = v.Reg()
case ssa.OpAMD64MOVLloadidx4, ssa.OpAMD64MOVSSloadidx4: case ssa.OpAMD64MOVLloadidx4, ssa.OpAMD64MOVSSloadidx4:
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg() p.From.Reg = v.Args[0].Reg()
gc.AddAux(&p.From, v) gc.AddAux(&p.From, v)
...@@ -534,7 +534,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -534,7 +534,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg() p.To.Reg = v.Reg()
case ssa.OpAMD64MOVWloadidx2: case ssa.OpAMD64MOVWloadidx2:
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg() p.From.Reg = v.Args[0].Reg()
gc.AddAux(&p.From, v) gc.AddAux(&p.From, v)
...@@ -548,7 +548,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -548,7 +548,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
if i == x86.REG_SP { if i == x86.REG_SP {
r, i = i, r r, i = i, r
} }
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM p.From.Type = obj.TYPE_MEM
p.From.Reg = r p.From.Reg = r
p.From.Scale = 1 p.From.Scale = 1
...@@ -557,14 +557,14 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -557,14 +557,14 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg() p.To.Reg = v.Reg()
case ssa.OpAMD64MOVQstore, ssa.OpAMD64MOVSSstore, ssa.OpAMD64MOVSDstore, ssa.OpAMD64MOVLstore, ssa.OpAMD64MOVWstore, ssa.OpAMD64MOVBstore, ssa.OpAMD64MOVOstore: case ssa.OpAMD64MOVQstore, ssa.OpAMD64MOVSSstore, ssa.OpAMD64MOVSDstore, ssa.OpAMD64MOVLstore, ssa.OpAMD64MOVWstore, ssa.OpAMD64MOVBstore, ssa.OpAMD64MOVOstore:
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[1].Reg() p.From.Reg = v.Args[1].Reg()
p.To.Type = obj.TYPE_MEM p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg() p.To.Reg = v.Args[0].Reg()
gc.AddAux(&p.To, v) gc.AddAux(&p.To, v)
case ssa.OpAMD64MOVQstoreidx8, ssa.OpAMD64MOVSDstoreidx8: case ssa.OpAMD64MOVQstoreidx8, ssa.OpAMD64MOVSDstoreidx8:
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[2].Reg() p.From.Reg = v.Args[2].Reg()
p.To.Type = obj.TYPE_MEM p.To.Type = obj.TYPE_MEM
...@@ -573,7 +573,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -573,7 +573,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.To.Index = v.Args[1].Reg() p.To.Index = v.Args[1].Reg()
gc.AddAux(&p.To, v) gc.AddAux(&p.To, v)
case ssa.OpAMD64MOVSSstoreidx4, ssa.OpAMD64MOVLstoreidx4: case ssa.OpAMD64MOVSSstoreidx4, ssa.OpAMD64MOVLstoreidx4:
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[2].Reg() p.From.Reg = v.Args[2].Reg()
p.To.Type = obj.TYPE_MEM p.To.Type = obj.TYPE_MEM
...@@ -582,7 +582,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -582,7 +582,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.To.Index = v.Args[1].Reg() p.To.Index = v.Args[1].Reg()
gc.AddAux(&p.To, v) gc.AddAux(&p.To, v)
case ssa.OpAMD64MOVWstoreidx2: case ssa.OpAMD64MOVWstoreidx2:
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[2].Reg() p.From.Reg = v.Args[2].Reg()
p.To.Type = obj.TYPE_MEM p.To.Type = obj.TYPE_MEM
...@@ -596,7 +596,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -596,7 +596,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
if i == x86.REG_SP { if i == x86.REG_SP {
r, i = i, r r, i = i, r
} }
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[2].Reg() p.From.Reg = v.Args[2].Reg()
p.To.Type = obj.TYPE_MEM p.To.Type = obj.TYPE_MEM
...@@ -605,7 +605,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -605,7 +605,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.To.Index = i p.To.Index = i
gc.AddAux(&p.To, v) gc.AddAux(&p.To, v)
case ssa.OpAMD64MOVQstoreconst, ssa.OpAMD64MOVLstoreconst, ssa.OpAMD64MOVWstoreconst, ssa.OpAMD64MOVBstoreconst: case ssa.OpAMD64MOVQstoreconst, ssa.OpAMD64MOVLstoreconst, ssa.OpAMD64MOVWstoreconst, ssa.OpAMD64MOVBstoreconst:
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_CONST p.From.Type = obj.TYPE_CONST
sc := v.AuxValAndOff() sc := v.AuxValAndOff()
p.From.Offset = sc.Val() p.From.Offset = sc.Val()
...@@ -613,7 +613,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -613,7 +613,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.To.Reg = v.Args[0].Reg() p.To.Reg = v.Args[0].Reg()
gc.AddAux2(&p.To, v, sc.Off()) gc.AddAux2(&p.To, v, sc.Off())
case ssa.OpAMD64MOVQstoreconstidx1, ssa.OpAMD64MOVQstoreconstidx8, ssa.OpAMD64MOVLstoreconstidx1, ssa.OpAMD64MOVLstoreconstidx4, ssa.OpAMD64MOVWstoreconstidx1, ssa.OpAMD64MOVWstoreconstidx2, ssa.OpAMD64MOVBstoreconstidx1: case ssa.OpAMD64MOVQstoreconstidx1, ssa.OpAMD64MOVQstoreconstidx8, ssa.OpAMD64MOVLstoreconstidx1, ssa.OpAMD64MOVLstoreconstidx4, ssa.OpAMD64MOVWstoreconstidx1, ssa.OpAMD64MOVWstoreconstidx2, ssa.OpAMD64MOVBstoreconstidx1:
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_CONST p.From.Type = obj.TYPE_CONST
sc := v.AuxValAndOff() sc := v.AuxValAndOff()
p.From.Offset = sc.Val() p.From.Offset = sc.Val()
...@@ -639,17 +639,17 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -639,17 +639,17 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
case ssa.OpAMD64MOVLQSX, ssa.OpAMD64MOVWQSX, ssa.OpAMD64MOVBQSX, ssa.OpAMD64MOVLQZX, ssa.OpAMD64MOVWQZX, ssa.OpAMD64MOVBQZX, case ssa.OpAMD64MOVLQSX, ssa.OpAMD64MOVWQSX, ssa.OpAMD64MOVBQSX, ssa.OpAMD64MOVLQZX, ssa.OpAMD64MOVWQZX, ssa.OpAMD64MOVBQZX,
ssa.OpAMD64CVTTSS2SL, ssa.OpAMD64CVTTSD2SL, ssa.OpAMD64CVTTSS2SQ, ssa.OpAMD64CVTTSD2SQ, ssa.OpAMD64CVTTSS2SL, ssa.OpAMD64CVTTSD2SL, ssa.OpAMD64CVTTSS2SQ, ssa.OpAMD64CVTTSD2SQ,
ssa.OpAMD64CVTSS2SD, ssa.OpAMD64CVTSD2SS: ssa.OpAMD64CVTSS2SD, ssa.OpAMD64CVTSD2SS:
opregreg(v.Op.Asm(), v.Reg(), v.Args[0].Reg()) opregreg(s, v.Op.Asm(), v.Reg(), v.Args[0].Reg())
case ssa.OpAMD64CVTSL2SD, ssa.OpAMD64CVTSQ2SD, ssa.OpAMD64CVTSQ2SS, ssa.OpAMD64CVTSL2SS: case ssa.OpAMD64CVTSL2SD, ssa.OpAMD64CVTSQ2SD, ssa.OpAMD64CVTSQ2SS, ssa.OpAMD64CVTSL2SS:
r := v.Reg() r := v.Reg()
// Break false dependency on destination register. // Break false dependency on destination register.
opregreg(x86.AXORPS, r, r) opregreg(s, x86.AXORPS, r, r)
opregreg(v.Op.Asm(), r, v.Args[0].Reg()) opregreg(s, v.Op.Asm(), r, v.Args[0].Reg())
case ssa.OpAMD64ADDQmem, ssa.OpAMD64ADDLmem, ssa.OpAMD64SUBQmem, ssa.OpAMD64SUBLmem, case ssa.OpAMD64ADDQmem, ssa.OpAMD64ADDLmem, ssa.OpAMD64SUBQmem, ssa.OpAMD64SUBLmem,
ssa.OpAMD64ANDQmem, ssa.OpAMD64ANDLmem, ssa.OpAMD64ORQmem, ssa.OpAMD64ORLmem, ssa.OpAMD64ANDQmem, ssa.OpAMD64ANDLmem, ssa.OpAMD64ORQmem, ssa.OpAMD64ORLmem,
ssa.OpAMD64XORQmem, ssa.OpAMD64XORLmem, ssa.OpAMD64ADDSDmem, ssa.OpAMD64ADDSSmem, ssa.OpAMD64XORQmem, ssa.OpAMD64XORLmem, ssa.OpAMD64ADDSDmem, ssa.OpAMD64ADDSSmem,
ssa.OpAMD64SUBSDmem, ssa.OpAMD64SUBSSmem, ssa.OpAMD64MULSDmem, ssa.OpAMD64MULSSmem: ssa.OpAMD64SUBSDmem, ssa.OpAMD64SUBSSmem, ssa.OpAMD64MULSDmem, ssa.OpAMD64MULSSmem:
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[1].Reg() p.From.Reg = v.Args[1].Reg()
gc.AddAux(&p.From, v) gc.AddAux(&p.From, v)
...@@ -663,13 +663,13 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -663,13 +663,13 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
adj := duffAdj(v.AuxInt) adj := duffAdj(v.AuxInt)
var p *obj.Prog var p *obj.Prog
if adj != 0 { if adj != 0 {
p = gc.Prog(x86.AADDQ) p = s.Prog(x86.AADDQ)
p.From.Type = obj.TYPE_CONST p.From.Type = obj.TYPE_CONST
p.From.Offset = adj p.From.Offset = adj
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
p.To.Reg = x86.REG_DI p.To.Reg = x86.REG_DI
} }
p = gc.Prog(obj.ADUFFZERO) p = s.Prog(obj.ADUFFZERO)
p.To.Type = obj.TYPE_ADDR p.To.Type = obj.TYPE_ADDR
p.To.Sym = gc.Duffzero p.To.Sym = gc.Duffzero
p.To.Offset = off p.To.Offset = off
...@@ -678,9 +678,9 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -678,9 +678,9 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
v.Fatalf("MOVOconst can only do constant=0") v.Fatalf("MOVOconst can only do constant=0")
} }
r := v.Reg() r := v.Reg()
opregreg(x86.AXORPS, r, r) opregreg(s, x86.AXORPS, r, r)
case ssa.OpAMD64DUFFCOPY: case ssa.OpAMD64DUFFCOPY:
p := gc.Prog(obj.ADUFFCOPY) p := s.Prog(obj.ADUFFCOPY)
p.To.Type = obj.TYPE_ADDR p.To.Type = obj.TYPE_ADDR
p.To.Sym = gc.Duffcopy p.To.Sym = gc.Duffcopy
p.To.Offset = v.AuxInt p.To.Offset = v.AuxInt
...@@ -692,14 +692,14 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -692,14 +692,14 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
x := v.Args[0].Reg() x := v.Args[0].Reg()
y := v.Reg() y := v.Reg()
if x != y { if x != y {
opregreg(moveByType(v.Type), y, x) opregreg(s, moveByType(v.Type), y, x)
} }
case ssa.OpLoadReg: case ssa.OpLoadReg:
if v.Type.IsFlags() { if v.Type.IsFlags() {
v.Fatalf("load flags not implemented: %v", v.LongString()) v.Fatalf("load flags not implemented: %v", v.LongString())
return return
} }
p := gc.Prog(loadByType(v.Type)) p := s.Prog(loadByType(v.Type))
gc.AddrAuto(&p.From, v.Args[0]) gc.AddrAuto(&p.From, v.Args[0])
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg() p.To.Reg = v.Reg()
...@@ -709,7 +709,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -709,7 +709,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
v.Fatalf("store flags not implemented: %v", v.LongString()) v.Fatalf("store flags not implemented: %v", v.LongString())
return return
} }
p := gc.Prog(storeByType(v.Type)) p := s.Prog(storeByType(v.Type))
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg() p.From.Reg = v.Args[0].Reg()
gc.AddrAuto(&p.To, v) gc.AddrAuto(&p.To, v)
...@@ -722,7 +722,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -722,7 +722,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
// near CanUse1InsnTLS for a detailed explanation of these instructions. // near CanUse1InsnTLS for a detailed explanation of these instructions.
if x86.CanUse1InsnTLS(gc.Ctxt) { if x86.CanUse1InsnTLS(gc.Ctxt) {
// MOVQ (TLS), r // MOVQ (TLS), r
p := gc.Prog(x86.AMOVQ) p := s.Prog(x86.AMOVQ)
p.From.Type = obj.TYPE_MEM p.From.Type = obj.TYPE_MEM
p.From.Reg = x86.REG_TLS p.From.Reg = x86.REG_TLS
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
...@@ -730,12 +730,12 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -730,12 +730,12 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
} else { } else {
// MOVQ TLS, r // MOVQ TLS, r
// MOVQ (r)(TLS*1), r // MOVQ (r)(TLS*1), r
p := gc.Prog(x86.AMOVQ) p := s.Prog(x86.AMOVQ)
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = x86.REG_TLS p.From.Reg = x86.REG_TLS
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
p.To.Reg = r p.To.Reg = r
q := gc.Prog(x86.AMOVQ) q := s.Prog(x86.AMOVQ)
q.From.Type = obj.TYPE_MEM q.From.Type = obj.TYPE_MEM
q.From.Reg = r q.From.Reg = r
q.From.Index = x86.REG_TLS q.From.Index = x86.REG_TLS
...@@ -752,17 +752,17 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -752,17 +752,17 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
if r != v.Args[0].Reg() { if r != v.Args[0].Reg() {
v.Fatalf("input[0] and output not in same register %s", v.LongString()) v.Fatalf("input[0] and output not in same register %s", v.LongString())
} }
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
p.To.Reg = r p.To.Reg = r
case ssa.OpAMD64BSFQ, ssa.OpAMD64BSFL, ssa.OpAMD64BSRQ, ssa.OpAMD64BSRL: case ssa.OpAMD64BSFQ, ssa.OpAMD64BSFL, ssa.OpAMD64BSRQ, ssa.OpAMD64BSRL:
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg() p.From.Reg = v.Args[0].Reg()
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg0() p.To.Reg = v.Reg0()
case ssa.OpAMD64SQRTSD: case ssa.OpAMD64SQRTSD:
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg() p.From.Reg = v.Args[0].Reg()
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
...@@ -774,29 +774,29 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -774,29 +774,29 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
ssa.OpAMD64SETB, ssa.OpAMD64SETBE, ssa.OpAMD64SETB, ssa.OpAMD64SETBE,
ssa.OpAMD64SETORD, ssa.OpAMD64SETNAN, ssa.OpAMD64SETORD, ssa.OpAMD64SETNAN,
ssa.OpAMD64SETA, ssa.OpAMD64SETAE: ssa.OpAMD64SETA, ssa.OpAMD64SETAE:
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg() p.To.Reg = v.Reg()
case ssa.OpAMD64SETNEF: case ssa.OpAMD64SETNEF:
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg() p.To.Reg = v.Reg()
q := gc.Prog(x86.ASETPS) q := s.Prog(x86.ASETPS)
q.To.Type = obj.TYPE_REG q.To.Type = obj.TYPE_REG
q.To.Reg = x86.REG_AX q.To.Reg = x86.REG_AX
// ORL avoids partial register write and is smaller than ORQ, used by old compiler // ORL avoids partial register write and is smaller than ORQ, used by old compiler
opregreg(x86.AORL, v.Reg(), x86.REG_AX) opregreg(s, x86.AORL, v.Reg(), x86.REG_AX)
case ssa.OpAMD64SETEQF: case ssa.OpAMD64SETEQF:
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg() p.To.Reg = v.Reg()
q := gc.Prog(x86.ASETPC) q := s.Prog(x86.ASETPC)
q.To.Type = obj.TYPE_REG q.To.Type = obj.TYPE_REG
q.To.Reg = x86.REG_AX q.To.Reg = x86.REG_AX
// ANDL avoids partial register write and is smaller than ANDQ, used by old compiler // ANDL avoids partial register write and is smaller than ANDQ, used by old compiler
opregreg(x86.AANDL, v.Reg(), x86.REG_AX) opregreg(s, x86.AANDL, v.Reg(), x86.REG_AX)
case ssa.OpAMD64InvertFlags: case ssa.OpAMD64InvertFlags:
v.Fatalf("InvertFlags should never make it to codegen %v", v.LongString()) v.Fatalf("InvertFlags should never make it to codegen %v", v.LongString())
...@@ -805,11 +805,11 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -805,11 +805,11 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
case ssa.OpAMD64AddTupleFirst32, ssa.OpAMD64AddTupleFirst64: case ssa.OpAMD64AddTupleFirst32, ssa.OpAMD64AddTupleFirst64:
v.Fatalf("AddTupleFirst* should never make it to codegen %v", v.LongString()) v.Fatalf("AddTupleFirst* should never make it to codegen %v", v.LongString())
case ssa.OpAMD64REPSTOSQ: case ssa.OpAMD64REPSTOSQ:
gc.Prog(x86.AREP) s.Prog(x86.AREP)
gc.Prog(x86.ASTOSQ) s.Prog(x86.ASTOSQ)
case ssa.OpAMD64REPMOVSQ: case ssa.OpAMD64REPMOVSQ:
gc.Prog(x86.AREP) s.Prog(x86.AREP)
gc.Prog(x86.AMOVSQ) s.Prog(x86.AMOVSQ)
case ssa.OpAMD64LoweredNilCheck: case ssa.OpAMD64LoweredNilCheck:
// Issue a load which will fault if the input is nil. // Issue a load which will fault if the input is nil.
// TODO: We currently use the 2-byte instruction TESTB AX, (reg). // TODO: We currently use the 2-byte instruction TESTB AX, (reg).
...@@ -817,7 +817,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -817,7 +817,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
// but it doesn't have false dependency on AX. // but it doesn't have false dependency on AX.
// Or maybe allocate an output register and use MOVL (reg),reg2 ? // Or maybe allocate an output register and use MOVL (reg),reg2 ?
// That trades clobbering flags for clobbering a register. // That trades clobbering flags for clobbering a register.
p := gc.Prog(x86.ATESTB) p := s.Prog(x86.ATESTB)
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = x86.REG_AX p.From.Reg = x86.REG_AX
p.To.Type = obj.TYPE_MEM p.To.Type = obj.TYPE_MEM
...@@ -827,7 +827,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -827,7 +827,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
gc.Warnl(v.Pos, "generated nil check") gc.Warnl(v.Pos, "generated nil check")
} }
case ssa.OpAMD64MOVLatomicload, ssa.OpAMD64MOVQatomicload: case ssa.OpAMD64MOVLatomicload, ssa.OpAMD64MOVQatomicload:
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg() p.From.Reg = v.Args[0].Reg()
gc.AddAux(&p.From, v) gc.AddAux(&p.From, v)
...@@ -838,7 +838,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -838,7 +838,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
if r != v.Args[0].Reg() { if r != v.Args[0].Reg() {
v.Fatalf("input[0] and output[0] not in same register %s", v.LongString()) v.Fatalf("input[0] and output[0] not in same register %s", v.LongString())
} }
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = r p.From.Reg = r
p.To.Type = obj.TYPE_MEM p.To.Type = obj.TYPE_MEM
...@@ -849,8 +849,8 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -849,8 +849,8 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
if r != v.Args[0].Reg() { if r != v.Args[0].Reg() {
v.Fatalf("input[0] and output[0] not in same register %s", v.LongString()) v.Fatalf("input[0] and output[0] not in same register %s", v.LongString())
} }
gc.Prog(x86.ALOCK) s.Prog(x86.ALOCK)
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = r p.From.Reg = r
p.To.Type = obj.TYPE_MEM p.To.Type = obj.TYPE_MEM
...@@ -860,19 +860,19 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -860,19 +860,19 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
if v.Args[1].Reg() != x86.REG_AX { if v.Args[1].Reg() != x86.REG_AX {
v.Fatalf("input[1] not in AX %s", v.LongString()) v.Fatalf("input[1] not in AX %s", v.LongString())
} }
gc.Prog(x86.ALOCK) s.Prog(x86.ALOCK)
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[2].Reg() p.From.Reg = v.Args[2].Reg()
p.To.Type = obj.TYPE_MEM p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg() p.To.Reg = v.Args[0].Reg()
gc.AddAux(&p.To, v) gc.AddAux(&p.To, v)
p = gc.Prog(x86.ASETEQ) p = s.Prog(x86.ASETEQ)
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg0() p.To.Reg = v.Reg0()
case ssa.OpAMD64ANDBlock, ssa.OpAMD64ORBlock: case ssa.OpAMD64ANDBlock, ssa.OpAMD64ORBlock:
gc.Prog(x86.ALOCK) s.Prog(x86.ALOCK)
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[1].Reg() p.From.Reg = v.Args[1].Reg()
p.To.Type = obj.TYPE_MEM p.To.Type = obj.TYPE_MEM
...@@ -913,7 +913,7 @@ func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) { ...@@ -913,7 +913,7 @@ func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
switch b.Kind { switch b.Kind {
case ssa.BlockPlain: case ssa.BlockPlain:
if b.Succs[0].Block() != next { if b.Succs[0].Block() != next {
p := gc.Prog(obj.AJMP) p := s.Prog(obj.AJMP)
p.To.Type = obj.TYPE_BRANCH p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()}) s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
} }
...@@ -921,25 +921,25 @@ func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) { ...@@ -921,25 +921,25 @@ func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
// defer returns in rax: // defer returns in rax:
// 0 if we should continue executing // 0 if we should continue executing
// 1 if we should jump to deferreturn call // 1 if we should jump to deferreturn call
p := gc.Prog(x86.ATESTL) p := s.Prog(x86.ATESTL)
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = x86.REG_AX p.From.Reg = x86.REG_AX
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
p.To.Reg = x86.REG_AX p.To.Reg = x86.REG_AX
p = gc.Prog(x86.AJNE) p = s.Prog(x86.AJNE)
p.To.Type = obj.TYPE_BRANCH p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()}) s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
if b.Succs[0].Block() != next { if b.Succs[0].Block() != next {
p := gc.Prog(obj.AJMP) p := s.Prog(obj.AJMP)
p.To.Type = obj.TYPE_BRANCH p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()}) s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
} }
case ssa.BlockExit: case ssa.BlockExit:
gc.Prog(obj.AUNDEF) // tell plive.go that we never reach here s.Prog(obj.AUNDEF) // tell plive.go that we never reach here
case ssa.BlockRet: case ssa.BlockRet:
gc.Prog(obj.ARET) s.Prog(obj.ARET)
case ssa.BlockRetJmp: case ssa.BlockRetJmp:
p := gc.Prog(obj.AJMP) p := s.Prog(obj.AJMP)
p.To.Type = obj.TYPE_MEM p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN p.To.Name = obj.NAME_EXTERN
p.To.Sym = b.Aux.(*obj.LSym) p.To.Sym = b.Aux.(*obj.LSym)
...@@ -960,19 +960,19 @@ func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) { ...@@ -960,19 +960,19 @@ func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
var p *obj.Prog var p *obj.Prog
switch next { switch next {
case b.Succs[0].Block(): case b.Succs[0].Block():
p = gc.Prog(jmp.invasm) p = s.Prog(jmp.invasm)
likely *= -1 likely *= -1
p.To.Type = obj.TYPE_BRANCH p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()}) s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
case b.Succs[1].Block(): case b.Succs[1].Block():
p = gc.Prog(jmp.asm) p = s.Prog(jmp.asm)
p.To.Type = obj.TYPE_BRANCH p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()}) s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
default: default:
p = gc.Prog(jmp.asm) p = s.Prog(jmp.asm)
p.To.Type = obj.TYPE_BRANCH p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()}) s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
q := gc.Prog(obj.AJMP) q := s.Prog(obj.AJMP)
q.To.Type = obj.TYPE_BRANCH q.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: q, B: b.Succs[1].Block()}) s.Branches = append(s.Branches, gc.Branch{P: q, B: b.Succs[1].Block()})
} }
...@@ -86,11 +86,11 @@ func makeshift(reg int16, typ int64, s int64) shift { ...@@ -86,11 +86,11 @@ func makeshift(reg int16, typ int64, s int64) shift {
return shift(int64(reg&0xf) | typ | (s&31)<<7) return shift(int64(reg&0xf) | typ | (s&31)<<7)
} }
// genshift generates a Prog for r = r0 op (r1 shifted by s) // genshift generates a Prog for r = r0 op (r1 shifted by n)
func genshift(as obj.As, r0, r1, r int16, typ int64, s int64) *obj.Prog { func genshift(s *gc.SSAGenState, as obj.As, r0, r1, r int16, typ int64, n int64) *obj.Prog {
p := gc.Prog(as) p := s.Prog(as)
p.From.Type = obj.TYPE_SHIFT p.From.Type = obj.TYPE_SHIFT
p.From.Offset = int64(makeshift(r1, typ, s)) p.From.Offset = int64(makeshift(r1, typ, n))
p.Reg = r0 p.Reg = r0
if r != 0 { if r != 0 {
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
...@@ -105,8 +105,8 @@ func makeregshift(r1 int16, typ int64, r2 int16) shift { ...@@ -105,8 +105,8 @@ func makeregshift(r1 int16, typ int64, r2 int16) shift {
} }
// genregshift generates a Prog for r = r0 op (r1 shifted by r2) // genregshift generates a Prog for r = r0 op (r1 shifted by r2)
func genregshift(as obj.As, r0, r1, r2, r int16, typ int64) *obj.Prog { func genregshift(s *gc.SSAGenState, as obj.As, r0, r1, r2, r int16, typ int64) *obj.Prog {
p := gc.Prog(as) p := s.Prog(as)
p.From.Type = obj.TYPE_SHIFT p.From.Type = obj.TYPE_SHIFT
p.From.Offset = int64(makeregshift(r1, typ, r2)) p.From.Offset = int64(makeregshift(r1, typ, r2))
p.Reg = r0 p.Reg = r0
...@@ -139,7 +139,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -139,7 +139,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
panic("bad float size") panic("bad float size")
} }
} }
p := gc.Prog(as) p := s.Prog(as)
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = x p.From.Reg = x
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
...@@ -154,7 +154,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -154,7 +154,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
v.Fatalf("load flags not implemented: %v", v.LongString()) v.Fatalf("load flags not implemented: %v", v.LongString())
return return
} }
p := gc.Prog(loadByType(v.Type)) p := s.Prog(loadByType(v.Type))
gc.AddrAuto(&p.From, v.Args[0]) gc.AddrAuto(&p.From, v.Args[0])
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg() p.To.Reg = v.Reg()
...@@ -163,7 +163,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -163,7 +163,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
v.Fatalf("store flags not implemented: %v", v.LongString()) v.Fatalf("store flags not implemented: %v", v.LongString())
return return
} }
p := gc.Prog(storeByType(v.Type)) p := s.Prog(storeByType(v.Type))
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg() p.From.Reg = v.Args[0].Reg()
gc.AddrAuto(&p.To, v) gc.AddrAuto(&p.To, v)
...@@ -188,7 +188,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -188,7 +188,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
r := v.Reg() r := v.Reg()
r1 := v.Args[0].Reg() r1 := v.Args[0].Reg()
r2 := v.Args[1].Reg() r2 := v.Args[1].Reg()
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = r2 p.From.Reg = r2
p.Reg = r1 p.Reg = r1
...@@ -199,7 +199,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -199,7 +199,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
r := v.Reg0() r := v.Reg0()
r1 := v.Args[0].Reg() r1 := v.Args[0].Reg()
r2 := v.Args[1].Reg() r2 := v.Args[1].Reg()
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.Scond = arm.C_SBIT p.Scond = arm.C_SBIT
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = r2 p.From.Reg = r2
...@@ -212,7 +212,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -212,7 +212,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
r := v.Reg() r := v.Reg()
r1 := v.Args[0].Reg() r1 := v.Args[0].Reg()
r2 := v.Args[1].Reg() r2 := v.Args[1].Reg()
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = r2 p.From.Reg = r2
p.Reg = r1 p.Reg = r1
...@@ -227,14 +227,14 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -227,14 +227,14 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
r := v.Reg() r := v.Reg()
r1 := v.Args[0].Reg() r1 := v.Args[0].Reg()
r2 := v.Args[1].Reg() r2 := v.Args[1].Reg()
p := gc.Prog(arm.ASRA) p := s.Prog(arm.ASRA)
p.Scond = arm.C_SCOND_HS p.Scond = arm.C_SCOND_HS
p.From.Type = obj.TYPE_CONST p.From.Type = obj.TYPE_CONST
p.From.Offset = 31 p.From.Offset = 31
p.Reg = r1 p.Reg = r1
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
p.To.Reg = r p.To.Reg = r
p = gc.Prog(arm.ASRA) p = s.Prog(arm.ASRA)
p.Scond = arm.C_SCOND_LO p.Scond = arm.C_SCOND_LO
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = r2 p.From.Reg = r2
...@@ -254,7 +254,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -254,7 +254,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
ssa.OpARMSLLconst, ssa.OpARMSLLconst,
ssa.OpARMSRLconst, ssa.OpARMSRLconst,
ssa.OpARMSRAconst: ssa.OpARMSRAconst:
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_CONST p.From.Type = obj.TYPE_CONST
p.From.Offset = v.AuxInt p.From.Offset = v.AuxInt
p.Reg = v.Args[0].Reg() p.Reg = v.Args[0].Reg()
...@@ -263,7 +263,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -263,7 +263,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
case ssa.OpARMADDSconst, case ssa.OpARMADDSconst,
ssa.OpARMSUBSconst, ssa.OpARMSUBSconst,
ssa.OpARMRSBSconst: ssa.OpARMRSBSconst:
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.Scond = arm.C_SBIT p.Scond = arm.C_SBIT
p.From.Type = obj.TYPE_CONST p.From.Type = obj.TYPE_CONST
p.From.Offset = v.AuxInt p.From.Offset = v.AuxInt
...@@ -271,7 +271,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -271,7 +271,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg0() p.To.Reg = v.Reg0()
case ssa.OpARMSRRconst: case ssa.OpARMSRRconst:
genshift(arm.AMOVW, 0, v.Args[0].Reg(), v.Reg(), arm.SHIFT_RR, v.AuxInt) genshift(s, arm.AMOVW, 0, v.Args[0].Reg(), v.Reg(), arm.SHIFT_RR, v.AuxInt)
case ssa.OpARMADDshiftLL, case ssa.OpARMADDshiftLL,
ssa.OpARMADCshiftLL, ssa.OpARMADCshiftLL,
ssa.OpARMSUBshiftLL, ssa.OpARMSUBshiftLL,
...@@ -282,11 +282,11 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -282,11 +282,11 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
ssa.OpARMORshiftLL, ssa.OpARMORshiftLL,
ssa.OpARMXORshiftLL, ssa.OpARMXORshiftLL,
ssa.OpARMBICshiftLL: ssa.OpARMBICshiftLL:
genshift(v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm.SHIFT_LL, v.AuxInt) genshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm.SHIFT_LL, v.AuxInt)
case ssa.OpARMADDSshiftLL, case ssa.OpARMADDSshiftLL,
ssa.OpARMSUBSshiftLL, ssa.OpARMSUBSshiftLL,
ssa.OpARMRSBSshiftLL: ssa.OpARMRSBSshiftLL:
p := genshift(v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg0(), arm.SHIFT_LL, v.AuxInt) p := genshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg0(), arm.SHIFT_LL, v.AuxInt)
p.Scond = arm.C_SBIT p.Scond = arm.C_SBIT
case ssa.OpARMADDshiftRL, case ssa.OpARMADDshiftRL,
ssa.OpARMADCshiftRL, ssa.OpARMADCshiftRL,
...@@ -298,11 +298,11 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -298,11 +298,11 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
ssa.OpARMORshiftRL, ssa.OpARMORshiftRL,
ssa.OpARMXORshiftRL, ssa.OpARMXORshiftRL,
ssa.OpARMBICshiftRL: ssa.OpARMBICshiftRL:
genshift(v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm.SHIFT_LR, v.AuxInt) genshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm.SHIFT_LR, v.AuxInt)
case ssa.OpARMADDSshiftRL, case ssa.OpARMADDSshiftRL,
ssa.OpARMSUBSshiftRL, ssa.OpARMSUBSshiftRL,
ssa.OpARMRSBSshiftRL: ssa.OpARMRSBSshiftRL:
p := genshift(v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg0(), arm.SHIFT_LR, v.AuxInt) p := genshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg0(), arm.SHIFT_LR, v.AuxInt)
p.Scond = arm.C_SBIT p.Scond = arm.C_SBIT
case ssa.OpARMADDshiftRA, case ssa.OpARMADDshiftRA,
ssa.OpARMADCshiftRA, ssa.OpARMADCshiftRA,
...@@ -314,26 +314,26 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -314,26 +314,26 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
ssa.OpARMORshiftRA, ssa.OpARMORshiftRA,
ssa.OpARMXORshiftRA, ssa.OpARMXORshiftRA,
ssa.OpARMBICshiftRA: ssa.OpARMBICshiftRA:
genshift(v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm.SHIFT_AR, v.AuxInt) genshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm.SHIFT_AR, v.AuxInt)
case ssa.OpARMADDSshiftRA, case ssa.OpARMADDSshiftRA,
ssa.OpARMSUBSshiftRA, ssa.OpARMSUBSshiftRA,
ssa.OpARMRSBSshiftRA: ssa.OpARMRSBSshiftRA:
p := genshift(v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg0(), arm.SHIFT_AR, v.AuxInt) p := genshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg0(), arm.SHIFT_AR, v.AuxInt)
p.Scond = arm.C_SBIT p.Scond = arm.C_SBIT
case ssa.OpARMXORshiftRR: case ssa.OpARMXORshiftRR:
genshift(v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm.SHIFT_RR, v.AuxInt) genshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm.SHIFT_RR, v.AuxInt)
case ssa.OpARMMVNshiftLL: case ssa.OpARMMVNshiftLL:
genshift(v.Op.Asm(), 0, v.Args[0].Reg(), v.Reg(), arm.SHIFT_LL, v.AuxInt) genshift(s, v.Op.Asm(), 0, v.Args[0].Reg(), v.Reg(), arm.SHIFT_LL, v.AuxInt)
case ssa.OpARMMVNshiftRL: case ssa.OpARMMVNshiftRL:
genshift(v.Op.Asm(), 0, v.Args[0].Reg(), v.Reg(), arm.SHIFT_LR, v.AuxInt) genshift(s, v.Op.Asm(), 0, v.Args[0].Reg(), v.Reg(), arm.SHIFT_LR, v.AuxInt)
case ssa.OpARMMVNshiftRA: case ssa.OpARMMVNshiftRA:
genshift(v.Op.Asm(), 0, v.Args[0].Reg(), v.Reg(), arm.SHIFT_AR, v.AuxInt) genshift(s, v.Op.Asm(), 0, v.Args[0].Reg(), v.Reg(), arm.SHIFT_AR, v.AuxInt)
case ssa.OpARMMVNshiftLLreg: case ssa.OpARMMVNshiftLLreg:
genregshift(v.Op.Asm(), 0, v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm.SHIFT_LL) genregshift(s, v.Op.Asm(), 0, v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm.SHIFT_LL)
case ssa.OpARMMVNshiftRLreg: case ssa.OpARMMVNshiftRLreg:
genregshift(v.Op.Asm(), 0, v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm.SHIFT_LR) genregshift(s, v.Op.Asm(), 0, v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm.SHIFT_LR)
case ssa.OpARMMVNshiftRAreg: case ssa.OpARMMVNshiftRAreg:
genregshift(v.Op.Asm(), 0, v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm.SHIFT_AR) genregshift(s, v.Op.Asm(), 0, v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm.SHIFT_AR)
case ssa.OpARMADDshiftLLreg, case ssa.OpARMADDshiftLLreg,
ssa.OpARMADCshiftLLreg, ssa.OpARMADCshiftLLreg,
ssa.OpARMSUBshiftLLreg, ssa.OpARMSUBshiftLLreg,
...@@ -344,11 +344,11 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -344,11 +344,11 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
ssa.OpARMORshiftLLreg, ssa.OpARMORshiftLLreg,
ssa.OpARMXORshiftLLreg, ssa.OpARMXORshiftLLreg,
ssa.OpARMBICshiftLLreg: ssa.OpARMBICshiftLLreg:
genregshift(v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), v.Reg(), arm.SHIFT_LL) genregshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), v.Reg(), arm.SHIFT_LL)
case ssa.OpARMADDSshiftLLreg, case ssa.OpARMADDSshiftLLreg,
ssa.OpARMSUBSshiftLLreg, ssa.OpARMSUBSshiftLLreg,
ssa.OpARMRSBSshiftLLreg: ssa.OpARMRSBSshiftLLreg:
p := genregshift(v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), v.Reg0(), arm.SHIFT_LL) p := genregshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), v.Reg0(), arm.SHIFT_LL)
p.Scond = arm.C_SBIT p.Scond = arm.C_SBIT
case ssa.OpARMADDshiftRLreg, case ssa.OpARMADDshiftRLreg,
ssa.OpARMADCshiftRLreg, ssa.OpARMADCshiftRLreg,
...@@ -360,11 +360,11 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -360,11 +360,11 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
ssa.OpARMORshiftRLreg, ssa.OpARMORshiftRLreg,
ssa.OpARMXORshiftRLreg, ssa.OpARMXORshiftRLreg,
ssa.OpARMBICshiftRLreg: ssa.OpARMBICshiftRLreg:
genregshift(v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), v.Reg(), arm.SHIFT_LR) genregshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), v.Reg(), arm.SHIFT_LR)
case ssa.OpARMADDSshiftRLreg, case ssa.OpARMADDSshiftRLreg,
ssa.OpARMSUBSshiftRLreg, ssa.OpARMSUBSshiftRLreg,
ssa.OpARMRSBSshiftRLreg: ssa.OpARMRSBSshiftRLreg:
p := genregshift(v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), v.Reg0(), arm.SHIFT_LR) p := genregshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), v.Reg0(), arm.SHIFT_LR)
p.Scond = arm.C_SBIT p.Scond = arm.C_SBIT
case ssa.OpARMADDshiftRAreg, case ssa.OpARMADDshiftRAreg,
ssa.OpARMADCshiftRAreg, ssa.OpARMADCshiftRAreg,
...@@ -376,16 +376,16 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -376,16 +376,16 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
ssa.OpARMORshiftRAreg, ssa.OpARMORshiftRAreg,
ssa.OpARMXORshiftRAreg, ssa.OpARMXORshiftRAreg,
ssa.OpARMBICshiftRAreg: ssa.OpARMBICshiftRAreg:
genregshift(v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), v.Reg(), arm.SHIFT_AR) genregshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), v.Reg(), arm.SHIFT_AR)
case ssa.OpARMADDSshiftRAreg, case ssa.OpARMADDSshiftRAreg,
ssa.OpARMSUBSshiftRAreg, ssa.OpARMSUBSshiftRAreg,
ssa.OpARMRSBSshiftRAreg: ssa.OpARMRSBSshiftRAreg:
p := genregshift(v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), v.Reg0(), arm.SHIFT_AR) p := genregshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), v.Reg0(), arm.SHIFT_AR)
p.Scond = arm.C_SBIT p.Scond = arm.C_SBIT
case ssa.OpARMHMUL, case ssa.OpARMHMUL,
ssa.OpARMHMULU: ssa.OpARMHMULU:
// 32-bit high multiplication // 32-bit high multiplication
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg() p.From.Reg = v.Args[0].Reg()
p.Reg = v.Args[1].Reg() p.Reg = v.Args[1].Reg()
...@@ -394,7 +394,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -394,7 +394,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.To.Offset = arm.REGTMP // throw away low 32-bit into tmp register p.To.Offset = arm.REGTMP // throw away low 32-bit into tmp register
case ssa.OpARMMULLU: case ssa.OpARMMULLU:
// 32-bit multiplication, results 64-bit, high 32-bit in out0, low 32-bit in out1 // 32-bit multiplication, results 64-bit, high 32-bit in out0, low 32-bit in out1
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg() p.From.Reg = v.Args[0].Reg()
p.Reg = v.Args[1].Reg() p.Reg = v.Args[1].Reg()
...@@ -402,7 +402,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -402,7 +402,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.To.Reg = v.Reg0() // high 32-bit p.To.Reg = v.Reg0() // high 32-bit
p.To.Offset = int64(v.Reg1()) // low 32-bit p.To.Offset = int64(v.Reg1()) // low 32-bit
case ssa.OpARMMULA: case ssa.OpARMMULA:
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg() p.From.Reg = v.Args[0].Reg()
p.Reg = v.Args[1].Reg() p.Reg = v.Args[1].Reg()
...@@ -410,14 +410,14 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -410,14 +410,14 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.To.Reg = v.Reg() // result p.To.Reg = v.Reg() // result
p.To.Offset = int64(v.Args[2].Reg()) // addend p.To.Offset = int64(v.Args[2].Reg()) // addend
case ssa.OpARMMOVWconst: case ssa.OpARMMOVWconst:
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_CONST p.From.Type = obj.TYPE_CONST
p.From.Offset = v.AuxInt p.From.Offset = v.AuxInt
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg() p.To.Reg = v.Reg()
case ssa.OpARMMOVFconst, case ssa.OpARMMOVFconst,
ssa.OpARMMOVDconst: ssa.OpARMMOVDconst:
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_FCONST p.From.Type = obj.TYPE_FCONST
p.From.Val = math.Float64frombits(uint64(v.AuxInt)) p.From.Val = math.Float64frombits(uint64(v.AuxInt))
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
...@@ -428,7 +428,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -428,7 +428,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
ssa.OpARMTEQ, ssa.OpARMTEQ,
ssa.OpARMCMPF, ssa.OpARMCMPF,
ssa.OpARMCMPD: ssa.OpARMCMPD:
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
// Special layout in ARM assembly // Special layout in ARM assembly
// Comparing to x86, the operands of ARM's CMP are reversed. // Comparing to x86, the operands of ARM's CMP are reversed.
...@@ -439,29 +439,29 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -439,29 +439,29 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
ssa.OpARMTSTconst, ssa.OpARMTSTconst,
ssa.OpARMTEQconst: ssa.OpARMTEQconst:
// Special layout in ARM assembly // Special layout in ARM assembly
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_CONST p.From.Type = obj.TYPE_CONST
p.From.Offset = v.AuxInt p.From.Offset = v.AuxInt
p.Reg = v.Args[0].Reg() p.Reg = v.Args[0].Reg()
case ssa.OpARMCMPF0, case ssa.OpARMCMPF0,
ssa.OpARMCMPD0: ssa.OpARMCMPD0:
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg() p.From.Reg = v.Args[0].Reg()
case ssa.OpARMCMPshiftLL: case ssa.OpARMCMPshiftLL:
genshift(v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), 0, arm.SHIFT_LL, v.AuxInt) genshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), 0, arm.SHIFT_LL, v.AuxInt)
case ssa.OpARMCMPshiftRL: case ssa.OpARMCMPshiftRL:
genshift(v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), 0, arm.SHIFT_LR, v.AuxInt) genshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), 0, arm.SHIFT_LR, v.AuxInt)
case ssa.OpARMCMPshiftRA: case ssa.OpARMCMPshiftRA:
genshift(v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), 0, arm.SHIFT_AR, v.AuxInt) genshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), 0, arm.SHIFT_AR, v.AuxInt)
case ssa.OpARMCMPshiftLLreg: case ssa.OpARMCMPshiftLLreg:
genregshift(v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), 0, arm.SHIFT_LL) genregshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), 0, arm.SHIFT_LL)
case ssa.OpARMCMPshiftRLreg: case ssa.OpARMCMPshiftRLreg:
genregshift(v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), 0, arm.SHIFT_LR) genregshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), 0, arm.SHIFT_LR)
case ssa.OpARMCMPshiftRAreg: case ssa.OpARMCMPshiftRAreg:
genregshift(v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), 0, arm.SHIFT_AR) genregshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), 0, arm.SHIFT_AR)
case ssa.OpARMMOVWaddr: case ssa.OpARMMOVWaddr:
p := gc.Prog(arm.AMOVW) p := s.Prog(arm.AMOVW)
p.From.Type = obj.TYPE_ADDR p.From.Type = obj.TYPE_ADDR
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg() p.To.Reg = v.Reg()
...@@ -498,7 +498,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -498,7 +498,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
ssa.OpARMMOVWload, ssa.OpARMMOVWload,
ssa.OpARMMOVFload, ssa.OpARMMOVFload,
ssa.OpARMMOVDload: ssa.OpARMMOVDload:
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg() p.From.Reg = v.Args[0].Reg()
gc.AddAux(&p.From, v) gc.AddAux(&p.From, v)
...@@ -509,7 +509,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -509,7 +509,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
ssa.OpARMMOVWstore, ssa.OpARMMOVWstore,
ssa.OpARMMOVFstore, ssa.OpARMMOVFstore,
ssa.OpARMMOVDstore: ssa.OpARMMOVDstore:
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[1].Reg() p.From.Reg = v.Args[1].Reg()
p.To.Type = obj.TYPE_MEM p.To.Type = obj.TYPE_MEM
...@@ -519,33 +519,33 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -519,33 +519,33 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
// this is just shift 0 bits // this is just shift 0 bits
fallthrough fallthrough
case ssa.OpARMMOVWloadshiftLL: case ssa.OpARMMOVWloadshiftLL:
p := genshift(v.Op.Asm(), 0, v.Args[1].Reg(), v.Reg(), arm.SHIFT_LL, v.AuxInt) p := genshift(s, v.Op.Asm(), 0, v.Args[1].Reg(), v.Reg(), arm.SHIFT_LL, v.AuxInt)
p.From.Reg = v.Args[0].Reg() p.From.Reg = v.Args[0].Reg()
case ssa.OpARMMOVWloadshiftRL: case ssa.OpARMMOVWloadshiftRL:
p := genshift(v.Op.Asm(), 0, v.Args[1].Reg(), v.Reg(), arm.SHIFT_LR, v.AuxInt) p := genshift(s, v.Op.Asm(), 0, v.Args[1].Reg(), v.Reg(), arm.SHIFT_LR, v.AuxInt)
p.From.Reg = v.Args[0].Reg() p.From.Reg = v.Args[0].Reg()
case ssa.OpARMMOVWloadshiftRA: case ssa.OpARMMOVWloadshiftRA:
p := genshift(v.Op.Asm(), 0, v.Args[1].Reg(), v.Reg(), arm.SHIFT_AR, v.AuxInt) p := genshift(s, v.Op.Asm(), 0, v.Args[1].Reg(), v.Reg(), arm.SHIFT_AR, v.AuxInt)
p.From.Reg = v.Args[0].Reg() p.From.Reg = v.Args[0].Reg()
case ssa.OpARMMOVWstoreidx: case ssa.OpARMMOVWstoreidx:
// this is just shift 0 bits // this is just shift 0 bits
fallthrough fallthrough
case ssa.OpARMMOVWstoreshiftLL: case ssa.OpARMMOVWstoreshiftLL:
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[2].Reg() p.From.Reg = v.Args[2].Reg()
p.To.Type = obj.TYPE_SHIFT p.To.Type = obj.TYPE_SHIFT
p.To.Reg = v.Args[0].Reg() p.To.Reg = v.Args[0].Reg()
p.To.Offset = int64(makeshift(v.Args[1].Reg(), arm.SHIFT_LL, v.AuxInt)) p.To.Offset = int64(makeshift(v.Args[1].Reg(), arm.SHIFT_LL, v.AuxInt))
case ssa.OpARMMOVWstoreshiftRL: case ssa.OpARMMOVWstoreshiftRL:
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[2].Reg() p.From.Reg = v.Args[2].Reg()
p.To.Type = obj.TYPE_SHIFT p.To.Type = obj.TYPE_SHIFT
p.To.Reg = v.Args[0].Reg() p.To.Reg = v.Args[0].Reg()
p.To.Offset = int64(makeshift(v.Args[1].Reg(), arm.SHIFT_LR, v.AuxInt)) p.To.Offset = int64(makeshift(v.Args[1].Reg(), arm.SHIFT_LR, v.AuxInt))
case ssa.OpARMMOVWstoreshiftRA: case ssa.OpARMMOVWstoreshiftRA:
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[2].Reg() p.From.Reg = v.Args[2].Reg()
p.To.Type = obj.TYPE_SHIFT p.To.Type = obj.TYPE_SHIFT
...@@ -570,7 +570,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -570,7 +570,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
if v.Reg() == v.Args[0].Reg() { if v.Reg() == v.Args[0].Reg() {
return return
} }
p := gc.Prog(arm.AMOVW) p := s.Prog(arm.AMOVW)
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg() p.From.Reg = v.Args[0].Reg()
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
...@@ -591,7 +591,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -591,7 +591,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
ssa.OpARMMOVDW, ssa.OpARMMOVDW,
ssa.OpARMMOVFD, ssa.OpARMMOVFD,
ssa.OpARMMOVDF: ssa.OpARMMOVDF:
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg() p.From.Reg = v.Args[0].Reg()
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
...@@ -600,21 +600,21 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -600,21 +600,21 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
ssa.OpARMMOVWUD, ssa.OpARMMOVWUD,
ssa.OpARMMOVFWU, ssa.OpARMMOVFWU,
ssa.OpARMMOVDWU: ssa.OpARMMOVDWU:
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.Scond = arm.C_UBIT p.Scond = arm.C_UBIT
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg() p.From.Reg = v.Args[0].Reg()
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg() p.To.Reg = v.Reg()
case ssa.OpARMCMOVWHSconst: case ssa.OpARMCMOVWHSconst:
p := gc.Prog(arm.AMOVW) p := s.Prog(arm.AMOVW)
p.Scond = arm.C_SCOND_HS p.Scond = arm.C_SCOND_HS
p.From.Type = obj.TYPE_CONST p.From.Type = obj.TYPE_CONST
p.From.Offset = v.AuxInt p.From.Offset = v.AuxInt
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg() p.To.Reg = v.Reg()
case ssa.OpARMCMOVWLSconst: case ssa.OpARMCMOVWLSconst:
p := gc.Prog(arm.AMOVW) p := s.Prog(arm.AMOVW)
p.Scond = arm.C_SCOND_LS p.Scond = arm.C_SCOND_LS
p.From.Type = obj.TYPE_CONST p.From.Type = obj.TYPE_CONST
p.From.Offset = v.AuxInt p.From.Offset = v.AuxInt
...@@ -623,20 +623,20 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -623,20 +623,20 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
case ssa.OpARMCALLstatic, ssa.OpARMCALLclosure, ssa.OpARMCALLinter, ssa.OpARMCALLudiv: case ssa.OpARMCALLstatic, ssa.OpARMCALLclosure, ssa.OpARMCALLinter, ssa.OpARMCALLudiv:
s.Call(v) s.Call(v)
case ssa.OpARMDUFFZERO: case ssa.OpARMDUFFZERO:
p := gc.Prog(obj.ADUFFZERO) p := s.Prog(obj.ADUFFZERO)
p.To.Type = obj.TYPE_MEM p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN p.To.Name = obj.NAME_EXTERN
p.To.Sym = gc.Duffzero p.To.Sym = gc.Duffzero
p.To.Offset = v.AuxInt p.To.Offset = v.AuxInt
case ssa.OpARMDUFFCOPY: case ssa.OpARMDUFFCOPY:
p := gc.Prog(obj.ADUFFCOPY) p := s.Prog(obj.ADUFFCOPY)
p.To.Type = obj.TYPE_MEM p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN p.To.Name = obj.NAME_EXTERN
p.To.Sym = gc.Duffcopy p.To.Sym = gc.Duffcopy
p.To.Offset = v.AuxInt p.To.Offset = v.AuxInt
case ssa.OpARMLoweredNilCheck: case ssa.OpARMLoweredNilCheck:
// Issue a load which will fault if arg is nil. // Issue a load which will fault if arg is nil.
p := gc.Prog(arm.AMOVB) p := s.Prog(arm.AMOVB)
p.From.Type = obj.TYPE_MEM p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg() p.From.Reg = v.Args[0].Reg()
gc.AddAux(&p.From, v) gc.AddAux(&p.From, v)
...@@ -665,18 +665,18 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -665,18 +665,18 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
sz = 1 sz = 1
mov = arm.AMOVB mov = arm.AMOVB
} }
p := gc.Prog(mov) p := s.Prog(mov)
p.Scond = arm.C_PBIT p.Scond = arm.C_PBIT
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[2].Reg() p.From.Reg = v.Args[2].Reg()
p.To.Type = obj.TYPE_MEM p.To.Type = obj.TYPE_MEM
p.To.Reg = arm.REG_R1 p.To.Reg = arm.REG_R1
p.To.Offset = sz p.To.Offset = sz
p2 := gc.Prog(arm.ACMP) p2 := s.Prog(arm.ACMP)
p2.From.Type = obj.TYPE_REG p2.From.Type = obj.TYPE_REG
p2.From.Reg = v.Args[1].Reg() p2.From.Reg = v.Args[1].Reg()
p2.Reg = arm.REG_R1 p2.Reg = arm.REG_R1
p3 := gc.Prog(arm.ABLE) p3 := s.Prog(arm.ABLE)
p3.To.Type = obj.TYPE_BRANCH p3.To.Type = obj.TYPE_BRANCH
gc.Patch(p3, p) gc.Patch(p3, p)
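Note: the LoweredZero case above emits its conditional branch (p3) before its target is known and then uses gc.Patch to point it back at the first store, which is what closes the zeroing loop. The snippet below is a minimal stand-alone illustration of that emit-then-patch idiom; Prog and Patch here are toy types, not the compiler's cmd/internal/obj API.

    package main

    import "fmt"

    // Toy stand-ins for the real Prog/Patch machinery.
    type Prog struct {
        As     string
        Target *Prog // branch target, nil until patched
    }

    func Patch(branch, target *Prog) { branch.Target = target }

    func main() {
        head := &Prog{As: "MOVW.P"} // first store of the loop body
        _ = &Prog{As: "CMP"}        // compare the current pointer against the end
        ble := &Prog{As: "BLE"}     // backward branch, target unknown when created
        Patch(ble, head)            // equivalent of gc.Patch(p3, p): loop back to the store
        fmt.Println(ble.As, "->", ble.Target.As)
    }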
case ssa.OpARMLoweredMove: case ssa.OpARMLoweredMove:
...@@ -699,25 +699,25 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -699,25 +699,25 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
sz = 1 sz = 1
mov = arm.AMOVB mov = arm.AMOVB
} }
p := gc.Prog(mov) p := s.Prog(mov)
p.Scond = arm.C_PBIT p.Scond = arm.C_PBIT
p.From.Type = obj.TYPE_MEM p.From.Type = obj.TYPE_MEM
p.From.Reg = arm.REG_R1 p.From.Reg = arm.REG_R1
p.From.Offset = sz p.From.Offset = sz
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
p.To.Reg = arm.REGTMP p.To.Reg = arm.REGTMP
p2 := gc.Prog(mov) p2 := s.Prog(mov)
p2.Scond = arm.C_PBIT p2.Scond = arm.C_PBIT
p2.From.Type = obj.TYPE_REG p2.From.Type = obj.TYPE_REG
p2.From.Reg = arm.REGTMP p2.From.Reg = arm.REGTMP
p2.To.Type = obj.TYPE_MEM p2.To.Type = obj.TYPE_MEM
p2.To.Reg = arm.REG_R2 p2.To.Reg = arm.REG_R2
p2.To.Offset = sz p2.To.Offset = sz
p3 := gc.Prog(arm.ACMP) p3 := s.Prog(arm.ACMP)
p3.From.Type = obj.TYPE_REG p3.From.Type = obj.TYPE_REG
p3.From.Reg = v.Args[2].Reg() p3.From.Reg = v.Args[2].Reg()
p3.Reg = arm.REG_R1 p3.Reg = arm.REG_R1
p4 := gc.Prog(arm.ABLE) p4 := s.Prog(arm.ABLE)
p4.To.Type = obj.TYPE_BRANCH p4.To.Type = obj.TYPE_BRANCH
gc.Patch(p4, p) gc.Patch(p4, p)
case ssa.OpARMEqual, case ssa.OpARMEqual,
...@@ -732,12 +732,12 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -732,12 +732,12 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
ssa.OpARMGreaterEqualU: ssa.OpARMGreaterEqualU:
// generate boolean values // generate boolean values
// use conditional move // use conditional move
p := gc.Prog(arm.AMOVW) p := s.Prog(arm.AMOVW)
p.From.Type = obj.TYPE_CONST p.From.Type = obj.TYPE_CONST
p.From.Offset = 0 p.From.Offset = 0
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg() p.To.Reg = v.Reg()
p = gc.Prog(arm.AMOVW) p = s.Prog(arm.AMOVW)
p.Scond = condBits[v.Op] p.Scond = condBits[v.Op]
p.From.Type = obj.TYPE_CONST p.From.Type = obj.TYPE_CONST
p.From.Offset = 1 p.From.Offset = 1
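The boolean cases above materialize a flag as a value with two MOVWs: an unconditional move of 0 followed by a conditional move of 1 selected by condBits[v.Op]. As a behavioral sketch only (plain Go, not the emitted ARM code):

    package main

    import "fmt"

    // boolFromFlag mirrors the two-instruction pattern:
    // MOVW $0, Rout; MOVW.<cond> $1, Rout.
    func boolFromFlag(cond bool) int32 {
        r := int32(0) // unconditional zero
        if cond {     // conditional move guarded by the comparison's flags
            r = 1
        }
        return r
    }

    func main() { fmt.Println(boolFromFlag(2 < 3), boolFromFlag(2 > 3)) }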
...@@ -791,7 +791,7 @@ func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) { ...@@ -791,7 +791,7 @@ func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
switch b.Kind { switch b.Kind {
case ssa.BlockPlain: case ssa.BlockPlain:
if b.Succs[0].Block() != next { if b.Succs[0].Block() != next {
p := gc.Prog(obj.AJMP) p := s.Prog(obj.AJMP)
p.To.Type = obj.TYPE_BRANCH p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()}) s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
} }
...@@ -800,27 +800,27 @@ func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) { ...@@ -800,27 +800,27 @@ func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
// defer returns in R0: // defer returns in R0:
// 0 if we should continue executing // 0 if we should continue executing
// 1 if we should jump to deferreturn call // 1 if we should jump to deferreturn call
p := gc.Prog(arm.ACMP) p := s.Prog(arm.ACMP)
p.From.Type = obj.TYPE_CONST p.From.Type = obj.TYPE_CONST
p.From.Offset = 0 p.From.Offset = 0
p.Reg = arm.REG_R0 p.Reg = arm.REG_R0
p = gc.Prog(arm.ABNE) p = s.Prog(arm.ABNE)
p.To.Type = obj.TYPE_BRANCH p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()}) s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
if b.Succs[0].Block() != next { if b.Succs[0].Block() != next {
p := gc.Prog(obj.AJMP) p := s.Prog(obj.AJMP)
p.To.Type = obj.TYPE_BRANCH p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()}) s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
} }
case ssa.BlockExit: case ssa.BlockExit:
gc.Prog(obj.AUNDEF) // tell plive.go that we never reach here s.Prog(obj.AUNDEF) // tell plive.go that we never reach here
case ssa.BlockRet: case ssa.BlockRet:
gc.Prog(obj.ARET) s.Prog(obj.ARET)
case ssa.BlockRetJmp: case ssa.BlockRetJmp:
p := gc.Prog(obj.ARET) p := s.Prog(obj.ARET)
p.To.Type = obj.TYPE_MEM p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN p.To.Name = obj.NAME_EXTERN
p.To.Sym = b.Aux.(*obj.LSym) p.To.Sym = b.Aux.(*obj.LSym)
...@@ -834,18 +834,18 @@ func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) { ...@@ -834,18 +834,18 @@ func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
var p *obj.Prog var p *obj.Prog
switch next { switch next {
case b.Succs[0].Block(): case b.Succs[0].Block():
p = gc.Prog(jmp.invasm) p = s.Prog(jmp.invasm)
p.To.Type = obj.TYPE_BRANCH p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()}) s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
case b.Succs[1].Block(): case b.Succs[1].Block():
p = gc.Prog(jmp.asm) p = s.Prog(jmp.asm)
p.To.Type = obj.TYPE_BRANCH p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()}) s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
default: default:
p = gc.Prog(jmp.asm) p = s.Prog(jmp.asm)
p.To.Type = obj.TYPE_BRANCH p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()}) s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
q := gc.Prog(obj.AJMP) q := s.Prog(obj.AJMP)
q.To.Type = obj.TYPE_BRANCH q.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: q, B: b.Succs[1].Block()}) s.Branches = append(s.Branches, gc.Branch{P: q, B: b.Succs[1].Block()})
} }
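The conditional-block arm of ssaGenBlock above chooses its instructions from which successor is laid out next: fall through when possible, otherwise emit both a conditional branch and an unconditional jump. The sketch below restates that selection in stand-alone Go; blockJump and the strings are illustrative placeholders, not the package's real jump table.

    package main

    import "fmt"

    type blockJump struct{ asm, invasm string }

    // lowerCond returns the pseudo-instructions for a two-successor block,
    // given which block is laid out immediately after it.
    func lowerCond(jmp blockJump, succ0, succ1, next string) []string {
        switch next {
        case succ0:
            // fall through to succ0; branch to succ1 on the inverted condition
            return []string{jmp.invasm + " -> " + succ1}
        case succ1:
            // fall through to succ1; branch to succ0 on the condition
            return []string{jmp.asm + " -> " + succ0}
        default:
            // neither successor follows: conditional branch, then a plain jump
            return []string{jmp.asm + " -> " + succ0, "JMP -> " + succ1}
        }
    }

    func main() {
        fmt.Println(lowerCond(blockJump{asm: "BEQ", invasm: "BNE"}, "b2", "b3", "b2"))
    }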
......
...@@ -78,11 +78,11 @@ func makeshift(reg int16, typ int64, s int64) int64 { ...@@ -78,11 +78,11 @@ func makeshift(reg int16, typ int64, s int64) int64 {
return int64(reg&31)<<16 | typ | (s&63)<<10 return int64(reg&31)<<16 | typ | (s&63)<<10
} }
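makeshift packs a register, a shift kind, and a shift amount into a single operand word; in the genshift helper just below, the shift-amount parameter is renamed from s to n now that s is the *gc.SSAGenState parameter. A stand-alone copy of the encoding, with a placeholder shift-kind constant rather than the arm64 package's real value, looks like this:

    package main

    import "fmt"

    const shiftLL = 0 // placeholder for the SHIFT_LL constant; value assumed, not checked

    // makeshift mirrors the field layout in the hunk above:
    // register in bits 16-20, shift amount in bits 10-15, shift kind OR'd in.
    func makeshift(reg int16, typ int64, n int64) int64 {
        return int64(reg&31)<<16 | typ | (n&63)<<10
    }

    func main() {
        // R1 shifted left by 3
        fmt.Printf("%#x\n", makeshift(1, shiftLL, 3))
    }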
// genshift generates a Prog for r = r0 op (r1 shifted by s) // genshift generates a Prog for r = r0 op (r1 shifted by n)
func genshift(as obj.As, r0, r1, r int16, typ int64, s int64) *obj.Prog { func genshift(s *gc.SSAGenState, as obj.As, r0, r1, r int16, typ int64, n int64) *obj.Prog {
p := gc.Prog(as) p := s.Prog(as)
p.From.Type = obj.TYPE_SHIFT p.From.Type = obj.TYPE_SHIFT
p.From.Offset = makeshift(r1, typ, s) p.From.Offset = makeshift(r1, typ, n)
p.Reg = r0 p.Reg = r0
if r != 0 { if r != 0 {
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
...@@ -113,7 +113,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -113,7 +113,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
panic("bad float size") panic("bad float size")
} }
} }
p := gc.Prog(as) p := s.Prog(as)
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = x p.From.Reg = x
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
...@@ -128,7 +128,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -128,7 +128,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
v.Fatalf("load flags not implemented: %v", v.LongString()) v.Fatalf("load flags not implemented: %v", v.LongString())
return return
} }
p := gc.Prog(loadByType(v.Type)) p := s.Prog(loadByType(v.Type))
gc.AddrAuto(&p.From, v.Args[0]) gc.AddrAuto(&p.From, v.Args[0])
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg() p.To.Reg = v.Reg()
...@@ -137,7 +137,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -137,7 +137,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
v.Fatalf("store flags not implemented: %v", v.LongString()) v.Fatalf("store flags not implemented: %v", v.LongString())
return return
} }
p := gc.Prog(storeByType(v.Type)) p := s.Prog(storeByType(v.Type))
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg() p.From.Reg = v.Args[0].Reg()
gc.AddrAuto(&p.To, v) gc.AddrAuto(&p.To, v)
...@@ -175,7 +175,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -175,7 +175,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
r := v.Reg() r := v.Reg()
r1 := v.Args[0].Reg() r1 := v.Args[0].Reg()
r2 := v.Args[1].Reg() r2 := v.Args[1].Reg()
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = r2 p.From.Reg = r2
p.Reg = r1 p.Reg = r1
...@@ -192,7 +192,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -192,7 +192,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
ssa.OpARM64SRAconst, ssa.OpARM64SRAconst,
ssa.OpARM64RORconst, ssa.OpARM64RORconst,
ssa.OpARM64RORWconst: ssa.OpARM64RORWconst:
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_CONST p.From.Type = obj.TYPE_CONST
p.From.Offset = v.AuxInt p.From.Offset = v.AuxInt
p.Reg = v.Args[0].Reg() p.Reg = v.Args[0].Reg()
...@@ -204,30 +204,30 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -204,30 +204,30 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
ssa.OpARM64ORshiftLL, ssa.OpARM64ORshiftLL,
ssa.OpARM64XORshiftLL, ssa.OpARM64XORshiftLL,
ssa.OpARM64BICshiftLL: ssa.OpARM64BICshiftLL:
genshift(v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm64.SHIFT_LL, v.AuxInt) genshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm64.SHIFT_LL, v.AuxInt)
case ssa.OpARM64ADDshiftRL, case ssa.OpARM64ADDshiftRL,
ssa.OpARM64SUBshiftRL, ssa.OpARM64SUBshiftRL,
ssa.OpARM64ANDshiftRL, ssa.OpARM64ANDshiftRL,
ssa.OpARM64ORshiftRL, ssa.OpARM64ORshiftRL,
ssa.OpARM64XORshiftRL, ssa.OpARM64XORshiftRL,
ssa.OpARM64BICshiftRL: ssa.OpARM64BICshiftRL:
genshift(v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm64.SHIFT_LR, v.AuxInt) genshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm64.SHIFT_LR, v.AuxInt)
case ssa.OpARM64ADDshiftRA, case ssa.OpARM64ADDshiftRA,
ssa.OpARM64SUBshiftRA, ssa.OpARM64SUBshiftRA,
ssa.OpARM64ANDshiftRA, ssa.OpARM64ANDshiftRA,
ssa.OpARM64ORshiftRA, ssa.OpARM64ORshiftRA,
ssa.OpARM64XORshiftRA, ssa.OpARM64XORshiftRA,
ssa.OpARM64BICshiftRA: ssa.OpARM64BICshiftRA:
genshift(v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm64.SHIFT_AR, v.AuxInt) genshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm64.SHIFT_AR, v.AuxInt)
case ssa.OpARM64MOVDconst: case ssa.OpARM64MOVDconst:
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_CONST p.From.Type = obj.TYPE_CONST
p.From.Offset = v.AuxInt p.From.Offset = v.AuxInt
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg() p.To.Reg = v.Reg()
case ssa.OpARM64FMOVSconst, case ssa.OpARM64FMOVSconst,
ssa.OpARM64FMOVDconst: ssa.OpARM64FMOVDconst:
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_FCONST p.From.Type = obj.TYPE_FCONST
p.From.Val = math.Float64frombits(uint64(v.AuxInt)) p.From.Val = math.Float64frombits(uint64(v.AuxInt))
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
...@@ -238,7 +238,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -238,7 +238,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
ssa.OpARM64CMNW, ssa.OpARM64CMNW,
ssa.OpARM64FCMPS, ssa.OpARM64FCMPS,
ssa.OpARM64FCMPD: ssa.OpARM64FCMPD:
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[1].Reg() p.From.Reg = v.Args[1].Reg()
p.Reg = v.Args[0].Reg() p.Reg = v.Args[0].Reg()
...@@ -246,18 +246,18 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -246,18 +246,18 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
ssa.OpARM64CMPWconst, ssa.OpARM64CMPWconst,
ssa.OpARM64CMNconst, ssa.OpARM64CMNconst,
ssa.OpARM64CMNWconst: ssa.OpARM64CMNWconst:
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_CONST p.From.Type = obj.TYPE_CONST
p.From.Offset = v.AuxInt p.From.Offset = v.AuxInt
p.Reg = v.Args[0].Reg() p.Reg = v.Args[0].Reg()
case ssa.OpARM64CMPshiftLL: case ssa.OpARM64CMPshiftLL:
genshift(v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), 0, arm64.SHIFT_LL, v.AuxInt) genshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), 0, arm64.SHIFT_LL, v.AuxInt)
case ssa.OpARM64CMPshiftRL: case ssa.OpARM64CMPshiftRL:
genshift(v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), 0, arm64.SHIFT_LR, v.AuxInt) genshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), 0, arm64.SHIFT_LR, v.AuxInt)
case ssa.OpARM64CMPshiftRA: case ssa.OpARM64CMPshiftRA:
genshift(v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), 0, arm64.SHIFT_AR, v.AuxInt) genshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), 0, arm64.SHIFT_AR, v.AuxInt)
case ssa.OpARM64MOVDaddr: case ssa.OpARM64MOVDaddr:
p := gc.Prog(arm64.AMOVD) p := s.Prog(arm64.AMOVD)
p.From.Type = obj.TYPE_ADDR p.From.Type = obj.TYPE_ADDR
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg() p.To.Reg = v.Reg()
...@@ -295,7 +295,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -295,7 +295,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
ssa.OpARM64MOVDload, ssa.OpARM64MOVDload,
ssa.OpARM64FMOVSload, ssa.OpARM64FMOVSload,
ssa.OpARM64FMOVDload: ssa.OpARM64FMOVDload:
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg() p.From.Reg = v.Args[0].Reg()
gc.AddAux(&p.From, v) gc.AddAux(&p.From, v)
...@@ -303,7 +303,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -303,7 +303,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.To.Reg = v.Reg() p.To.Reg = v.Reg()
case ssa.OpARM64LDAR, case ssa.OpARM64LDAR,
ssa.OpARM64LDARW: ssa.OpARM64LDARW:
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg() p.From.Reg = v.Args[0].Reg()
gc.AddAux(&p.From, v) gc.AddAux(&p.From, v)
...@@ -317,7 +317,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -317,7 +317,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
ssa.OpARM64FMOVDstore, ssa.OpARM64FMOVDstore,
ssa.OpARM64STLR, ssa.OpARM64STLR,
ssa.OpARM64STLRW: ssa.OpARM64STLRW:
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[1].Reg() p.From.Reg = v.Args[1].Reg()
p.To.Type = obj.TYPE_MEM p.To.Type = obj.TYPE_MEM
...@@ -327,7 +327,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -327,7 +327,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
ssa.OpARM64MOVHstorezero, ssa.OpARM64MOVHstorezero,
ssa.OpARM64MOVWstorezero, ssa.OpARM64MOVWstorezero,
ssa.OpARM64MOVDstorezero: ssa.OpARM64MOVDstorezero:
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = arm64.REGZERO p.From.Reg = arm64.REGZERO
p.To.Type = obj.TYPE_MEM p.To.Type = obj.TYPE_MEM
...@@ -347,18 +347,18 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -347,18 +347,18 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
r0 := v.Args[0].Reg() r0 := v.Args[0].Reg()
r1 := v.Args[1].Reg() r1 := v.Args[1].Reg()
out := v.Reg0() out := v.Reg0()
p := gc.Prog(ld) p := s.Prog(ld)
p.From.Type = obj.TYPE_MEM p.From.Type = obj.TYPE_MEM
p.From.Reg = r0 p.From.Reg = r0
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
p.To.Reg = out p.To.Reg = out
p1 := gc.Prog(st) p1 := s.Prog(st)
p1.From.Type = obj.TYPE_REG p1.From.Type = obj.TYPE_REG
p1.From.Reg = r1 p1.From.Reg = r1
p1.To.Type = obj.TYPE_MEM p1.To.Type = obj.TYPE_MEM
p1.To.Reg = r0 p1.To.Reg = r0
p1.RegTo2 = arm64.REGTMP p1.RegTo2 = arm64.REGTMP
p2 := gc.Prog(arm64.ACBNZ) p2 := s.Prog(arm64.ACBNZ)
p2.From.Type = obj.TYPE_REG p2.From.Type = obj.TYPE_REG
p2.From.Reg = arm64.REGTMP p2.From.Reg = arm64.REGTMP
p2.To.Type = obj.TYPE_BRANCH p2.To.Type = obj.TYPE_BRANCH
...@@ -378,23 +378,23 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -378,23 +378,23 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
r0 := v.Args[0].Reg() r0 := v.Args[0].Reg()
r1 := v.Args[1].Reg() r1 := v.Args[1].Reg()
out := v.Reg0() out := v.Reg0()
p := gc.Prog(ld) p := s.Prog(ld)
p.From.Type = obj.TYPE_MEM p.From.Type = obj.TYPE_MEM
p.From.Reg = r0 p.From.Reg = r0
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
p.To.Reg = out p.To.Reg = out
p1 := gc.Prog(arm64.AADD) p1 := s.Prog(arm64.AADD)
p1.From.Type = obj.TYPE_REG p1.From.Type = obj.TYPE_REG
p1.From.Reg = r1 p1.From.Reg = r1
p1.To.Type = obj.TYPE_REG p1.To.Type = obj.TYPE_REG
p1.To.Reg = out p1.To.Reg = out
p2 := gc.Prog(st) p2 := s.Prog(st)
p2.From.Type = obj.TYPE_REG p2.From.Type = obj.TYPE_REG
p2.From.Reg = out p2.From.Reg = out
p2.To.Type = obj.TYPE_MEM p2.To.Type = obj.TYPE_MEM
p2.To.Reg = r0 p2.To.Reg = r0
p2.RegTo2 = arm64.REGTMP p2.RegTo2 = arm64.REGTMP
p3 := gc.Prog(arm64.ACBNZ) p3 := s.Prog(arm64.ACBNZ)
p3.From.Type = obj.TYPE_REG p3.From.Type = obj.TYPE_REG
p3.From.Reg = arm64.REGTMP p3.From.Reg = arm64.REGTMP
p3.To.Type = obj.TYPE_BRANCH p3.To.Type = obj.TYPE_BRANCH
...@@ -419,29 +419,29 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -419,29 +419,29 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
r1 := v.Args[1].Reg() r1 := v.Args[1].Reg()
r2 := v.Args[2].Reg() r2 := v.Args[2].Reg()
out := v.Reg0() out := v.Reg0()
p := gc.Prog(ld) p := s.Prog(ld)
p.From.Type = obj.TYPE_MEM p.From.Type = obj.TYPE_MEM
p.From.Reg = r0 p.From.Reg = r0
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
p.To.Reg = arm64.REGTMP p.To.Reg = arm64.REGTMP
p1 := gc.Prog(cmp) p1 := s.Prog(cmp)
p1.From.Type = obj.TYPE_REG p1.From.Type = obj.TYPE_REG
p1.From.Reg = r1 p1.From.Reg = r1
p1.Reg = arm64.REGTMP p1.Reg = arm64.REGTMP
p2 := gc.Prog(arm64.ABNE) p2 := s.Prog(arm64.ABNE)
p2.To.Type = obj.TYPE_BRANCH p2.To.Type = obj.TYPE_BRANCH
p3 := gc.Prog(st) p3 := s.Prog(st)
p3.From.Type = obj.TYPE_REG p3.From.Type = obj.TYPE_REG
p3.From.Reg = r2 p3.From.Reg = r2
p3.To.Type = obj.TYPE_MEM p3.To.Type = obj.TYPE_MEM
p3.To.Reg = r0 p3.To.Reg = r0
p3.RegTo2 = arm64.REGTMP p3.RegTo2 = arm64.REGTMP
p4 := gc.Prog(arm64.ACBNZ) p4 := s.Prog(arm64.ACBNZ)
p4.From.Type = obj.TYPE_REG p4.From.Type = obj.TYPE_REG
p4.From.Reg = arm64.REGTMP p4.From.Reg = arm64.REGTMP
p4.To.Type = obj.TYPE_BRANCH p4.To.Type = obj.TYPE_BRANCH
gc.Patch(p4, p) gc.Patch(p4, p)
p5 := gc.Prog(arm64.ACSET) p5 := s.Prog(arm64.ACSET)
p5.From.Type = obj.TYPE_REG // assembler encodes conditional bits in Reg p5.From.Type = obj.TYPE_REG // assembler encodes conditional bits in Reg
p5.From.Reg = arm64.COND_EQ p5.From.Reg = arm64.COND_EQ
p5.To.Type = obj.TYPE_REG p5.To.Type = obj.TYPE_REG
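The LoweredAtomicCas lowering above is a load-linked/store-conditional retry loop: LDAXR the current value, compare it with the expected one and branch out on mismatch, STLXR the new value and retry on a failed store, then CSET the EQ flag into the result register. A plain-Go behavioral model (not the emitted code; the standard library's CompareAndSwap stands in for the SC step):

    package main

    import (
        "fmt"
        "sync/atomic"
    )

    // casLikeLowering follows the shape of the emitted loop:
    // load, compare, conditional store, retry, flag the outcome.
    func casLikeLowering(addr *int64, old, new int64) bool {
        for {
            cur := atomic.LoadInt64(addr) // LDAXR
            if cur != old {               // CMP + BNE out of the loop
                return false // CSET EQ leaves 0
            }
            if atomic.CompareAndSwapInt64(addr, cur, new) { // STLXR; CBNZ Rtmp retries
                return true // CSET EQ leaves 1
            }
        }
    }

    func main() {
        x := int64(1)
        fmt.Println(casLikeLowering(&x, 1, 2), x) // true 2
    }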
...@@ -455,23 +455,23 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -455,23 +455,23 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
// CBNZ Rtmp, -3(PC) // CBNZ Rtmp, -3(PC)
r0 := v.Args[0].Reg() r0 := v.Args[0].Reg()
r1 := v.Args[1].Reg() r1 := v.Args[1].Reg()
p := gc.Prog(arm64.ALDAXRB) p := s.Prog(arm64.ALDAXRB)
p.From.Type = obj.TYPE_MEM p.From.Type = obj.TYPE_MEM
p.From.Reg = r0 p.From.Reg = r0
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
p.To.Reg = arm64.REGTMP p.To.Reg = arm64.REGTMP
p1 := gc.Prog(v.Op.Asm()) p1 := s.Prog(v.Op.Asm())
p1.From.Type = obj.TYPE_REG p1.From.Type = obj.TYPE_REG
p1.From.Reg = r1 p1.From.Reg = r1
p1.To.Type = obj.TYPE_REG p1.To.Type = obj.TYPE_REG
p1.To.Reg = arm64.REGTMP p1.To.Reg = arm64.REGTMP
p2 := gc.Prog(arm64.ASTLXRB) p2 := s.Prog(arm64.ASTLXRB)
p2.From.Type = obj.TYPE_REG p2.From.Type = obj.TYPE_REG
p2.From.Reg = arm64.REGTMP p2.From.Reg = arm64.REGTMP
p2.To.Type = obj.TYPE_MEM p2.To.Type = obj.TYPE_MEM
p2.To.Reg = r0 p2.To.Reg = r0
p2.RegTo2 = arm64.REGTMP p2.RegTo2 = arm64.REGTMP
p3 := gc.Prog(arm64.ACBNZ) p3 := s.Prog(arm64.ACBNZ)
p3.From.Type = obj.TYPE_REG p3.From.Type = obj.TYPE_REG
p3.From.Reg = arm64.REGTMP p3.From.Reg = arm64.REGTMP
p3.To.Type = obj.TYPE_BRANCH p3.To.Type = obj.TYPE_BRANCH
...@@ -499,7 +499,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -499,7 +499,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
if v.Reg() == v.Args[0].Reg() { if v.Reg() == v.Args[0].Reg() {
return return
} }
p := gc.Prog(arm64.AMOVD) p := s.Prog(arm64.AMOVD)
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg() p.From.Reg = v.Args[0].Reg()
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
...@@ -539,7 +539,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -539,7 +539,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
ssa.OpARM64RBITW, ssa.OpARM64RBITW,
ssa.OpARM64CLZ, ssa.OpARM64CLZ,
ssa.OpARM64CLZW: ssa.OpARM64CLZW:
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg() p.From.Reg = v.Args[0].Reg()
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
...@@ -550,7 +550,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -550,7 +550,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
if v.Op == ssa.OpARM64CSELULT { if v.Op == ssa.OpARM64CSELULT {
r1 = v.Args[1].Reg() r1 = v.Args[1].Reg()
} }
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG // assembler encodes conditional bits in Reg p.From.Type = obj.TYPE_REG // assembler encodes conditional bits in Reg
p.From.Reg = arm64.COND_LO p.From.Reg = arm64.COND_LO
p.Reg = v.Args[0].Reg() p.Reg = v.Args[0].Reg()
...@@ -559,13 +559,13 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -559,13 +559,13 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.To.Reg = v.Reg() p.To.Reg = v.Reg()
case ssa.OpARM64DUFFZERO: case ssa.OpARM64DUFFZERO:
// runtime.duffzero expects start address - 8 in R16 // runtime.duffzero expects start address - 8 in R16
p := gc.Prog(arm64.ASUB) p := s.Prog(arm64.ASUB)
p.From.Type = obj.TYPE_CONST p.From.Type = obj.TYPE_CONST
p.From.Offset = 8 p.From.Offset = 8
p.Reg = v.Args[0].Reg() p.Reg = v.Args[0].Reg()
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
p.To.Reg = arm64.REG_R16 p.To.Reg = arm64.REG_R16
p = gc.Prog(obj.ADUFFZERO) p = s.Prog(obj.ADUFFZERO)
p.To.Type = obj.TYPE_MEM p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN p.To.Name = obj.NAME_EXTERN
p.To.Sym = gc.Duffzero p.To.Sym = gc.Duffzero
...@@ -575,22 +575,22 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -575,22 +575,22 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
// CMP Rarg1, R16 // CMP Rarg1, R16
// BLE -2(PC) // BLE -2(PC)
// arg1 is the address of the last element to zero // arg1 is the address of the last element to zero
p := gc.Prog(arm64.AMOVD) p := s.Prog(arm64.AMOVD)
p.Scond = arm64.C_XPOST p.Scond = arm64.C_XPOST
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = arm64.REGZERO p.From.Reg = arm64.REGZERO
p.To.Type = obj.TYPE_MEM p.To.Type = obj.TYPE_MEM
p.To.Reg = arm64.REG_R16 p.To.Reg = arm64.REG_R16
p.To.Offset = 8 p.To.Offset = 8
p2 := gc.Prog(arm64.ACMP) p2 := s.Prog(arm64.ACMP)
p2.From.Type = obj.TYPE_REG p2.From.Type = obj.TYPE_REG
p2.From.Reg = v.Args[1].Reg() p2.From.Reg = v.Args[1].Reg()
p2.Reg = arm64.REG_R16 p2.Reg = arm64.REG_R16
p3 := gc.Prog(arm64.ABLE) p3 := s.Prog(arm64.ABLE)
p3.To.Type = obj.TYPE_BRANCH p3.To.Type = obj.TYPE_BRANCH
gc.Patch(p3, p) gc.Patch(p3, p)
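LoweredZero above stores zero through a post-incremented pointer until the pointer passes arg1, the address of the last element, with the BLE patched back to the store. A behavioral analogue in plain Go, using indices instead of registers and abstracting away the element size:

    package main

    import "fmt"

    // zeroLoop zeroes buf[0..last] with the same loop shape:
    // store, advance, branch back while we have not passed the last element.
    func zeroLoop(buf []uint64) {
        if len(buf) == 0 {
            return
        }
        i, last := 0, len(buf)-1
        for {
            buf[i] = 0 // MOVD.P ZR, 8(R16)
            i++
            if i > last { // CMP Rarg1, R16; BLE back to the store
                break
            }
        }
    }

    func main() {
        b := []uint64{1, 2, 3, 4}
        zeroLoop(b)
        fmt.Println(b) // [0 0 0 0]
    }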
case ssa.OpARM64DUFFCOPY: case ssa.OpARM64DUFFCOPY:
p := gc.Prog(obj.ADUFFCOPY) p := s.Prog(obj.ADUFFCOPY)
p.To.Type = obj.TYPE_MEM p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN p.To.Name = obj.NAME_EXTERN
p.To.Sym = gc.Duffcopy p.To.Sym = gc.Duffcopy
...@@ -601,32 +601,32 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -601,32 +601,32 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
// CMP Rarg2, R16 // CMP Rarg2, R16
// BLE -3(PC) // BLE -3(PC)
// arg2 is the address of the last element of src // arg2 is the address of the last element of src
p := gc.Prog(arm64.AMOVD) p := s.Prog(arm64.AMOVD)
p.Scond = arm64.C_XPOST p.Scond = arm64.C_XPOST
p.From.Type = obj.TYPE_MEM p.From.Type = obj.TYPE_MEM
p.From.Reg = arm64.REG_R16 p.From.Reg = arm64.REG_R16
p.From.Offset = 8 p.From.Offset = 8
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
p.To.Reg = arm64.REGTMP p.To.Reg = arm64.REGTMP
p2 := gc.Prog(arm64.AMOVD) p2 := s.Prog(arm64.AMOVD)
p2.Scond = arm64.C_XPOST p2.Scond = arm64.C_XPOST
p2.From.Type = obj.TYPE_REG p2.From.Type = obj.TYPE_REG
p2.From.Reg = arm64.REGTMP p2.From.Reg = arm64.REGTMP
p2.To.Type = obj.TYPE_MEM p2.To.Type = obj.TYPE_MEM
p2.To.Reg = arm64.REG_R17 p2.To.Reg = arm64.REG_R17
p2.To.Offset = 8 p2.To.Offset = 8
p3 := gc.Prog(arm64.ACMP) p3 := s.Prog(arm64.ACMP)
p3.From.Type = obj.TYPE_REG p3.From.Type = obj.TYPE_REG
p3.From.Reg = v.Args[2].Reg() p3.From.Reg = v.Args[2].Reg()
p3.Reg = arm64.REG_R16 p3.Reg = arm64.REG_R16
p4 := gc.Prog(arm64.ABLE) p4 := s.Prog(arm64.ABLE)
p4.To.Type = obj.TYPE_BRANCH p4.To.Type = obj.TYPE_BRANCH
gc.Patch(p4, p) gc.Patch(p4, p)
case ssa.OpARM64CALLstatic, ssa.OpARM64CALLclosure, ssa.OpARM64CALLinter: case ssa.OpARM64CALLstatic, ssa.OpARM64CALLclosure, ssa.OpARM64CALLinter:
s.Call(v) s.Call(v)
case ssa.OpARM64LoweredNilCheck: case ssa.OpARM64LoweredNilCheck:
// Issue a load which will fault if arg is nil. // Issue a load which will fault if arg is nil.
p := gc.Prog(arm64.AMOVB) p := s.Prog(arm64.AMOVB)
p.From.Type = obj.TYPE_MEM p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg() p.From.Reg = v.Args[0].Reg()
gc.AddAux(&p.From, v) gc.AddAux(&p.From, v)
...@@ -646,7 +646,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -646,7 +646,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
ssa.OpARM64GreaterThanU, ssa.OpARM64GreaterThanU,
ssa.OpARM64GreaterEqualU: ssa.OpARM64GreaterEqualU:
// generate boolean values using CSET // generate boolean values using CSET
p := gc.Prog(arm64.ACSET) p := s.Prog(arm64.ACSET)
p.From.Type = obj.TYPE_REG // assembler encodes conditional bits in Reg p.From.Type = obj.TYPE_REG // assembler encodes conditional bits in Reg
p.From.Reg = condBits[v.Op] p.From.Reg = condBits[v.Op]
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
...@@ -703,7 +703,7 @@ func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) { ...@@ -703,7 +703,7 @@ func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
switch b.Kind { switch b.Kind {
case ssa.BlockPlain: case ssa.BlockPlain:
if b.Succs[0].Block() != next { if b.Succs[0].Block() != next {
p := gc.Prog(obj.AJMP) p := s.Prog(obj.AJMP)
p.To.Type = obj.TYPE_BRANCH p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()}) s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
} }
...@@ -712,27 +712,27 @@ func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) { ...@@ -712,27 +712,27 @@ func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
// defer returns in R0: // defer returns in R0:
// 0 if we should continue executing // 0 if we should continue executing
// 1 if we should jump to deferreturn call // 1 if we should jump to deferreturn call
p := gc.Prog(arm64.ACMP) p := s.Prog(arm64.ACMP)
p.From.Type = obj.TYPE_CONST p.From.Type = obj.TYPE_CONST
p.From.Offset = 0 p.From.Offset = 0
p.Reg = arm64.REG_R0 p.Reg = arm64.REG_R0
p = gc.Prog(arm64.ABNE) p = s.Prog(arm64.ABNE)
p.To.Type = obj.TYPE_BRANCH p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()}) s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
if b.Succs[0].Block() != next { if b.Succs[0].Block() != next {
p := gc.Prog(obj.AJMP) p := s.Prog(obj.AJMP)
p.To.Type = obj.TYPE_BRANCH p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()}) s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
} }
case ssa.BlockExit: case ssa.BlockExit:
gc.Prog(obj.AUNDEF) // tell plive.go that we never reach here s.Prog(obj.AUNDEF) // tell plive.go that we never reach here
case ssa.BlockRet: case ssa.BlockRet:
gc.Prog(obj.ARET) s.Prog(obj.ARET)
case ssa.BlockRetJmp: case ssa.BlockRetJmp:
p := gc.Prog(obj.ARET) p := s.Prog(obj.ARET)
p.To.Type = obj.TYPE_MEM p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN p.To.Name = obj.NAME_EXTERN
p.To.Sym = b.Aux.(*obj.LSym) p.To.Sym = b.Aux.(*obj.LSym)
...@@ -748,18 +748,18 @@ func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) { ...@@ -748,18 +748,18 @@ func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
var p *obj.Prog var p *obj.Prog
switch next { switch next {
case b.Succs[0].Block(): case b.Succs[0].Block():
p = gc.Prog(jmp.invasm) p = s.Prog(jmp.invasm)
p.To.Type = obj.TYPE_BRANCH p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()}) s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
case b.Succs[1].Block(): case b.Succs[1].Block():
p = gc.Prog(jmp.asm) p = s.Prog(jmp.asm)
p.To.Type = obj.TYPE_BRANCH p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()}) s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
default: default:
p = gc.Prog(jmp.asm) p = s.Prog(jmp.asm)
p.To.Type = obj.TYPE_BRANCH p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()}) s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
q := gc.Prog(obj.AJMP) q := s.Prog(obj.AJMP)
q.To.Type = obj.TYPE_BRANCH q.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: q, B: b.Succs[1].Block()}) s.Branches = append(s.Branches, gc.Branch{P: q, B: b.Succs[1].Block()})
} }
......
...@@ -4253,6 +4253,11 @@ type SSAGenState struct { ...@@ -4253,6 +4253,11 @@ type SSAGenState struct {
stackMapIndex map[*ssa.Value]int stackMapIndex map[*ssa.Value]int
} }
// Prog appends a new Prog.
func (s *SSAGenState) Prog(as obj.As) *obj.Prog {
return Prog(as)
}
// Pc returns the current Prog. // Pc returns the current Prog.
func (s *SSAGenState) Pc() *obj.Prog { func (s *SSAGenState) Pc() *obj.Prog {
return pc return pc
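The new SSAGenState.Prog method above is what every s.Prog(...) call in the architecture backends now resolves to; for the moment it simply delegates to the package-level Prog, so the rewrite changes call sites without changing behavior. A self-contained sketch of that funneling shape, with toy types rather than the compiler's real obj package:

    package main

    import "fmt"

    type Prog struct{ As string }

    var emitted []*Prog // stand-in for the package-level Prog list

    // newProg plays the role of the package-level Prog constructor.
    func newProg(as string) *Prog {
        p := &Prog{As: as}
        emitted = append(emitted, p)
        return p
    }

    type SSAGenState struct{}

    // Prog appends a new Prog. Today it only delegates, but callers no
    // longer depend on the package-level function directly.
    func (s *SSAGenState) Prog(as string) *Prog {
        return newProg(as)
    }

    func main() {
        var s SSAGenState
        p := s.Prog("MOVW")
        fmt.Println(p.As, len(emitted)) // MOVW 1
    }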
...@@ -4411,11 +4416,11 @@ type FloatingEQNEJump struct { ...@@ -4411,11 +4416,11 @@ type FloatingEQNEJump struct {
Index int Index int
} }
func oneFPJump(b *ssa.Block, jumps *FloatingEQNEJump, likely ssa.BranchPrediction, branches []Branch) []Branch { func (s *SSAGenState) oneFPJump(b *ssa.Block, jumps *FloatingEQNEJump, likely ssa.BranchPrediction) {
p := Prog(jumps.Jump) p := s.Prog(jumps.Jump)
p.To.Type = obj.TYPE_BRANCH p.To.Type = obj.TYPE_BRANCH
to := jumps.Index to := jumps.Index
branches = append(branches, Branch{p, b.Succs[to].Block()}) s.Branches = append(s.Branches, Branch{p, b.Succs[to].Block()})
if to == 1 { if to == 1 {
likely = -likely likely = -likely
} }
...@@ -4431,22 +4436,21 @@ func oneFPJump(b *ssa.Block, jumps *FloatingEQNEJump, likely ssa.BranchPredictio ...@@ -4431,22 +4436,21 @@ func oneFPJump(b *ssa.Block, jumps *FloatingEQNEJump, likely ssa.BranchPredictio
p.From.Type = obj.TYPE_CONST p.From.Type = obj.TYPE_CONST
p.From.Offset = 1 p.From.Offset = 1
} }
return branches
} }
func SSAGenFPJump(s *SSAGenState, b, next *ssa.Block, jumps *[2][2]FloatingEQNEJump) { func SSAGenFPJump(s *SSAGenState, b, next *ssa.Block, jumps *[2][2]FloatingEQNEJump) {
likely := b.Likely likely := b.Likely
switch next { switch next {
case b.Succs[0].Block(): case b.Succs[0].Block():
s.Branches = oneFPJump(b, &jumps[0][0], likely, s.Branches) s.oneFPJump(b, &jumps[0][0], likely)
s.Branches = oneFPJump(b, &jumps[0][1], likely, s.Branches) s.oneFPJump(b, &jumps[0][1], likely)
case b.Succs[1].Block(): case b.Succs[1].Block():
s.Branches = oneFPJump(b, &jumps[1][0], likely, s.Branches) s.oneFPJump(b, &jumps[1][0], likely)
s.Branches = oneFPJump(b, &jumps[1][1], likely, s.Branches) s.oneFPJump(b, &jumps[1][1], likely)
default: default:
s.Branches = oneFPJump(b, &jumps[1][0], likely, s.Branches) s.oneFPJump(b, &jumps[1][0], likely)
s.Branches = oneFPJump(b, &jumps[1][1], likely, s.Branches) s.oneFPJump(b, &jumps[1][1], likely)
q := Prog(obj.AJMP) q := s.Prog(obj.AJMP)
q.To.Type = obj.TYPE_BRANCH q.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, Branch{q, b.Succs[1].Block()}) s.Branches = append(s.Branches, Branch{q, b.Succs[1].Block()})
} }
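oneFPJump used to take the branches slice as a parameter and return the appended result, which every caller had to re-assign into s.Branches; as a method it appends to s.Branches itself, so the return value and the re-assignments disappear. A sketch of that shape with illustrative types:

    package main

    import "fmt"

    type Branch struct{ Succ string }

    type SSAGenState struct{ Branches []Branch }

    // Before (roughly): func oneFPJump(..., branches []Branch) []Branch
    // After: the method mutates the state it already has access to.
    func (s *SSAGenState) oneFPJump(succ string) {
        s.Branches = append(s.Branches, Branch{Succ: succ})
    }

    func main() {
        var s SSAGenState
        s.oneFPJump("b.Succs[0]")
        s.oneFPJump("b.Succs[1]")
        fmt.Println(len(s.Branches)) // 2
    }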
...@@ -4621,7 +4625,7 @@ func (s *SSAGenState) Call(v *ssa.Value) *obj.Prog { ...@@ -4621,7 +4625,7 @@ func (s *SSAGenState) Call(v *ssa.Value) *obj.Prog {
if !ok { if !ok {
Fatalf("missing stack map index for %v", v.LongString()) Fatalf("missing stack map index for %v", v.LongString())
} }
p := Prog(obj.APCDATA) p := s.Prog(obj.APCDATA)
Addrconst(&p.From, obj.PCDATA_StackMapIndex) Addrconst(&p.From, obj.PCDATA_StackMapIndex)
Addrconst(&p.To, int64(idx)) Addrconst(&p.To, int64(idx))
...@@ -4637,7 +4641,7 @@ func (s *SSAGenState) Call(v *ssa.Value) *obj.Prog { ...@@ -4637,7 +4641,7 @@ func (s *SSAGenState) Call(v *ssa.Value) *obj.Prog {
thearch.Ginsnop() thearch.Ginsnop()
} }
p = Prog(obj.ACALL) p = s.Prog(obj.ACALL)
if sym, ok := v.Aux.(*obj.LSym); ok { if sym, ok := v.Aux.(*obj.LSym); ok {
p.To.Type = obj.TYPE_MEM p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN p.To.Name = obj.NAME_EXTERN
......
...@@ -93,7 +93,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -93,7 +93,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
} }
} }
p := gc.Prog(as) p := s.Prog(as)
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = x p.From.Reg = x
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
...@@ -101,7 +101,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -101,7 +101,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
if isHILO(x) && isHILO(y) || isHILO(x) && isFPreg(y) || isFPreg(x) && isHILO(y) { if isHILO(x) && isHILO(y) || isHILO(x) && isFPreg(y) || isFPreg(x) && isHILO(y) {
// cannot move between special registers, use TMP as intermediate // cannot move between special registers, use TMP as intermediate
p.To.Reg = mips.REGTMP p.To.Reg = mips.REGTMP
p = gc.Prog(mips.AMOVW) p = s.Prog(mips.AMOVW)
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = mips.REGTMP p.From.Reg = mips.REGTMP
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
...@@ -118,14 +118,14 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -118,14 +118,14 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
return return
} }
r := v.Reg() r := v.Reg()
p := gc.Prog(loadByType(v.Type, r)) p := s.Prog(loadByType(v.Type, r))
gc.AddrAuto(&p.From, v.Args[0]) gc.AddrAuto(&p.From, v.Args[0])
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
p.To.Reg = r p.To.Reg = r
if isHILO(r) { if isHILO(r) {
// cannot directly load, load to TMP and move // cannot directly load, load to TMP and move
p.To.Reg = mips.REGTMP p.To.Reg = mips.REGTMP
p = gc.Prog(mips.AMOVW) p = s.Prog(mips.AMOVW)
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = mips.REGTMP p.From.Reg = mips.REGTMP
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
...@@ -139,14 +139,14 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -139,14 +139,14 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
r := v.Args[0].Reg() r := v.Args[0].Reg()
if isHILO(r) { if isHILO(r) {
// cannot directly store, move to TMP and store // cannot directly store, move to TMP and store
p := gc.Prog(mips.AMOVW) p := s.Prog(mips.AMOVW)
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = r p.From.Reg = r
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
p.To.Reg = mips.REGTMP p.To.Reg = mips.REGTMP
r = mips.REGTMP r = mips.REGTMP
} }
p := gc.Prog(storeByType(v.Type, r)) p := s.Prog(storeByType(v.Type, r))
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = r p.From.Reg = r
gc.AddrAuto(&p.To, v) gc.AddrAuto(&p.To, v)
...@@ -168,7 +168,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -168,7 +168,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
ssa.OpMIPSDIVF, ssa.OpMIPSDIVF,
ssa.OpMIPSDIVD, ssa.OpMIPSDIVD,
ssa.OpMIPSMUL: ssa.OpMIPSMUL:
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[1].Reg() p.From.Reg = v.Args[1].Reg()
p.Reg = v.Args[0].Reg() p.Reg = v.Args[0].Reg()
...@@ -176,7 +176,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -176,7 +176,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.To.Reg = v.Reg() p.To.Reg = v.Reg()
case ssa.OpMIPSSGT, case ssa.OpMIPSSGT,
ssa.OpMIPSSGTU: ssa.OpMIPSSGTU:
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg() p.From.Reg = v.Args[0].Reg()
p.Reg = v.Args[1].Reg() p.Reg = v.Args[1].Reg()
...@@ -184,7 +184,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -184,7 +184,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.To.Reg = v.Reg() p.To.Reg = v.Reg()
case ssa.OpMIPSSGTzero, case ssa.OpMIPSSGTzero,
ssa.OpMIPSSGTUzero: ssa.OpMIPSSGTUzero:
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg() p.From.Reg = v.Args[0].Reg()
p.Reg = mips.REGZERO p.Reg = mips.REGZERO
...@@ -201,7 +201,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -201,7 +201,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
ssa.OpMIPSSRAconst, ssa.OpMIPSSRAconst,
ssa.OpMIPSSGTconst, ssa.OpMIPSSGTconst,
ssa.OpMIPSSGTUconst: ssa.OpMIPSSGTUconst:
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_CONST p.From.Type = obj.TYPE_CONST
p.From.Offset = v.AuxInt p.From.Offset = v.AuxInt
p.Reg = v.Args[0].Reg() p.Reg = v.Args[0].Reg()
...@@ -212,13 +212,13 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -212,13 +212,13 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
ssa.OpMIPSDIV, ssa.OpMIPSDIV,
ssa.OpMIPSDIVU: ssa.OpMIPSDIVU:
// result in hi,lo // result in hi,lo
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[1].Reg() p.From.Reg = v.Args[1].Reg()
p.Reg = v.Args[0].Reg() p.Reg = v.Args[0].Reg()
case ssa.OpMIPSMOVWconst: case ssa.OpMIPSMOVWconst:
r := v.Reg() r := v.Reg()
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_CONST p.From.Type = obj.TYPE_CONST
p.From.Offset = v.AuxInt p.From.Offset = v.AuxInt
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
...@@ -226,7 +226,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -226,7 +226,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
if isFPreg(r) || isHILO(r) { if isFPreg(r) || isHILO(r) {
// cannot move into FP or special registers, use TMP as intermediate // cannot move into FP or special registers, use TMP as intermediate
p.To.Reg = mips.REGTMP p.To.Reg = mips.REGTMP
p = gc.Prog(mips.AMOVW) p = s.Prog(mips.AMOVW)
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = mips.REGTMP p.From.Reg = mips.REGTMP
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
...@@ -234,7 +234,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -234,7 +234,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
} }
case ssa.OpMIPSMOVFconst, case ssa.OpMIPSMOVFconst,
ssa.OpMIPSMOVDconst: ssa.OpMIPSMOVDconst:
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_FCONST p.From.Type = obj.TYPE_FCONST
p.From.Val = math.Float64frombits(uint64(v.AuxInt)) p.From.Val = math.Float64frombits(uint64(v.AuxInt))
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
...@@ -243,7 +243,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -243,7 +243,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
if v.Reg() != v.Args[0].Reg() { if v.Reg() != v.Args[0].Reg() {
v.Fatalf("input[0] and output not in same register %s", v.LongString()) v.Fatalf("input[0] and output not in same register %s", v.LongString())
} }
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[2].Reg() p.From.Reg = v.Args[2].Reg()
p.Reg = v.Args[1].Reg() p.Reg = v.Args[1].Reg()
...@@ -253,7 +253,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -253,7 +253,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
if v.Reg() != v.Args[0].Reg() { if v.Reg() != v.Args[0].Reg() {
v.Fatalf("input[0] and output not in same register %s", v.LongString()) v.Fatalf("input[0] and output not in same register %s", v.LongString())
} }
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[1].Reg() p.From.Reg = v.Args[1].Reg()
p.Reg = mips.REGZERO p.Reg = mips.REGZERO
...@@ -265,12 +265,12 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -265,12 +265,12 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
ssa.OpMIPSCMPGED, ssa.OpMIPSCMPGED,
ssa.OpMIPSCMPGTF, ssa.OpMIPSCMPGTF,
ssa.OpMIPSCMPGTD: ssa.OpMIPSCMPGTD:
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg() p.From.Reg = v.Args[0].Reg()
p.Reg = v.Args[1].Reg() p.Reg = v.Args[1].Reg()
case ssa.OpMIPSMOVWaddr: case ssa.OpMIPSMOVWaddr:
p := gc.Prog(mips.AMOVW) p := s.Prog(mips.AMOVW)
p.From.Type = obj.TYPE_ADDR p.From.Type = obj.TYPE_ADDR
var wantreg string var wantreg string
// MOVW $sym+off(base), R // MOVW $sym+off(base), R
...@@ -305,7 +305,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -305,7 +305,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
ssa.OpMIPSMOVWload, ssa.OpMIPSMOVWload,
ssa.OpMIPSMOVFload, ssa.OpMIPSMOVFload,
ssa.OpMIPSMOVDload: ssa.OpMIPSMOVDload:
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg() p.From.Reg = v.Args[0].Reg()
gc.AddAux(&p.From, v) gc.AddAux(&p.From, v)
...@@ -316,7 +316,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -316,7 +316,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
ssa.OpMIPSMOVWstore, ssa.OpMIPSMOVWstore,
ssa.OpMIPSMOVFstore, ssa.OpMIPSMOVFstore,
ssa.OpMIPSMOVDstore: ssa.OpMIPSMOVDstore:
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[1].Reg() p.From.Reg = v.Args[1].Reg()
p.To.Type = obj.TYPE_MEM p.To.Type = obj.TYPE_MEM
...@@ -325,7 +325,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -325,7 +325,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
case ssa.OpMIPSMOVBstorezero, case ssa.OpMIPSMOVBstorezero,
ssa.OpMIPSMOVHstorezero, ssa.OpMIPSMOVHstorezero,
ssa.OpMIPSMOVWstorezero: ssa.OpMIPSMOVWstorezero:
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = mips.REGZERO p.From.Reg = mips.REGZERO
p.To.Type = obj.TYPE_MEM p.To.Type = obj.TYPE_MEM
...@@ -350,7 +350,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -350,7 +350,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
if v.Reg() == v.Args[0].Reg() { if v.Reg() == v.Args[0].Reg() {
return return
} }
p := gc.Prog(mips.AMOVW) p := s.Prog(mips.AMOVW)
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg() p.From.Reg = v.Args[0].Reg()
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
...@@ -370,14 +370,14 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -370,14 +370,14 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
ssa.OpMIPSNEGD, ssa.OpMIPSNEGD,
ssa.OpMIPSSQRTD, ssa.OpMIPSSQRTD,
ssa.OpMIPSCLZ: ssa.OpMIPSCLZ:
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg() p.From.Reg = v.Args[0].Reg()
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg() p.To.Reg = v.Reg()
case ssa.OpMIPSNEG: case ssa.OpMIPSNEG:
// SUB from REGZERO // SUB from REGZERO
p := gc.Prog(mips.ASUBU) p := s.Prog(mips.ASUBU)
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg() p.From.Reg = v.Args[0].Reg()
p.Reg = mips.REGZERO p.Reg = mips.REGZERO
...@@ -402,23 +402,23 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -402,23 +402,23 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
sz = 1 sz = 1
mov = mips.AMOVB mov = mips.AMOVB
} }
p := gc.Prog(mips.ASUBU) p := s.Prog(mips.ASUBU)
p.From.Type = obj.TYPE_CONST p.From.Type = obj.TYPE_CONST
p.From.Offset = sz p.From.Offset = sz
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
p.To.Reg = mips.REG_R1 p.To.Reg = mips.REG_R1
p2 := gc.Prog(mov) p2 := s.Prog(mov)
p2.From.Type = obj.TYPE_REG p2.From.Type = obj.TYPE_REG
p2.From.Reg = mips.REGZERO p2.From.Reg = mips.REGZERO
p2.To.Type = obj.TYPE_MEM p2.To.Type = obj.TYPE_MEM
p2.To.Reg = mips.REG_R1 p2.To.Reg = mips.REG_R1
p2.To.Offset = sz p2.To.Offset = sz
p3 := gc.Prog(mips.AADDU) p3 := s.Prog(mips.AADDU)
p3.From.Type = obj.TYPE_CONST p3.From.Type = obj.TYPE_CONST
p3.From.Offset = sz p3.From.Offset = sz
p3.To.Type = obj.TYPE_REG p3.To.Type = obj.TYPE_REG
p3.To.Reg = mips.REG_R1 p3.To.Reg = mips.REG_R1
p4 := gc.Prog(mips.ABNE) p4 := s.Prog(mips.ABNE)
p4.From.Type = obj.TYPE_REG p4.From.Type = obj.TYPE_REG
p4.From.Reg = v.Args[1].Reg() p4.From.Reg = v.Args[1].Reg()
p4.Reg = mips.REG_R1 p4.Reg = mips.REG_R1
...@@ -445,33 +445,33 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -445,33 +445,33 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
sz = 1 sz = 1
mov = mips.AMOVB mov = mips.AMOVB
} }
p := gc.Prog(mips.ASUBU) p := s.Prog(mips.ASUBU)
p.From.Type = obj.TYPE_CONST p.From.Type = obj.TYPE_CONST
p.From.Offset = sz p.From.Offset = sz
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
p.To.Reg = mips.REG_R1 p.To.Reg = mips.REG_R1
p2 := gc.Prog(mov) p2 := s.Prog(mov)
p2.From.Type = obj.TYPE_MEM p2.From.Type = obj.TYPE_MEM
p2.From.Reg = mips.REG_R1 p2.From.Reg = mips.REG_R1
p2.From.Offset = sz p2.From.Offset = sz
p2.To.Type = obj.TYPE_REG p2.To.Type = obj.TYPE_REG
p2.To.Reg = mips.REGTMP p2.To.Reg = mips.REGTMP
p3 := gc.Prog(mov) p3 := s.Prog(mov)
p3.From.Type = obj.TYPE_REG p3.From.Type = obj.TYPE_REG
p3.From.Reg = mips.REGTMP p3.From.Reg = mips.REGTMP
p3.To.Type = obj.TYPE_MEM p3.To.Type = obj.TYPE_MEM
p3.To.Reg = mips.REG_R2 p3.To.Reg = mips.REG_R2
p4 := gc.Prog(mips.AADDU) p4 := s.Prog(mips.AADDU)
p4.From.Type = obj.TYPE_CONST p4.From.Type = obj.TYPE_CONST
p4.From.Offset = sz p4.From.Offset = sz
p4.To.Type = obj.TYPE_REG p4.To.Type = obj.TYPE_REG
p4.To.Reg = mips.REG_R1 p4.To.Reg = mips.REG_R1
p5 := gc.Prog(mips.AADDU) p5 := s.Prog(mips.AADDU)
p5.From.Type = obj.TYPE_CONST p5.From.Type = obj.TYPE_CONST
p5.From.Offset = sz p5.From.Offset = sz
p5.To.Type = obj.TYPE_REG p5.To.Type = obj.TYPE_REG
p5.To.Reg = mips.REG_R2 p5.To.Reg = mips.REG_R2
p6 := gc.Prog(mips.ABNE) p6 := s.Prog(mips.ABNE)
p6.From.Type = obj.TYPE_REG p6.From.Type = obj.TYPE_REG
p6.From.Reg = v.Args[2].Reg() p6.From.Reg = v.Args[2].Reg()
p6.Reg = mips.REG_R1 p6.Reg = mips.REG_R1
...@@ -480,35 +480,35 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -480,35 +480,35 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
case ssa.OpMIPSCALLstatic, ssa.OpMIPSCALLclosure, ssa.OpMIPSCALLinter: case ssa.OpMIPSCALLstatic, ssa.OpMIPSCALLclosure, ssa.OpMIPSCALLinter:
s.Call(v) s.Call(v)
case ssa.OpMIPSLoweredAtomicLoad: case ssa.OpMIPSLoweredAtomicLoad:
gc.Prog(mips.ASYNC) s.Prog(mips.ASYNC)
p := gc.Prog(mips.AMOVW) p := s.Prog(mips.AMOVW)
p.From.Type = obj.TYPE_MEM p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg() p.From.Reg = v.Args[0].Reg()
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg0() p.To.Reg = v.Reg0()
gc.Prog(mips.ASYNC) s.Prog(mips.ASYNC)
case ssa.OpMIPSLoweredAtomicStore: case ssa.OpMIPSLoweredAtomicStore:
gc.Prog(mips.ASYNC) s.Prog(mips.ASYNC)
p := gc.Prog(mips.AMOVW) p := s.Prog(mips.AMOVW)
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[1].Reg() p.From.Reg = v.Args[1].Reg()
p.To.Type = obj.TYPE_MEM p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg() p.To.Reg = v.Args[0].Reg()
gc.Prog(mips.ASYNC) s.Prog(mips.ASYNC)
case ssa.OpMIPSLoweredAtomicStorezero: case ssa.OpMIPSLoweredAtomicStorezero:
gc.Prog(mips.ASYNC) s.Prog(mips.ASYNC)
p := gc.Prog(mips.AMOVW) p := s.Prog(mips.AMOVW)
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = mips.REGZERO p.From.Reg = mips.REGZERO
p.To.Type = obj.TYPE_MEM p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg() p.To.Reg = v.Args[0].Reg()
gc.Prog(mips.ASYNC) s.Prog(mips.ASYNC)
case ssa.OpMIPSLoweredAtomicExchange: case ssa.OpMIPSLoweredAtomicExchange:
// SYNC // SYNC
// MOVW Rarg1, Rtmp // MOVW Rarg1, Rtmp
...@@ -516,33 +516,33 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -516,33 +516,33 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
// SC Rtmp, (Rarg0) // SC Rtmp, (Rarg0)
// BEQ Rtmp, -3(PC) // BEQ Rtmp, -3(PC)
// SYNC // SYNC
gc.Prog(mips.ASYNC) s.Prog(mips.ASYNC)
p := gc.Prog(mips.AMOVW) p := s.Prog(mips.AMOVW)
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[1].Reg() p.From.Reg = v.Args[1].Reg()
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
p.To.Reg = mips.REGTMP p.To.Reg = mips.REGTMP
p1 := gc.Prog(mips.ALL) p1 := s.Prog(mips.ALL)
p1.From.Type = obj.TYPE_MEM p1.From.Type = obj.TYPE_MEM
p1.From.Reg = v.Args[0].Reg() p1.From.Reg = v.Args[0].Reg()
p1.To.Type = obj.TYPE_REG p1.To.Type = obj.TYPE_REG
p1.To.Reg = v.Reg0() p1.To.Reg = v.Reg0()
p2 := gc.Prog(mips.ASC) p2 := s.Prog(mips.ASC)
p2.From.Type = obj.TYPE_REG p2.From.Type = obj.TYPE_REG
p2.From.Reg = mips.REGTMP p2.From.Reg = mips.REGTMP
p2.To.Type = obj.TYPE_MEM p2.To.Type = obj.TYPE_MEM
p2.To.Reg = v.Args[0].Reg() p2.To.Reg = v.Args[0].Reg()
p3 := gc.Prog(mips.ABEQ) p3 := s.Prog(mips.ABEQ)
p3.From.Type = obj.TYPE_REG p3.From.Type = obj.TYPE_REG
p3.From.Reg = mips.REGTMP p3.From.Reg = mips.REGTMP
p3.To.Type = obj.TYPE_BRANCH p3.To.Type = obj.TYPE_BRANCH
gc.Patch(p3, p) gc.Patch(p3, p)
gc.Prog(mips.ASYNC) s.Prog(mips.ASYNC)
case ssa.OpMIPSLoweredAtomicAdd: case ssa.OpMIPSLoweredAtomicAdd:
// SYNC // SYNC
// LL (Rarg0), Rout // LL (Rarg0), Rout
...@@ -551,36 +551,36 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -551,36 +551,36 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
// BEQ Rtmp, -3(PC) // BEQ Rtmp, -3(PC)
// SYNC // SYNC
// ADDU Rarg1, Rout // ADDU Rarg1, Rout
gc.Prog(mips.ASYNC) s.Prog(mips.ASYNC)
p := gc.Prog(mips.ALL) p := s.Prog(mips.ALL)
p.From.Type = obj.TYPE_MEM p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg() p.From.Reg = v.Args[0].Reg()
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg0() p.To.Reg = v.Reg0()
p1 := gc.Prog(mips.AADDU) p1 := s.Prog(mips.AADDU)
p1.From.Type = obj.TYPE_REG p1.From.Type = obj.TYPE_REG
p1.From.Reg = v.Args[1].Reg() p1.From.Reg = v.Args[1].Reg()
p1.Reg = v.Reg0() p1.Reg = v.Reg0()
p1.To.Type = obj.TYPE_REG p1.To.Type = obj.TYPE_REG
p1.To.Reg = mips.REGTMP p1.To.Reg = mips.REGTMP
p2 := gc.Prog(mips.ASC) p2 := s.Prog(mips.ASC)
p2.From.Type = obj.TYPE_REG p2.From.Type = obj.TYPE_REG
p2.From.Reg = mips.REGTMP p2.From.Reg = mips.REGTMP
p2.To.Type = obj.TYPE_MEM p2.To.Type = obj.TYPE_MEM
p2.To.Reg = v.Args[0].Reg() p2.To.Reg = v.Args[0].Reg()
p3 := gc.Prog(mips.ABEQ) p3 := s.Prog(mips.ABEQ)
p3.From.Type = obj.TYPE_REG p3.From.Type = obj.TYPE_REG
p3.From.Reg = mips.REGTMP p3.From.Reg = mips.REGTMP
p3.To.Type = obj.TYPE_BRANCH p3.To.Type = obj.TYPE_BRANCH
gc.Patch(p3, p) gc.Patch(p3, p)
gc.Prog(mips.ASYNC) s.Prog(mips.ASYNC)
p4 := gc.Prog(mips.AADDU) p4 := s.Prog(mips.AADDU)
p4.From.Type = obj.TYPE_REG p4.From.Type = obj.TYPE_REG
p4.From.Reg = v.Args[1].Reg() p4.From.Reg = v.Args[1].Reg()
p4.Reg = v.Reg0() p4.Reg = v.Reg0()
...@@ -595,36 +595,36 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -595,36 +595,36 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
// BEQ Rtmp, -3(PC) // BEQ Rtmp, -3(PC)
// SYNC // SYNC
// ADDU $auxInt, Rout // ADDU $auxInt, Rout
gc.Prog(mips.ASYNC) s.Prog(mips.ASYNC)
p := gc.Prog(mips.ALL) p := s.Prog(mips.ALL)
p.From.Type = obj.TYPE_MEM p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg() p.From.Reg = v.Args[0].Reg()
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg0() p.To.Reg = v.Reg0()
p1 := gc.Prog(mips.AADDU) p1 := s.Prog(mips.AADDU)
p1.From.Type = obj.TYPE_CONST p1.From.Type = obj.TYPE_CONST
p1.From.Offset = v.AuxInt p1.From.Offset = v.AuxInt
p1.Reg = v.Reg0() p1.Reg = v.Reg0()
p1.To.Type = obj.TYPE_REG p1.To.Type = obj.TYPE_REG
p1.To.Reg = mips.REGTMP p1.To.Reg = mips.REGTMP
p2 := gc.Prog(mips.ASC) p2 := s.Prog(mips.ASC)
p2.From.Type = obj.TYPE_REG p2.From.Type = obj.TYPE_REG
p2.From.Reg = mips.REGTMP p2.From.Reg = mips.REGTMP
p2.To.Type = obj.TYPE_MEM p2.To.Type = obj.TYPE_MEM
p2.To.Reg = v.Args[0].Reg() p2.To.Reg = v.Args[0].Reg()
p3 := gc.Prog(mips.ABEQ) p3 := s.Prog(mips.ABEQ)
p3.From.Type = obj.TYPE_REG p3.From.Type = obj.TYPE_REG
p3.From.Reg = mips.REGTMP p3.From.Reg = mips.REGTMP
p3.To.Type = obj.TYPE_BRANCH p3.To.Type = obj.TYPE_BRANCH
gc.Patch(p3, p) gc.Patch(p3, p)
gc.Prog(mips.ASYNC) s.Prog(mips.ASYNC)
p4 := gc.Prog(mips.AADDU) p4 := s.Prog(mips.AADDU)
p4.From.Type = obj.TYPE_CONST p4.From.Type = obj.TYPE_CONST
p4.From.Offset = v.AuxInt p4.From.Offset = v.AuxInt
p4.Reg = v.Reg0() p4.Reg = v.Reg0()
...@@ -639,34 +639,34 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -639,34 +639,34 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
// SC Rtmp, (Rarg0) // SC Rtmp, (Rarg0)
// BEQ Rtmp, -3(PC) // BEQ Rtmp, -3(PC)
// SYNC // SYNC
gc.Prog(mips.ASYNC) s.Prog(mips.ASYNC)
p := gc.Prog(mips.ALL) p := s.Prog(mips.ALL)
p.From.Type = obj.TYPE_MEM p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg() p.From.Reg = v.Args[0].Reg()
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
p.To.Reg = mips.REGTMP p.To.Reg = mips.REGTMP
p1 := gc.Prog(v.Op.Asm()) p1 := s.Prog(v.Op.Asm())
p1.From.Type = obj.TYPE_REG p1.From.Type = obj.TYPE_REG
p1.From.Reg = v.Args[1].Reg() p1.From.Reg = v.Args[1].Reg()
p1.Reg = mips.REGTMP p1.Reg = mips.REGTMP
p1.To.Type = obj.TYPE_REG p1.To.Type = obj.TYPE_REG
p1.To.Reg = mips.REGTMP p1.To.Reg = mips.REGTMP
p2 := gc.Prog(mips.ASC) p2 := s.Prog(mips.ASC)
p2.From.Type = obj.TYPE_REG p2.From.Type = obj.TYPE_REG
p2.From.Reg = mips.REGTMP p2.From.Reg = mips.REGTMP
p2.To.Type = obj.TYPE_MEM p2.To.Type = obj.TYPE_MEM
p2.To.Reg = v.Args[0].Reg() p2.To.Reg = v.Args[0].Reg()
p3 := gc.Prog(mips.ABEQ) p3 := s.Prog(mips.ABEQ)
p3.From.Type = obj.TYPE_REG p3.From.Type = obj.TYPE_REG
p3.From.Reg = mips.REGTMP p3.From.Reg = mips.REGTMP
p3.To.Type = obj.TYPE_BRANCH p3.To.Type = obj.TYPE_BRANCH
gc.Patch(p3, p) gc.Patch(p3, p)
gc.Prog(mips.ASYNC) s.Prog(mips.ASYNC)
case ssa.OpMIPSLoweredAtomicCas: case ssa.OpMIPSLoweredAtomicCas:
// MOVW $0, Rout // MOVW $0, Rout
...@@ -677,52 +677,52 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -677,52 +677,52 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
// SC Rout, (Rarg0) // SC Rout, (Rarg0)
// BEQ Rout, -4(PC) // BEQ Rout, -4(PC)
// SYNC // SYNC
p := gc.Prog(mips.AMOVW) p := s.Prog(mips.AMOVW)
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = mips.REGZERO p.From.Reg = mips.REGZERO
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg0() p.To.Reg = v.Reg0()
gc.Prog(mips.ASYNC) s.Prog(mips.ASYNC)
p1 := gc.Prog(mips.ALL) p1 := s.Prog(mips.ALL)
p1.From.Type = obj.TYPE_MEM p1.From.Type = obj.TYPE_MEM
p1.From.Reg = v.Args[0].Reg() p1.From.Reg = v.Args[0].Reg()
p1.To.Type = obj.TYPE_REG p1.To.Type = obj.TYPE_REG
p1.To.Reg = mips.REGTMP p1.To.Reg = mips.REGTMP
p2 := gc.Prog(mips.ABNE) p2 := s.Prog(mips.ABNE)
p2.From.Type = obj.TYPE_REG p2.From.Type = obj.TYPE_REG
p2.From.Reg = v.Args[1].Reg() p2.From.Reg = v.Args[1].Reg()
p2.Reg = mips.REGTMP p2.Reg = mips.REGTMP
p2.To.Type = obj.TYPE_BRANCH p2.To.Type = obj.TYPE_BRANCH
p3 := gc.Prog(mips.AMOVW) p3 := s.Prog(mips.AMOVW)
p3.From.Type = obj.TYPE_REG p3.From.Type = obj.TYPE_REG
p3.From.Reg = v.Args[2].Reg() p3.From.Reg = v.Args[2].Reg()
p3.To.Type = obj.TYPE_REG p3.To.Type = obj.TYPE_REG
p3.To.Reg = v.Reg0() p3.To.Reg = v.Reg0()
p4 := gc.Prog(mips.ASC) p4 := s.Prog(mips.ASC)
p4.From.Type = obj.TYPE_REG p4.From.Type = obj.TYPE_REG
p4.From.Reg = v.Reg0() p4.From.Reg = v.Reg0()
p4.To.Type = obj.TYPE_MEM p4.To.Type = obj.TYPE_MEM
p4.To.Reg = v.Args[0].Reg() p4.To.Reg = v.Args[0].Reg()
p5 := gc.Prog(mips.ABEQ) p5 := s.Prog(mips.ABEQ)
p5.From.Type = obj.TYPE_REG p5.From.Type = obj.TYPE_REG
p5.From.Reg = v.Reg0() p5.From.Reg = v.Reg0()
p5.To.Type = obj.TYPE_BRANCH p5.To.Type = obj.TYPE_BRANCH
gc.Patch(p5, p1) gc.Patch(p5, p1)
gc.Prog(mips.ASYNC) s.Prog(mips.ASYNC)
p6 := gc.Prog(obj.ANOP) p6 := s.Prog(obj.ANOP)
gc.Patch(p2, p6) gc.Patch(p2, p6)
case ssa.OpMIPSLoweredNilCheck: case ssa.OpMIPSLoweredNilCheck:
// Issue a load which will fault if arg is nil. // Issue a load which will fault if arg is nil.
p := gc.Prog(mips.AMOVB) p := s.Prog(mips.AMOVB)
p.From.Type = obj.TYPE_MEM p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg() p.From.Reg = v.Args[0].Reg()
gc.AddAux(&p.From, v) gc.AddAux(&p.From, v)
...@@ -740,12 +740,12 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -740,12 +740,12 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
if v.Op == ssa.OpMIPSFPFlagFalse { if v.Op == ssa.OpMIPSFPFlagFalse {
cmov = mips.ACMOVT cmov = mips.ACMOVT
} }
p := gc.Prog(mips.AMOVW) p := s.Prog(mips.AMOVW)
p.From.Type = obj.TYPE_CONST p.From.Type = obj.TYPE_CONST
p.From.Offset = 1 p.From.Offset = 1
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg() p.To.Reg = v.Reg()
p1 := gc.Prog(cmov) p1 := s.Prog(cmov)
p1.From.Type = obj.TYPE_REG p1.From.Type = obj.TYPE_REG
p1.From.Reg = mips.REGZERO p1.From.Reg = mips.REGZERO
p1.To.Type = obj.TYPE_REG p1.To.Type = obj.TYPE_REG
...@@ -776,7 +776,7 @@ func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) { ...@@ -776,7 +776,7 @@ func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
switch b.Kind { switch b.Kind {
case ssa.BlockPlain: case ssa.BlockPlain:
if b.Succs[0].Block() != next { if b.Succs[0].Block() != next {
p := gc.Prog(obj.AJMP) p := s.Prog(obj.AJMP)
p.To.Type = obj.TYPE_BRANCH p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()}) s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
} }
...@@ -784,23 +784,23 @@ func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) { ...@@ -784,23 +784,23 @@ func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
// defer returns in R1: // defer returns in R1:
// 0 if we should continue executing // 0 if we should continue executing
// 1 if we should jump to deferreturn call // 1 if we should jump to deferreturn call
p := gc.Prog(mips.ABNE) p := s.Prog(mips.ABNE)
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = mips.REGZERO p.From.Reg = mips.REGZERO
p.Reg = mips.REG_R1 p.Reg = mips.REG_R1
p.To.Type = obj.TYPE_BRANCH p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()}) s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
if b.Succs[0].Block() != next { if b.Succs[0].Block() != next {
p := gc.Prog(obj.AJMP) p := s.Prog(obj.AJMP)
p.To.Type = obj.TYPE_BRANCH p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()}) s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
} }
case ssa.BlockExit: case ssa.BlockExit:
gc.Prog(obj.AUNDEF) // tell plive.go that we never reach here s.Prog(obj.AUNDEF) // tell plive.go that we never reach here
case ssa.BlockRet: case ssa.BlockRet:
gc.Prog(obj.ARET) s.Prog(obj.ARET)
case ssa.BlockRetJmp: case ssa.BlockRetJmp:
p := gc.Prog(obj.ARET) p := s.Prog(obj.ARET)
p.To.Type = obj.TYPE_MEM p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN p.To.Name = obj.NAME_EXTERN
p.To.Sym = b.Aux.(*obj.LSym) p.To.Sym = b.Aux.(*obj.LSym)
...@@ -812,18 +812,18 @@ func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) { ...@@ -812,18 +812,18 @@ func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
var p *obj.Prog var p *obj.Prog
switch next { switch next {
case b.Succs[0].Block(): case b.Succs[0].Block():
p = gc.Prog(jmp.invasm) p = s.Prog(jmp.invasm)
p.To.Type = obj.TYPE_BRANCH p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()}) s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
case b.Succs[1].Block(): case b.Succs[1].Block():
p = gc.Prog(jmp.asm) p = s.Prog(jmp.asm)
p.To.Type = obj.TYPE_BRANCH p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()}) s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
default: default:
p = gc.Prog(jmp.asm) p = s.Prog(jmp.asm)
p.To.Type = obj.TYPE_BRANCH p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()}) s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
q := gc.Prog(obj.AJMP) q := s.Prog(obj.AJMP)
q.To.Type = obj.TYPE_BRANCH q.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: q, B: b.Succs[1].Block()}) s.Branches = append(s.Branches, gc.Branch{P: q, B: b.Succs[1].Block()})
} }
......
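The mips64 and ppc64 files below receive the same mechanical rewrite as the file above: every call to the package-level gc.Prog becomes a call on the SSAGenState that ssaGenValue and ssaGenBlock already receive. The backends depend only on the method's signature; a minimal sketch of the wrapper, assuming it simply forwards to the existing package-level constructor (its definition is not part of this diff), is:

	// In package gc. Sketch only: assumed to delegate to the current
	// package-level constructor while the Prog-related globals are
	// phased out step by step.
	func (s *SSAGenState) Prog(as obj.As) *obj.Prog {
		return Prog(as)
	}
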
...@@ -96,7 +96,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -96,7 +96,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
if isFPreg(x) && isFPreg(y) { if isFPreg(x) && isFPreg(y) {
as = mips.AMOVD as = mips.AMOVD
} }
p := gc.Prog(as) p := s.Prog(as)
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = x p.From.Reg = x
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
...@@ -104,7 +104,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -104,7 +104,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
if isHILO(x) && isHILO(y) || isHILO(x) && isFPreg(y) || isFPreg(x) && isHILO(y) { if isHILO(x) && isHILO(y) || isHILO(x) && isFPreg(y) || isFPreg(x) && isHILO(y) {
// cannot move between special registers, use TMP as intermediate // cannot move between special registers, use TMP as intermediate
p.To.Reg = mips.REGTMP p.To.Reg = mips.REGTMP
p = gc.Prog(mips.AMOVV) p = s.Prog(mips.AMOVV)
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = mips.REGTMP p.From.Reg = mips.REGTMP
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
...@@ -121,14 +121,14 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -121,14 +121,14 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
return return
} }
r := v.Reg() r := v.Reg()
p := gc.Prog(loadByType(v.Type, r)) p := s.Prog(loadByType(v.Type, r))
gc.AddrAuto(&p.From, v.Args[0]) gc.AddrAuto(&p.From, v.Args[0])
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
p.To.Reg = r p.To.Reg = r
if isHILO(r) { if isHILO(r) {
// cannot directly load, load to TMP and move // cannot directly load, load to TMP and move
p.To.Reg = mips.REGTMP p.To.Reg = mips.REGTMP
p = gc.Prog(mips.AMOVV) p = s.Prog(mips.AMOVV)
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = mips.REGTMP p.From.Reg = mips.REGTMP
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
...@@ -142,14 +142,14 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -142,14 +142,14 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
r := v.Args[0].Reg() r := v.Args[0].Reg()
if isHILO(r) { if isHILO(r) {
// cannot directly store, move to TMP and store // cannot directly store, move to TMP and store
p := gc.Prog(mips.AMOVV) p := s.Prog(mips.AMOVV)
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = r p.From.Reg = r
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
p.To.Reg = mips.REGTMP p.To.Reg = mips.REGTMP
r = mips.REGTMP r = mips.REGTMP
} }
p := gc.Prog(storeByType(v.Type, r)) p := s.Prog(storeByType(v.Type, r))
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = r p.From.Reg = r
gc.AddrAuto(&p.To, v) gc.AddrAuto(&p.To, v)
...@@ -170,7 +170,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -170,7 +170,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
ssa.OpMIPS64MULD, ssa.OpMIPS64MULD,
ssa.OpMIPS64DIVF, ssa.OpMIPS64DIVF,
ssa.OpMIPS64DIVD: ssa.OpMIPS64DIVD:
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[1].Reg() p.From.Reg = v.Args[1].Reg()
p.Reg = v.Args[0].Reg() p.Reg = v.Args[0].Reg()
...@@ -178,7 +178,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -178,7 +178,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.To.Reg = v.Reg() p.To.Reg = v.Reg()
case ssa.OpMIPS64SGT, case ssa.OpMIPS64SGT,
ssa.OpMIPS64SGTU: ssa.OpMIPS64SGTU:
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg() p.From.Reg = v.Args[0].Reg()
p.Reg = v.Args[1].Reg() p.Reg = v.Args[1].Reg()
...@@ -195,7 +195,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -195,7 +195,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
ssa.OpMIPS64SRAVconst, ssa.OpMIPS64SRAVconst,
ssa.OpMIPS64SGTconst, ssa.OpMIPS64SGTconst,
ssa.OpMIPS64SGTUconst: ssa.OpMIPS64SGTUconst:
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_CONST p.From.Type = obj.TYPE_CONST
p.From.Offset = v.AuxInt p.From.Offset = v.AuxInt
p.Reg = v.Args[0].Reg() p.Reg = v.Args[0].Reg()
...@@ -206,13 +206,13 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -206,13 +206,13 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
ssa.OpMIPS64DIVV, ssa.OpMIPS64DIVV,
ssa.OpMIPS64DIVVU: ssa.OpMIPS64DIVVU:
// result in hi,lo // result in hi,lo
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[1].Reg() p.From.Reg = v.Args[1].Reg()
p.Reg = v.Args[0].Reg() p.Reg = v.Args[0].Reg()
case ssa.OpMIPS64MOVVconst: case ssa.OpMIPS64MOVVconst:
r := v.Reg() r := v.Reg()
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_CONST p.From.Type = obj.TYPE_CONST
p.From.Offset = v.AuxInt p.From.Offset = v.AuxInt
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
...@@ -220,7 +220,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -220,7 +220,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
if isFPreg(r) || isHILO(r) { if isFPreg(r) || isHILO(r) {
// cannot move into FP or special registers, use TMP as intermediate // cannot move into FP or special registers, use TMP as intermediate
p.To.Reg = mips.REGTMP p.To.Reg = mips.REGTMP
p = gc.Prog(mips.AMOVV) p = s.Prog(mips.AMOVV)
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = mips.REGTMP p.From.Reg = mips.REGTMP
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
...@@ -228,7 +228,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -228,7 +228,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
} }
case ssa.OpMIPS64MOVFconst, case ssa.OpMIPS64MOVFconst,
ssa.OpMIPS64MOVDconst: ssa.OpMIPS64MOVDconst:
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_FCONST p.From.Type = obj.TYPE_FCONST
p.From.Val = math.Float64frombits(uint64(v.AuxInt)) p.From.Val = math.Float64frombits(uint64(v.AuxInt))
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
...@@ -239,12 +239,12 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -239,12 +239,12 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
ssa.OpMIPS64CMPGED, ssa.OpMIPS64CMPGED,
ssa.OpMIPS64CMPGTF, ssa.OpMIPS64CMPGTF,
ssa.OpMIPS64CMPGTD: ssa.OpMIPS64CMPGTD:
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg() p.From.Reg = v.Args[0].Reg()
p.Reg = v.Args[1].Reg() p.Reg = v.Args[1].Reg()
case ssa.OpMIPS64MOVVaddr: case ssa.OpMIPS64MOVVaddr:
p := gc.Prog(mips.AMOVV) p := s.Prog(mips.AMOVV)
p.From.Type = obj.TYPE_ADDR p.From.Type = obj.TYPE_ADDR
var wantreg string var wantreg string
// MOVV $sym+off(base), R // MOVV $sym+off(base), R
...@@ -281,7 +281,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -281,7 +281,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
ssa.OpMIPS64MOVVload, ssa.OpMIPS64MOVVload,
ssa.OpMIPS64MOVFload, ssa.OpMIPS64MOVFload,
ssa.OpMIPS64MOVDload: ssa.OpMIPS64MOVDload:
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg() p.From.Reg = v.Args[0].Reg()
gc.AddAux(&p.From, v) gc.AddAux(&p.From, v)
...@@ -293,7 +293,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -293,7 +293,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
ssa.OpMIPS64MOVVstore, ssa.OpMIPS64MOVVstore,
ssa.OpMIPS64MOVFstore, ssa.OpMIPS64MOVFstore,
ssa.OpMIPS64MOVDstore: ssa.OpMIPS64MOVDstore:
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[1].Reg() p.From.Reg = v.Args[1].Reg()
p.To.Type = obj.TYPE_MEM p.To.Type = obj.TYPE_MEM
...@@ -303,7 +303,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -303,7 +303,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
ssa.OpMIPS64MOVHstorezero, ssa.OpMIPS64MOVHstorezero,
ssa.OpMIPS64MOVWstorezero, ssa.OpMIPS64MOVWstorezero,
ssa.OpMIPS64MOVVstorezero: ssa.OpMIPS64MOVVstorezero:
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = mips.REGZERO p.From.Reg = mips.REGZERO
p.To.Type = obj.TYPE_MEM p.To.Type = obj.TYPE_MEM
...@@ -332,7 +332,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -332,7 +332,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
if v.Reg() == v.Args[0].Reg() { if v.Reg() == v.Args[0].Reg() {
return return
} }
p := gc.Prog(mips.AMOVV) p := s.Prog(mips.AMOVV)
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg() p.From.Reg = v.Args[0].Reg()
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
...@@ -354,14 +354,14 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -354,14 +354,14 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
ssa.OpMIPS64MOVDF, ssa.OpMIPS64MOVDF,
ssa.OpMIPS64NEGF, ssa.OpMIPS64NEGF,
ssa.OpMIPS64NEGD: ssa.OpMIPS64NEGD:
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg() p.From.Reg = v.Args[0].Reg()
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg() p.To.Reg = v.Reg()
case ssa.OpMIPS64NEGV: case ssa.OpMIPS64NEGV:
// SUB from REGZERO // SUB from REGZERO
p := gc.Prog(mips.ASUBVU) p := s.Prog(mips.ASUBVU)
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg() p.From.Reg = v.Args[0].Reg()
p.Reg = mips.REGZERO p.Reg = mips.REGZERO
...@@ -369,13 +369,13 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -369,13 +369,13 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.To.Reg = v.Reg() p.To.Reg = v.Reg()
case ssa.OpMIPS64DUFFZERO: case ssa.OpMIPS64DUFFZERO:
// runtime.duffzero expects start address - 8 in R1 // runtime.duffzero expects start address - 8 in R1
p := gc.Prog(mips.ASUBVU) p := s.Prog(mips.ASUBVU)
p.From.Type = obj.TYPE_CONST p.From.Type = obj.TYPE_CONST
p.From.Offset = 8 p.From.Offset = 8
p.Reg = v.Args[0].Reg() p.Reg = v.Args[0].Reg()
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
p.To.Reg = mips.REG_R1 p.To.Reg = mips.REG_R1
p = gc.Prog(obj.ADUFFZERO) p = s.Prog(obj.ADUFFZERO)
p.To.Type = obj.TYPE_MEM p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN p.To.Name = obj.NAME_EXTERN
p.To.Sym = gc.Duffzero p.To.Sym = gc.Duffzero
...@@ -402,23 +402,23 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -402,23 +402,23 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
sz = 1 sz = 1
mov = mips.AMOVB mov = mips.AMOVB
} }
p := gc.Prog(mips.ASUBVU) p := s.Prog(mips.ASUBVU)
p.From.Type = obj.TYPE_CONST p.From.Type = obj.TYPE_CONST
p.From.Offset = sz p.From.Offset = sz
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
p.To.Reg = mips.REG_R1 p.To.Reg = mips.REG_R1
p2 := gc.Prog(mov) p2 := s.Prog(mov)
p2.From.Type = obj.TYPE_REG p2.From.Type = obj.TYPE_REG
p2.From.Reg = mips.REGZERO p2.From.Reg = mips.REGZERO
p2.To.Type = obj.TYPE_MEM p2.To.Type = obj.TYPE_MEM
p2.To.Reg = mips.REG_R1 p2.To.Reg = mips.REG_R1
p2.To.Offset = sz p2.To.Offset = sz
p3 := gc.Prog(mips.AADDVU) p3 := s.Prog(mips.AADDVU)
p3.From.Type = obj.TYPE_CONST p3.From.Type = obj.TYPE_CONST
p3.From.Offset = sz p3.From.Offset = sz
p3.To.Type = obj.TYPE_REG p3.To.Type = obj.TYPE_REG
p3.To.Reg = mips.REG_R1 p3.To.Reg = mips.REG_R1
p4 := gc.Prog(mips.ABNE) p4 := s.Prog(mips.ABNE)
p4.From.Type = obj.TYPE_REG p4.From.Type = obj.TYPE_REG
p4.From.Reg = v.Args[1].Reg() p4.From.Reg = v.Args[1].Reg()
p4.Reg = mips.REG_R1 p4.Reg = mips.REG_R1
...@@ -448,33 +448,33 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -448,33 +448,33 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
sz = 1 sz = 1
mov = mips.AMOVB mov = mips.AMOVB
} }
p := gc.Prog(mips.ASUBVU) p := s.Prog(mips.ASUBVU)
p.From.Type = obj.TYPE_CONST p.From.Type = obj.TYPE_CONST
p.From.Offset = sz p.From.Offset = sz
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
p.To.Reg = mips.REG_R1 p.To.Reg = mips.REG_R1
p2 := gc.Prog(mov) p2 := s.Prog(mov)
p2.From.Type = obj.TYPE_MEM p2.From.Type = obj.TYPE_MEM
p2.From.Reg = mips.REG_R1 p2.From.Reg = mips.REG_R1
p2.From.Offset = sz p2.From.Offset = sz
p2.To.Type = obj.TYPE_REG p2.To.Type = obj.TYPE_REG
p2.To.Reg = mips.REGTMP p2.To.Reg = mips.REGTMP
p3 := gc.Prog(mov) p3 := s.Prog(mov)
p3.From.Type = obj.TYPE_REG p3.From.Type = obj.TYPE_REG
p3.From.Reg = mips.REGTMP p3.From.Reg = mips.REGTMP
p3.To.Type = obj.TYPE_MEM p3.To.Type = obj.TYPE_MEM
p3.To.Reg = mips.REG_R2 p3.To.Reg = mips.REG_R2
p4 := gc.Prog(mips.AADDVU) p4 := s.Prog(mips.AADDVU)
p4.From.Type = obj.TYPE_CONST p4.From.Type = obj.TYPE_CONST
p4.From.Offset = sz p4.From.Offset = sz
p4.To.Type = obj.TYPE_REG p4.To.Type = obj.TYPE_REG
p4.To.Reg = mips.REG_R1 p4.To.Reg = mips.REG_R1
p5 := gc.Prog(mips.AADDVU) p5 := s.Prog(mips.AADDVU)
p5.From.Type = obj.TYPE_CONST p5.From.Type = obj.TYPE_CONST
p5.From.Offset = sz p5.From.Offset = sz
p5.To.Type = obj.TYPE_REG p5.To.Type = obj.TYPE_REG
p5.To.Reg = mips.REG_R2 p5.To.Reg = mips.REG_R2
p6 := gc.Prog(mips.ABNE) p6 := s.Prog(mips.ABNE)
p6.From.Type = obj.TYPE_REG p6.From.Type = obj.TYPE_REG
p6.From.Reg = v.Args[2].Reg() p6.From.Reg = v.Args[2].Reg()
p6.Reg = mips.REG_R1 p6.Reg = mips.REG_R1
...@@ -484,7 +484,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -484,7 +484,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
s.Call(v) s.Call(v)
case ssa.OpMIPS64LoweredNilCheck: case ssa.OpMIPS64LoweredNilCheck:
// Issue a load which will fault if arg is nil. // Issue a load which will fault if arg is nil.
p := gc.Prog(mips.AMOVB) p := s.Prog(mips.AMOVB)
p.From.Type = obj.TYPE_MEM p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg() p.From.Reg = v.Args[0].Reg()
gc.AddAux(&p.From, v) gc.AddAux(&p.From, v)
...@@ -502,19 +502,19 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -502,19 +502,19 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
if v.Op == ssa.OpMIPS64FPFlagFalse { if v.Op == ssa.OpMIPS64FPFlagFalse {
branch = mips.ABFPT branch = mips.ABFPT
} }
p := gc.Prog(mips.AMOVV) p := s.Prog(mips.AMOVV)
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = mips.REGZERO p.From.Reg = mips.REGZERO
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg() p.To.Reg = v.Reg()
p2 := gc.Prog(branch) p2 := s.Prog(branch)
p2.To.Type = obj.TYPE_BRANCH p2.To.Type = obj.TYPE_BRANCH
p3 := gc.Prog(mips.AMOVV) p3 := s.Prog(mips.AMOVV)
p3.From.Type = obj.TYPE_CONST p3.From.Type = obj.TYPE_CONST
p3.From.Offset = 1 p3.From.Offset = 1
p3.To.Type = obj.TYPE_REG p3.To.Type = obj.TYPE_REG
p3.To.Reg = v.Reg() p3.To.Reg = v.Reg()
p4 := gc.Prog(obj.ANOP) // not a machine instruction, for branch to land p4 := s.Prog(obj.ANOP) // not a machine instruction, for branch to land
gc.Patch(p2, p4) gc.Patch(p2, p4)
case ssa.OpMIPS64LoweredGetClosurePtr: case ssa.OpMIPS64LoweredGetClosurePtr:
// Closure pointer is R22 (mips.REGCTXT). // Closure pointer is R22 (mips.REGCTXT).
...@@ -541,7 +541,7 @@ func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) { ...@@ -541,7 +541,7 @@ func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
switch b.Kind { switch b.Kind {
case ssa.BlockPlain: case ssa.BlockPlain:
if b.Succs[0].Block() != next { if b.Succs[0].Block() != next {
p := gc.Prog(obj.AJMP) p := s.Prog(obj.AJMP)
p.To.Type = obj.TYPE_BRANCH p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()}) s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
} }
...@@ -549,23 +549,23 @@ func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) { ...@@ -549,23 +549,23 @@ func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
// defer returns in R1: // defer returns in R1:
// 0 if we should continue executing // 0 if we should continue executing
// 1 if we should jump to deferreturn call // 1 if we should jump to deferreturn call
p := gc.Prog(mips.ABNE) p := s.Prog(mips.ABNE)
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = mips.REGZERO p.From.Reg = mips.REGZERO
p.Reg = mips.REG_R1 p.Reg = mips.REG_R1
p.To.Type = obj.TYPE_BRANCH p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()}) s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
if b.Succs[0].Block() != next { if b.Succs[0].Block() != next {
p := gc.Prog(obj.AJMP) p := s.Prog(obj.AJMP)
p.To.Type = obj.TYPE_BRANCH p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()}) s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
} }
case ssa.BlockExit: case ssa.BlockExit:
gc.Prog(obj.AUNDEF) // tell plive.go that we never reach here s.Prog(obj.AUNDEF) // tell plive.go that we never reach here
case ssa.BlockRet: case ssa.BlockRet:
gc.Prog(obj.ARET) s.Prog(obj.ARET)
case ssa.BlockRetJmp: case ssa.BlockRetJmp:
p := gc.Prog(obj.ARET) p := s.Prog(obj.ARET)
p.To.Type = obj.TYPE_MEM p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN p.To.Name = obj.NAME_EXTERN
p.To.Sym = b.Aux.(*obj.LSym) p.To.Sym = b.Aux.(*obj.LSym)
...@@ -577,18 +577,18 @@ func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) { ...@@ -577,18 +577,18 @@ func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
var p *obj.Prog var p *obj.Prog
switch next { switch next {
case b.Succs[0].Block(): case b.Succs[0].Block():
p = gc.Prog(jmp.invasm) p = s.Prog(jmp.invasm)
p.To.Type = obj.TYPE_BRANCH p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()}) s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
case b.Succs[1].Block(): case b.Succs[1].Block():
p = gc.Prog(jmp.asm) p = s.Prog(jmp.asm)
p.To.Type = obj.TYPE_BRANCH p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()}) s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
default: default:
p = gc.Prog(jmp.asm) p = s.Prog(jmp.asm)
p.To.Type = obj.TYPE_BRANCH p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()}) s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
q := gc.Prog(obj.AJMP) q := s.Prog(obj.AJMP)
q.To.Type = obj.TYPE_BRANCH q.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: q, B: b.Succs[1].Block()}) s.Branches = append(s.Branches, gc.Branch{P: q, B: b.Succs[1].Block()})
} }
......
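The ppc64 file below extends the pattern to local helpers: ssaGenISEL now takes the *gc.SSAGenState as its first parameter and emits through s.Prog, and every call site passes s along. A hypothetical helper written under this convention (the name and operation are illustrative only, not taken from this CL) would look like:

	// loadConst emits MOVD $c, reg through the threaded code-gen state.
	// Illustrative sketch of the new helper convention; not part of this CL.
	func loadConst(s *gc.SSAGenState, c int64, reg int16) *obj.Prog {
		p := s.Prog(ppc64.AMOVD)
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = c
		p.To.Type = obj.TYPE_REG
		p.To.Reg = reg
		return p
	}
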
...@@ -131,9 +131,9 @@ func storeByType(t ssa.Type) obj.As { ...@@ -131,9 +131,9 @@ func storeByType(t ssa.Type) obj.As {
panic("bad store type") panic("bad store type")
} }
func ssaGenISEL(v *ssa.Value, cr int64, r1, r2 int16) { func ssaGenISEL(s *gc.SSAGenState, v *ssa.Value, cr int64, r1, r2 int16) {
r := v.Reg() r := v.Reg()
p := gc.Prog(ppc64.AISEL) p := s.Prog(ppc64.AISEL)
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
p.To.Reg = r p.To.Reg = r
p.Reg = r1 p.Reg = r1
...@@ -158,7 +158,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -158,7 +158,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
if t.IsFloat() { if t.IsFloat() {
op = ppc64.AFMOVD op = ppc64.AFMOVD
} }
p := gc.Prog(op) p := s.Prog(op)
p.From.Type = rt p.From.Type = rt
p.From.Reg = x p.From.Reg = x
p.To.Type = rt p.To.Type = rt
...@@ -170,7 +170,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -170,7 +170,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
x := v.Args[0].Reg() x := v.Args[0].Reg()
y := v.Reg() y := v.Reg()
p := gc.Prog(ppc64.AMFVSRD) p := s.Prog(ppc64.AMFVSRD)
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = x p.From.Reg = x
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
...@@ -181,7 +181,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -181,7 +181,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
x := v.Args[0].Reg() x := v.Args[0].Reg()
y := v.Reg() y := v.Reg()
p := gc.Prog(ppc64.AMTVSRD) p := s.Prog(ppc64.AMTVSRD)
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = x p.From.Reg = x
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
...@@ -198,28 +198,28 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -198,28 +198,28 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
// ISYNC // ISYNC
r0 := v.Args[0].Reg() r0 := v.Args[0].Reg()
r1 := v.Args[1].Reg() r1 := v.Args[1].Reg()
psync := gc.Prog(ppc64.ASYNC) psync := s.Prog(ppc64.ASYNC)
psync.To.Type = obj.TYPE_NONE psync.To.Type = obj.TYPE_NONE
p := gc.Prog(ppc64.ALBAR) p := s.Prog(ppc64.ALBAR)
p.From.Type = obj.TYPE_MEM p.From.Type = obj.TYPE_MEM
p.From.Reg = r0 p.From.Reg = r0
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
p.To.Reg = ppc64.REGTMP p.To.Reg = ppc64.REGTMP
p1 := gc.Prog(v.Op.Asm()) p1 := s.Prog(v.Op.Asm())
p1.From.Type = obj.TYPE_REG p1.From.Type = obj.TYPE_REG
p1.From.Reg = r1 p1.From.Reg = r1
p1.To.Type = obj.TYPE_REG p1.To.Type = obj.TYPE_REG
p1.To.Reg = ppc64.REGTMP p1.To.Reg = ppc64.REGTMP
p2 := gc.Prog(ppc64.ASTBCCC) p2 := s.Prog(ppc64.ASTBCCC)
p2.From.Type = obj.TYPE_REG p2.From.Type = obj.TYPE_REG
p2.From.Reg = ppc64.REGTMP p2.From.Reg = ppc64.REGTMP
p2.To.Type = obj.TYPE_MEM p2.To.Type = obj.TYPE_MEM
p2.To.Reg = r0 p2.To.Reg = r0
p2.RegTo2 = ppc64.REGTMP p2.RegTo2 = ppc64.REGTMP
p3 := gc.Prog(ppc64.ABNE) p3 := s.Prog(ppc64.ABNE)
p3.To.Type = obj.TYPE_BRANCH p3.To.Type = obj.TYPE_BRANCH
gc.Patch(p3, p) gc.Patch(p3, p)
pisync := gc.Prog(ppc64.AISYNC) pisync := s.Prog(ppc64.AISYNC)
pisync.To.Type = obj.TYPE_NONE pisync.To.Type = obj.TYPE_NONE
case ssa.OpPPC64LoweredAtomicAdd32, case ssa.OpPPC64LoweredAtomicAdd32,
...@@ -241,37 +241,37 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -241,37 +241,37 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
r1 := v.Args[1].Reg() r1 := v.Args[1].Reg()
out := v.Reg0() out := v.Reg0()
// SYNC // SYNC
psync := gc.Prog(ppc64.ASYNC) psync := s.Prog(ppc64.ASYNC)
psync.To.Type = obj.TYPE_NONE psync.To.Type = obj.TYPE_NONE
// LDAR or LWAR // LDAR or LWAR
p := gc.Prog(ld) p := s.Prog(ld)
p.From.Type = obj.TYPE_MEM p.From.Type = obj.TYPE_MEM
p.From.Reg = r0 p.From.Reg = r0
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
p.To.Reg = out p.To.Reg = out
// ADD reg1,out // ADD reg1,out
p1 := gc.Prog(ppc64.AADD) p1 := s.Prog(ppc64.AADD)
p1.From.Type = obj.TYPE_REG p1.From.Type = obj.TYPE_REG
p1.From.Reg = r1 p1.From.Reg = r1
p1.To.Reg = out p1.To.Reg = out
p1.To.Type = obj.TYPE_REG p1.To.Type = obj.TYPE_REG
// STDCCC or STWCCC // STDCCC or STWCCC
p3 := gc.Prog(st) p3 := s.Prog(st)
p3.From.Type = obj.TYPE_REG p3.From.Type = obj.TYPE_REG
p3.From.Reg = out p3.From.Reg = out
p3.To.Type = obj.TYPE_MEM p3.To.Type = obj.TYPE_MEM
p3.To.Reg = r0 p3.To.Reg = r0
// BNE retry // BNE retry
p4 := gc.Prog(ppc64.ABNE) p4 := s.Prog(ppc64.ABNE)
p4.To.Type = obj.TYPE_BRANCH p4.To.Type = obj.TYPE_BRANCH
gc.Patch(p4, p) gc.Patch(p4, p)
// ISYNC // ISYNC
pisync := gc.Prog(ppc64.AISYNC) pisync := s.Prog(ppc64.AISYNC)
pisync.To.Type = obj.TYPE_NONE pisync.To.Type = obj.TYPE_NONE
// Ensure a 32 bit result // Ensure a 32 bit result
if v.Op == ssa.OpPPC64LoweredAtomicAdd32 { if v.Op == ssa.OpPPC64LoweredAtomicAdd32 {
p5 := gc.Prog(ppc64.AMOVWZ) p5 := s.Prog(ppc64.AMOVWZ)
p5.To.Type = obj.TYPE_REG p5.To.Type = obj.TYPE_REG
p5.To.Reg = out p5.To.Reg = out
p5.From.Type = obj.TYPE_REG p5.From.Type = obj.TYPE_REG
...@@ -295,26 +295,26 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -295,26 +295,26 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
r1 := v.Args[1].Reg() r1 := v.Args[1].Reg()
out := v.Reg0() out := v.Reg0()
// SYNC // SYNC
psync := gc.Prog(ppc64.ASYNC) psync := s.Prog(ppc64.ASYNC)
psync.To.Type = obj.TYPE_NONE psync.To.Type = obj.TYPE_NONE
// LDAR or LWAR // LDAR or LWAR
p := gc.Prog(ld) p := s.Prog(ld)
p.From.Type = obj.TYPE_MEM p.From.Type = obj.TYPE_MEM
p.From.Reg = r0 p.From.Reg = r0
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
p.To.Reg = out p.To.Reg = out
// STDCCC or STWCCC // STDCCC or STWCCC
p1 := gc.Prog(st) p1 := s.Prog(st)
p1.From.Type = obj.TYPE_REG p1.From.Type = obj.TYPE_REG
p1.From.Reg = r1 p1.From.Reg = r1
p1.To.Type = obj.TYPE_MEM p1.To.Type = obj.TYPE_MEM
p1.To.Reg = r0 p1.To.Reg = r0
// BNE retry // BNE retry
p2 := gc.Prog(ppc64.ABNE) p2 := s.Prog(ppc64.ABNE)
p2.To.Type = obj.TYPE_BRANCH p2.To.Type = obj.TYPE_BRANCH
gc.Patch(p2, p) gc.Patch(p2, p)
// ISYNC // ISYNC
pisync := gc.Prog(ppc64.AISYNC) pisync := s.Prog(ppc64.AISYNC)
pisync.To.Type = obj.TYPE_NONE pisync.To.Type = obj.TYPE_NONE
case ssa.OpPPC64LoweredAtomicLoad32, case ssa.OpPPC64LoweredAtomicLoad32,
...@@ -334,25 +334,25 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -334,25 +334,25 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
arg0 := v.Args[0].Reg() arg0 := v.Args[0].Reg()
out := v.Reg0() out := v.Reg0()
// SYNC // SYNC
psync := gc.Prog(ppc64.ASYNC) psync := s.Prog(ppc64.ASYNC)
psync.To.Type = obj.TYPE_NONE psync.To.Type = obj.TYPE_NONE
// Load // Load
p := gc.Prog(ld) p := s.Prog(ld)
p.From.Type = obj.TYPE_MEM p.From.Type = obj.TYPE_MEM
p.From.Reg = arg0 p.From.Reg = arg0
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
p.To.Reg = out p.To.Reg = out
// CMP // CMP
p1 := gc.Prog(cmp) p1 := s.Prog(cmp)
p1.From.Type = obj.TYPE_REG p1.From.Type = obj.TYPE_REG
p1.From.Reg = out p1.From.Reg = out
p1.To.Type = obj.TYPE_REG p1.To.Type = obj.TYPE_REG
p1.To.Reg = out p1.To.Reg = out
// BNE // BNE
p2 := gc.Prog(ppc64.ABNE) p2 := s.Prog(ppc64.ABNE)
p2.To.Type = obj.TYPE_BRANCH p2.To.Type = obj.TYPE_BRANCH
// ISYNC // ISYNC
pisync := gc.Prog(ppc64.AISYNC) pisync := s.Prog(ppc64.AISYNC)
pisync.To.Type = obj.TYPE_NONE pisync.To.Type = obj.TYPE_NONE
gc.Patch(p2, pisync) gc.Patch(p2, pisync)
...@@ -367,10 +367,10 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -367,10 +367,10 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
arg0 := v.Args[0].Reg() arg0 := v.Args[0].Reg()
arg1 := v.Args[1].Reg() arg1 := v.Args[1].Reg()
// SYNC // SYNC
psync := gc.Prog(ppc64.ASYNC) psync := s.Prog(ppc64.ASYNC)
psync.To.Type = obj.TYPE_NONE psync.To.Type = obj.TYPE_NONE
// Store // Store
p := gc.Prog(st) p := s.Prog(st)
p.To.Type = obj.TYPE_MEM p.To.Type = obj.TYPE_MEM
p.To.Reg = arg0 p.To.Reg = arg0
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
...@@ -404,54 +404,54 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -404,54 +404,54 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
r2 := v.Args[2].Reg() r2 := v.Args[2].Reg()
out := v.Reg0() out := v.Reg0()
// SYNC // SYNC
psync := gc.Prog(ppc64.ASYNC) psync := s.Prog(ppc64.ASYNC)
psync.To.Type = obj.TYPE_NONE psync.To.Type = obj.TYPE_NONE
// LDAR or LWAR // LDAR or LWAR
p := gc.Prog(ld) p := s.Prog(ld)
p.From.Type = obj.TYPE_MEM p.From.Type = obj.TYPE_MEM
p.From.Reg = r0 p.From.Reg = r0
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
p.To.Reg = ppc64.REGTMP p.To.Reg = ppc64.REGTMP
// CMP reg1,reg2 // CMP reg1,reg2
p1 := gc.Prog(cmp) p1 := s.Prog(cmp)
p1.From.Type = obj.TYPE_REG p1.From.Type = obj.TYPE_REG
p1.From.Reg = r1 p1.From.Reg = r1
p1.To.Reg = ppc64.REGTMP p1.To.Reg = ppc64.REGTMP
p1.To.Type = obj.TYPE_REG p1.To.Type = obj.TYPE_REG
// BNE cas_fail // BNE cas_fail
p2 := gc.Prog(ppc64.ABNE) p2 := s.Prog(ppc64.ABNE)
p2.To.Type = obj.TYPE_BRANCH p2.To.Type = obj.TYPE_BRANCH
// STDCCC or STWCCC // STDCCC or STWCCC
p3 := gc.Prog(st) p3 := s.Prog(st)
p3.From.Type = obj.TYPE_REG p3.From.Type = obj.TYPE_REG
p3.From.Reg = r2 p3.From.Reg = r2
p3.To.Type = obj.TYPE_MEM p3.To.Type = obj.TYPE_MEM
p3.To.Reg = r0 p3.To.Reg = r0
// BNE retry // BNE retry
p4 := gc.Prog(ppc64.ABNE) p4 := s.Prog(ppc64.ABNE)
p4.To.Type = obj.TYPE_BRANCH p4.To.Type = obj.TYPE_BRANCH
gc.Patch(p4, p) gc.Patch(p4, p)
// ISYNC // ISYNC
pisync := gc.Prog(ppc64.AISYNC) pisync := s.Prog(ppc64.AISYNC)
pisync.To.Type = obj.TYPE_NONE pisync.To.Type = obj.TYPE_NONE
// return true // return true
p5 := gc.Prog(ppc64.AMOVD) p5 := s.Prog(ppc64.AMOVD)
p5.From.Type = obj.TYPE_CONST p5.From.Type = obj.TYPE_CONST
p5.From.Offset = 1 p5.From.Offset = 1
p5.To.Type = obj.TYPE_REG p5.To.Type = obj.TYPE_REG
p5.To.Reg = out p5.To.Reg = out
// BR done // BR done
p6 := gc.Prog(obj.AJMP) p6 := s.Prog(obj.AJMP)
p6.To.Type = obj.TYPE_BRANCH p6.To.Type = obj.TYPE_BRANCH
// return false // return false
p7 := gc.Prog(ppc64.AMOVD) p7 := s.Prog(ppc64.AMOVD)
p7.From.Type = obj.TYPE_CONST p7.From.Type = obj.TYPE_CONST
p7.From.Offset = 0 p7.From.Offset = 0
p7.To.Type = obj.TYPE_REG p7.To.Type = obj.TYPE_REG
p7.To.Reg = out p7.To.Reg = out
gc.Patch(p2, p7) gc.Patch(p2, p7)
// done (label) // done (label)
p8 := gc.Prog(obj.ANOP) p8 := s.Prog(obj.ANOP)
gc.Patch(p6, p8) gc.Patch(p6, p8)
case ssa.OpPPC64LoweredGetClosurePtr: case ssa.OpPPC64LoweredGetClosurePtr:
...@@ -463,14 +463,14 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -463,14 +463,14 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
case ssa.OpLoadReg: case ssa.OpLoadReg:
loadOp := loadByType(v.Type) loadOp := loadByType(v.Type)
p := gc.Prog(loadOp) p := s.Prog(loadOp)
gc.AddrAuto(&p.From, v.Args[0]) gc.AddrAuto(&p.From, v.Args[0])
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg() p.To.Reg = v.Reg()
case ssa.OpStoreReg: case ssa.OpStoreReg:
storeOp := storeByType(v.Type) storeOp := storeByType(v.Type)
p := gc.Prog(storeOp) p := s.Prog(storeOp)
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg() p.From.Reg = v.Args[0].Reg()
gc.AddrAuto(&p.To, v) gc.AddrAuto(&p.To, v)
...@@ -488,33 +488,33 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -488,33 +488,33 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
r0 := v.Args[0].Reg() r0 := v.Args[0].Reg()
r1 := v.Args[1].Reg() r1 := v.Args[1].Reg()
p := gc.Prog(ppc64.ACMP) p := s.Prog(ppc64.ACMP)
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = r1 p.From.Reg = r1
p.To.Type = obj.TYPE_CONST p.To.Type = obj.TYPE_CONST
p.To.Offset = -1 p.To.Offset = -1
pbahead := gc.Prog(ppc64.ABEQ) pbahead := s.Prog(ppc64.ABEQ)
pbahead.To.Type = obj.TYPE_BRANCH pbahead.To.Type = obj.TYPE_BRANCH
p = gc.Prog(v.Op.Asm()) p = s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = r1 p.From.Reg = r1
p.Reg = r0 p.Reg = r0
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
p.To.Reg = r p.To.Reg = r
pbover := gc.Prog(obj.AJMP) pbover := s.Prog(obj.AJMP)
pbover.To.Type = obj.TYPE_BRANCH pbover.To.Type = obj.TYPE_BRANCH
p = gc.Prog(ppc64.ANEG) p = s.Prog(ppc64.ANEG)
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
p.To.Reg = r p.To.Reg = r
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = r0 p.From.Reg = r0
gc.Patch(pbahead, p) gc.Patch(pbahead, p)
p = gc.Prog(obj.ANOP) p = s.Prog(obj.ANOP)
gc.Patch(pbover, p) gc.Patch(pbover, p)
case ssa.OpPPC64DIVW: case ssa.OpPPC64DIVW:
...@@ -523,33 +523,33 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -523,33 +523,33 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
r0 := v.Args[0].Reg() r0 := v.Args[0].Reg()
r1 := v.Args[1].Reg() r1 := v.Args[1].Reg()
p := gc.Prog(ppc64.ACMPW) p := s.Prog(ppc64.ACMPW)
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = r1 p.From.Reg = r1
p.To.Type = obj.TYPE_CONST p.To.Type = obj.TYPE_CONST
p.To.Offset = -1 p.To.Offset = -1
pbahead := gc.Prog(ppc64.ABEQ) pbahead := s.Prog(ppc64.ABEQ)
pbahead.To.Type = obj.TYPE_BRANCH pbahead.To.Type = obj.TYPE_BRANCH
p = gc.Prog(v.Op.Asm()) p = s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = r1 p.From.Reg = r1
p.Reg = r0 p.Reg = r0
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
p.To.Reg = r p.To.Reg = r
pbover := gc.Prog(obj.AJMP) pbover := s.Prog(obj.AJMP)
pbover.To.Type = obj.TYPE_BRANCH pbover.To.Type = obj.TYPE_BRANCH
p = gc.Prog(ppc64.ANEG) p = s.Prog(ppc64.ANEG)
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
p.To.Reg = r p.To.Reg = r
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = r0 p.From.Reg = r0
gc.Patch(pbahead, p) gc.Patch(pbahead, p)
p = gc.Prog(obj.ANOP) p = s.Prog(obj.ANOP)
gc.Patch(pbover, p) gc.Patch(pbover, p)
case ssa.OpPPC64ADD, ssa.OpPPC64FADD, ssa.OpPPC64FADDS, ssa.OpPPC64SUB, ssa.OpPPC64FSUB, ssa.OpPPC64FSUBS, case ssa.OpPPC64ADD, ssa.OpPPC64FADD, ssa.OpPPC64FADDS, ssa.OpPPC64SUB, ssa.OpPPC64FSUB, ssa.OpPPC64FSUBS,
...@@ -561,7 +561,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -561,7 +561,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
r := v.Reg() r := v.Reg()
r1 := v.Args[0].Reg() r1 := v.Args[0].Reg()
r2 := v.Args[1].Reg() r2 := v.Args[1].Reg()
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = r2 p.From.Reg = r2
p.Reg = r1 p.Reg = r1
...@@ -574,7 +574,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -574,7 +574,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
r2 := v.Args[1].Reg() r2 := v.Args[1].Reg()
r3 := v.Args[2].Reg() r3 := v.Args[2].Reg()
// r = r1*r2 ± r3 // r = r1*r2 ± r3
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = r1 p.From.Reg = r1
p.Reg = r3 p.Reg = r3
...@@ -586,7 +586,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -586,7 +586,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
case ssa.OpPPC64MaskIfNotCarry: case ssa.OpPPC64MaskIfNotCarry:
r := v.Reg() r := v.Reg()
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = ppc64.REGZERO p.From.Reg = ppc64.REGZERO
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
...@@ -594,7 +594,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -594,7 +594,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
case ssa.OpPPC64ADDconstForCarry: case ssa.OpPPC64ADDconstForCarry:
r1 := v.Args[0].Reg() r1 := v.Args[0].Reg()
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.Reg = r1 p.Reg = r1
p.From.Type = obj.TYPE_CONST p.From.Type = obj.TYPE_CONST
p.From.Offset = v.AuxInt p.From.Offset = v.AuxInt
...@@ -603,7 +603,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -603,7 +603,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
case ssa.OpPPC64NEG, ssa.OpPPC64FNEG, ssa.OpPPC64FSQRT, ssa.OpPPC64FSQRTS, ssa.OpPPC64FCTIDZ, ssa.OpPPC64FCTIWZ, ssa.OpPPC64FCFID, ssa.OpPPC64FRSP: case ssa.OpPPC64NEG, ssa.OpPPC64FNEG, ssa.OpPPC64FSQRT, ssa.OpPPC64FSQRTS, ssa.OpPPC64FCTIDZ, ssa.OpPPC64FCTIWZ, ssa.OpPPC64FCFID, ssa.OpPPC64FRSP:
r := v.Reg() r := v.Reg()
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
p.To.Reg = r p.To.Reg = r
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
...@@ -611,7 +611,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -611,7 +611,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
case ssa.OpPPC64ADDconst, ssa.OpPPC64ANDconst, ssa.OpPPC64ORconst, ssa.OpPPC64XORconst, case ssa.OpPPC64ADDconst, ssa.OpPPC64ANDconst, ssa.OpPPC64ORconst, ssa.OpPPC64XORconst,
ssa.OpPPC64SRADconst, ssa.OpPPC64SRAWconst, ssa.OpPPC64SRDconst, ssa.OpPPC64SRWconst, ssa.OpPPC64SLDconst, ssa.OpPPC64SLWconst: ssa.OpPPC64SRADconst, ssa.OpPPC64SRAWconst, ssa.OpPPC64SRDconst, ssa.OpPPC64SRWconst, ssa.OpPPC64SLDconst, ssa.OpPPC64SLWconst:
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.Reg = v.Args[0].Reg() p.Reg = v.Args[0].Reg()
if v.Aux != nil { if v.Aux != nil {
...@@ -626,7 +626,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -626,7 +626,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.To.Reg = v.Reg() p.To.Reg = v.Reg()
case ssa.OpPPC64ANDCCconst: case ssa.OpPPC64ANDCCconst:
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.Reg = v.Args[0].Reg() p.Reg = v.Args[0].Reg()
if v.Aux != nil { if v.Aux != nil {
...@@ -641,7 +641,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -641,7 +641,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.To.Reg = ppc64.REGTMP // discard result p.To.Reg = ppc64.REGTMP // discard result
case ssa.OpPPC64MOVDaddr: case ssa.OpPPC64MOVDaddr:
p := gc.Prog(ppc64.AMOVD) p := s.Prog(ppc64.AMOVD)
p.From.Type = obj.TYPE_ADDR p.From.Type = obj.TYPE_ADDR
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg() p.To.Reg = v.Reg()
...@@ -673,28 +673,28 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -673,28 +673,28 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
} }
case ssa.OpPPC64MOVDconst: case ssa.OpPPC64MOVDconst:
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_CONST p.From.Type = obj.TYPE_CONST
p.From.Offset = v.AuxInt p.From.Offset = v.AuxInt
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg() p.To.Reg = v.Reg()
case ssa.OpPPC64FMOVDconst, ssa.OpPPC64FMOVSconst: case ssa.OpPPC64FMOVDconst, ssa.OpPPC64FMOVSconst:
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_FCONST p.From.Type = obj.TYPE_FCONST
p.From.Val = math.Float64frombits(uint64(v.AuxInt)) p.From.Val = math.Float64frombits(uint64(v.AuxInt))
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg() p.To.Reg = v.Reg()
case ssa.OpPPC64FCMPU, ssa.OpPPC64CMP, ssa.OpPPC64CMPW, ssa.OpPPC64CMPU, ssa.OpPPC64CMPWU: case ssa.OpPPC64FCMPU, ssa.OpPPC64CMP, ssa.OpPPC64CMPW, ssa.OpPPC64CMPU, ssa.OpPPC64CMPWU:
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg() p.From.Reg = v.Args[0].Reg()
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
p.To.Reg = v.Args[1].Reg() p.To.Reg = v.Args[1].Reg()
case ssa.OpPPC64CMPconst, ssa.OpPPC64CMPUconst, ssa.OpPPC64CMPWconst, ssa.OpPPC64CMPWUconst: case ssa.OpPPC64CMPconst, ssa.OpPPC64CMPUconst, ssa.OpPPC64CMPWconst, ssa.OpPPC64CMPWUconst:
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg() p.From.Reg = v.Args[0].Reg()
p.To.Type = obj.TYPE_CONST p.To.Type = obj.TYPE_CONST
...@@ -702,14 +702,14 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -702,14 +702,14 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
case ssa.OpPPC64MOVBreg, ssa.OpPPC64MOVBZreg, ssa.OpPPC64MOVHreg, ssa.OpPPC64MOVHZreg, ssa.OpPPC64MOVWreg, ssa.OpPPC64MOVWZreg: case ssa.OpPPC64MOVBreg, ssa.OpPPC64MOVBZreg, ssa.OpPPC64MOVHreg, ssa.OpPPC64MOVHZreg, ssa.OpPPC64MOVWreg, ssa.OpPPC64MOVWZreg:
// Shift in register to required size // Shift in register to required size
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg() p.From.Reg = v.Args[0].Reg()
p.To.Reg = v.Reg() p.To.Reg = v.Reg()
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
case ssa.OpPPC64MOVDload, ssa.OpPPC64MOVWload, ssa.OpPPC64MOVHload, ssa.OpPPC64MOVWZload, ssa.OpPPC64MOVBZload, ssa.OpPPC64MOVHZload: case ssa.OpPPC64MOVDload, ssa.OpPPC64MOVWload, ssa.OpPPC64MOVHload, ssa.OpPPC64MOVWZload, ssa.OpPPC64MOVBZload, ssa.OpPPC64MOVHZload:
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg() p.From.Reg = v.Args[0].Reg()
gc.AddAux(&p.From, v) gc.AddAux(&p.From, v)
...@@ -717,7 +717,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -717,7 +717,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.To.Reg = v.Reg() p.To.Reg = v.Reg()
case ssa.OpPPC64FMOVDload, ssa.OpPPC64FMOVSload: case ssa.OpPPC64FMOVDload, ssa.OpPPC64FMOVSload:
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg() p.From.Reg = v.Args[0].Reg()
gc.AddAux(&p.From, v) gc.AddAux(&p.From, v)
...@@ -725,7 +725,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -725,7 +725,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.To.Reg = v.Reg() p.To.Reg = v.Reg()
case ssa.OpPPC64MOVDstorezero, ssa.OpPPC64MOVWstorezero, ssa.OpPPC64MOVHstorezero, ssa.OpPPC64MOVBstorezero: case ssa.OpPPC64MOVDstorezero, ssa.OpPPC64MOVWstorezero, ssa.OpPPC64MOVHstorezero, ssa.OpPPC64MOVBstorezero:
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = ppc64.REGZERO p.From.Reg = ppc64.REGZERO
p.To.Type = obj.TYPE_MEM p.To.Type = obj.TYPE_MEM
...@@ -733,14 +733,14 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -733,14 +733,14 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
gc.AddAux(&p.To, v) gc.AddAux(&p.To, v)
case ssa.OpPPC64MOVDstore, ssa.OpPPC64MOVWstore, ssa.OpPPC64MOVHstore, ssa.OpPPC64MOVBstore: case ssa.OpPPC64MOVDstore, ssa.OpPPC64MOVWstore, ssa.OpPPC64MOVHstore, ssa.OpPPC64MOVBstore:
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[1].Reg() p.From.Reg = v.Args[1].Reg()
p.To.Type = obj.TYPE_MEM p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg() p.To.Reg = v.Args[0].Reg()
gc.AddAux(&p.To, v) gc.AddAux(&p.To, v)
case ssa.OpPPC64FMOVDstore, ssa.OpPPC64FMOVSstore: case ssa.OpPPC64FMOVDstore, ssa.OpPPC64FMOVSstore:
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[1].Reg() p.From.Reg = v.Args[1].Reg()
p.To.Type = obj.TYPE_MEM p.To.Type = obj.TYPE_MEM
...@@ -766,69 +766,69 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -766,69 +766,69 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
// isel rt,0,rtmp,!cond // rt is target in ppc asm // isel rt,0,rtmp,!cond // rt is target in ppc asm
if v.Block.Func.Config.OldArch { if v.Block.Func.Config.OldArch {
p := gc.Prog(ppc64.AMOVD) p := s.Prog(ppc64.AMOVD)
p.From.Type = obj.TYPE_CONST p.From.Type = obj.TYPE_CONST
p.From.Offset = 1 p.From.Offset = 1
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg() p.To.Reg = v.Reg()
pb := gc.Prog(condOps[v.Op]) pb := s.Prog(condOps[v.Op])
pb.To.Type = obj.TYPE_BRANCH pb.To.Type = obj.TYPE_BRANCH
p = gc.Prog(ppc64.AMOVD) p = s.Prog(ppc64.AMOVD)
p.From.Type = obj.TYPE_CONST p.From.Type = obj.TYPE_CONST
p.From.Offset = 0 p.From.Offset = 0
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg() p.To.Reg = v.Reg()
p = gc.Prog(obj.ANOP) p = s.Prog(obj.ANOP)
gc.Patch(pb, p) gc.Patch(pb, p)
break break
} }
// Modern PPC uses ISEL // Modern PPC uses ISEL
p := gc.Prog(ppc64.AMOVD) p := s.Prog(ppc64.AMOVD)
p.From.Type = obj.TYPE_CONST p.From.Type = obj.TYPE_CONST
p.From.Offset = 1 p.From.Offset = 1
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
p.To.Reg = iselRegs[1] p.To.Reg = iselRegs[1]
iop := iselOps[v.Op] iop := iselOps[v.Op]
ssaGenISEL(v, iop.cond, iselRegs[iop.valueIfCond], iselRegs[1-iop.valueIfCond]) ssaGenISEL(s, v, iop.cond, iselRegs[iop.valueIfCond], iselRegs[1-iop.valueIfCond])
case ssa.OpPPC64FLessEqual, // These include a second branch for EQ -- dealing with NaN prevents REL= to !REL conversion case ssa.OpPPC64FLessEqual, // These include a second branch for EQ -- dealing with NaN prevents REL= to !REL conversion
ssa.OpPPC64FGreaterEqual: ssa.OpPPC64FGreaterEqual:
if v.Block.Func.Config.OldArch { if v.Block.Func.Config.OldArch {
p := gc.Prog(ppc64.AMOVW) p := s.Prog(ppc64.AMOVW)
p.From.Type = obj.TYPE_CONST p.From.Type = obj.TYPE_CONST
p.From.Offset = 1 p.From.Offset = 1
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg() p.To.Reg = v.Reg()
pb0 := gc.Prog(condOps[v.Op]) pb0 := s.Prog(condOps[v.Op])
pb0.To.Type = obj.TYPE_BRANCH pb0.To.Type = obj.TYPE_BRANCH
pb1 := gc.Prog(ppc64.ABEQ) pb1 := s.Prog(ppc64.ABEQ)
pb1.To.Type = obj.TYPE_BRANCH pb1.To.Type = obj.TYPE_BRANCH
p = gc.Prog(ppc64.AMOVW) p = s.Prog(ppc64.AMOVW)
p.From.Type = obj.TYPE_CONST p.From.Type = obj.TYPE_CONST
p.From.Offset = 0 p.From.Offset = 0
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg() p.To.Reg = v.Reg()
p = gc.Prog(obj.ANOP) p = s.Prog(obj.ANOP)
gc.Patch(pb0, p) gc.Patch(pb0, p)
gc.Patch(pb1, p) gc.Patch(pb1, p)
break break
} }
// Modern PPC uses ISEL // Modern PPC uses ISEL
p := gc.Prog(ppc64.AMOVD) p := s.Prog(ppc64.AMOVD)
p.From.Type = obj.TYPE_CONST p.From.Type = obj.TYPE_CONST
p.From.Offset = 1 p.From.Offset = 1
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
p.To.Reg = iselRegs[1] p.To.Reg = iselRegs[1]
iop := iselOps[v.Op] iop := iselOps[v.Op]
ssaGenISEL(v, iop.cond, iselRegs[iop.valueIfCond], iselRegs[1-iop.valueIfCond]) ssaGenISEL(s, v, iop.cond, iselRegs[iop.valueIfCond], iselRegs[1-iop.valueIfCond])
ssaGenISEL(v, ppc64.C_COND_EQ, iselRegs[1], v.Reg()) ssaGenISEL(s, v, ppc64.C_COND_EQ, iselRegs[1], v.Reg())
case ssa.OpPPC64LoweredZero: case ssa.OpPPC64LoweredZero:
...@@ -879,13 +879,13 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -879,13 +879,13 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
// than 1 iteration. // than 1 iteration.
if ctr > 1 { if ctr > 1 {
// Set up CTR loop counter // Set up CTR loop counter
p := gc.Prog(ppc64.AMOVD) p := s.Prog(ppc64.AMOVD)
p.From.Type = obj.TYPE_CONST p.From.Type = obj.TYPE_CONST
p.From.Offset = ctr p.From.Offset = ctr
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
p.To.Reg = ppc64.REGTMP p.To.Reg = ppc64.REGTMP
p = gc.Prog(ppc64.AMOVD) p = s.Prog(ppc64.AMOVD)
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = ppc64.REGTMP p.From.Reg = ppc64.REGTMP
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
...@@ -896,7 +896,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -896,7 +896,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
var top *obj.Prog var top *obj.Prog
for offset := int64(0); offset < 32; offset += 8 { for offset := int64(0); offset < 32; offset += 8 {
// This is the top of loop // This is the top of loop
p := gc.Prog(ppc64.AMOVD) p := s.Prog(ppc64.AMOVD)
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = ppc64.REG_R0 p.From.Reg = ppc64.REG_R0
p.To.Type = obj.TYPE_MEM p.To.Type = obj.TYPE_MEM
...@@ -910,7 +910,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -910,7 +910,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
// Increment address for the // Increment address for the
// 4 doublewords just zeroed. // 4 doublewords just zeroed.
p = gc.Prog(ppc64.AADD) p = s.Prog(ppc64.AADD)
p.Reg = v.Args[0].Reg() p.Reg = v.Args[0].Reg()
p.From.Type = obj.TYPE_CONST p.From.Type = obj.TYPE_CONST
p.From.Offset = 32 p.From.Offset = 32
...@@ -920,7 +920,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -920,7 +920,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
// Branch back to top of loop // Branch back to top of loop
// based on CTR // based on CTR
// BC with BO_BCTR generates bdnz // BC with BO_BCTR generates bdnz
p = gc.Prog(ppc64.ABC) p = s.Prog(ppc64.ABC)
p.From.Type = obj.TYPE_CONST p.From.Type = obj.TYPE_CONST
p.From.Offset = ppc64.BO_BCTR p.From.Offset = ppc64.BO_BCTR
p.Reg = ppc64.REG_R0 p.Reg = ppc64.REG_R0
...@@ -951,7 +951,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -951,7 +951,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
case rem >= 2: case rem >= 2:
op, size = ppc64.AMOVH, 2 op, size = ppc64.AMOVH, 2
} }
p := gc.Prog(op) p := s.Prog(op)
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = ppc64.REG_R0 p.From.Reg = ppc64.REG_R0
p.To.Type = obj.TYPE_MEM p.To.Type = obj.TYPE_MEM
...@@ -994,41 +994,41 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -994,41 +994,41 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
movu = ppc64.AMOVBU movu = ppc64.AMOVBU
} }
p := gc.Prog(ppc64.AADD) p := s.Prog(ppc64.AADD)
p.Reg = v.Args[0].Reg() p.Reg = v.Args[0].Reg()
p.From.Type = obj.TYPE_CONST p.From.Type = obj.TYPE_CONST
p.From.Offset = -sz p.From.Offset = -sz
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
p.To.Reg = v.Args[0].Reg() p.To.Reg = v.Args[0].Reg()
p = gc.Prog(ppc64.AADD) p = s.Prog(ppc64.AADD)
p.Reg = v.Args[1].Reg() p.Reg = v.Args[1].Reg()
p.From.Type = obj.TYPE_CONST p.From.Type = obj.TYPE_CONST
p.From.Offset = -sz p.From.Offset = -sz
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
p.To.Reg = v.Args[1].Reg() p.To.Reg = v.Args[1].Reg()
p = gc.Prog(movu) p = s.Prog(movu)
p.From.Type = obj.TYPE_MEM p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[1].Reg() p.From.Reg = v.Args[1].Reg()
p.From.Offset = sz p.From.Offset = sz
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
p.To.Reg = ppc64.REGTMP p.To.Reg = ppc64.REGTMP
p2 := gc.Prog(movu) p2 := s.Prog(movu)
p2.From.Type = obj.TYPE_REG p2.From.Type = obj.TYPE_REG
p2.From.Reg = ppc64.REGTMP p2.From.Reg = ppc64.REGTMP
p2.To.Type = obj.TYPE_MEM p2.To.Type = obj.TYPE_MEM
p2.To.Reg = v.Args[0].Reg() p2.To.Reg = v.Args[0].Reg()
p2.To.Offset = sz p2.To.Offset = sz
p3 := gc.Prog(ppc64.ACMPU) p3 := s.Prog(ppc64.ACMPU)
p3.From.Reg = v.Args[1].Reg() p3.From.Reg = v.Args[1].Reg()
p3.From.Type = obj.TYPE_REG p3.From.Type = obj.TYPE_REG
p3.To.Reg = v.Args[2].Reg() p3.To.Reg = v.Args[2].Reg()
p3.To.Type = obj.TYPE_REG p3.To.Type = obj.TYPE_REG
p4 := gc.Prog(ppc64.ABLT) p4 := s.Prog(ppc64.ABLT)
p4.To.Type = obj.TYPE_BRANCH p4.To.Type = obj.TYPE_BRANCH
gc.Patch(p4, p) gc.Patch(p4, p)
...@@ -1036,7 +1036,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -1036,7 +1036,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
s.Call(v) s.Call(v)
case ssa.OpPPC64CALLclosure, ssa.OpPPC64CALLinter: case ssa.OpPPC64CALLclosure, ssa.OpPPC64CALLinter:
p := gc.Prog(ppc64.AMOVD) p := s.Prog(ppc64.AMOVD)
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg() p.From.Reg = v.Args[0].Reg()
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
...@@ -1049,7 +1049,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -1049,7 +1049,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
// change the register allocation to put the value in // change the register allocation to put the value in
// R12 already, but I don't know how to do that. // R12 already, but I don't know how to do that.
// TODO: We have the technology now to implement TODO above. // TODO: We have the technology now to implement TODO above.
q := gc.Prog(ppc64.AMOVD) q := s.Prog(ppc64.AMOVD)
q.From = p.From q.From = p.From
q.To.Type = obj.TYPE_REG q.To.Type = obj.TYPE_REG
q.To.Reg = ppc64.REG_R12 q.To.Reg = ppc64.REG_R12
...@@ -1063,7 +1063,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -1063,7 +1063,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
// called via pointer might have been implemented in // called via pointer might have been implemented in
// a separate module and so overwritten the TOC // a separate module and so overwritten the TOC
// pointer in R2; reload it. // pointer in R2; reload it.
q := gc.Prog(ppc64.AMOVD) q := s.Prog(ppc64.AMOVD)
q.From.Type = obj.TYPE_MEM q.From.Type = obj.TYPE_MEM
q.From.Offset = 24 q.From.Offset = 24
q.From.Reg = ppc64.REGSP q.From.Reg = ppc64.REGSP
...@@ -1073,7 +1073,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -1073,7 +1073,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
case ssa.OpPPC64LoweredNilCheck: case ssa.OpPPC64LoweredNilCheck:
// Issue a load which will fault if arg is nil. // Issue a load which will fault if arg is nil.
p := gc.Prog(ppc64.AMOVBZ) p := s.Prog(ppc64.AMOVBZ)
p.From.Type = obj.TYPE_MEM p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg() p.From.Reg = v.Args[0].Reg()
gc.AddAux(&p.From, v) gc.AddAux(&p.From, v)
...@@ -1118,33 +1118,33 @@ func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) { ...@@ -1118,33 +1118,33 @@ func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
// defer returns in R3: // defer returns in R3:
// 0 if we should continue executing // 0 if we should continue executing
// 1 if we should jump to deferreturn call // 1 if we should jump to deferreturn call
p := gc.Prog(ppc64.ACMP) p := s.Prog(ppc64.ACMP)
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = ppc64.REG_R3 p.From.Reg = ppc64.REG_R3
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
p.To.Reg = ppc64.REG_R0 p.To.Reg = ppc64.REG_R0
p = gc.Prog(ppc64.ABNE) p = s.Prog(ppc64.ABNE)
p.To.Type = obj.TYPE_BRANCH p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()}) s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
if b.Succs[0].Block() != next { if b.Succs[0].Block() != next {
p := gc.Prog(obj.AJMP) p := s.Prog(obj.AJMP)
p.To.Type = obj.TYPE_BRANCH p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()}) s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
} }
case ssa.BlockPlain: case ssa.BlockPlain:
if b.Succs[0].Block() != next { if b.Succs[0].Block() != next {
p := gc.Prog(obj.AJMP) p := s.Prog(obj.AJMP)
p.To.Type = obj.TYPE_BRANCH p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()}) s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
} }
case ssa.BlockExit: case ssa.BlockExit:
gc.Prog(obj.AUNDEF) // tell plive.go that we never reach here s.Prog(obj.AUNDEF) // tell plive.go that we never reach here
case ssa.BlockRet: case ssa.BlockRet:
gc.Prog(obj.ARET) s.Prog(obj.ARET)
case ssa.BlockRetJmp: case ssa.BlockRetJmp:
p := gc.Prog(obj.AJMP) p := s.Prog(obj.AJMP)
p.To.Type = obj.TYPE_MEM p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN p.To.Name = obj.NAME_EXTERN
p.To.Sym = b.Aux.(*obj.LSym) p.To.Sym = b.Aux.(*obj.LSym)
...@@ -1159,35 +1159,35 @@ func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) { ...@@ -1159,35 +1159,35 @@ func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
var p *obj.Prog var p *obj.Prog
switch next { switch next {
case b.Succs[0].Block(): case b.Succs[0].Block():
p = gc.Prog(jmp.invasm) p = s.Prog(jmp.invasm)
likely *= -1 likely *= -1
p.To.Type = obj.TYPE_BRANCH p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()}) s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
if jmp.invasmun { if jmp.invasmun {
// TODO: The second branch is probably predict-not-taken since it is for FP unordered // TODO: The second branch is probably predict-not-taken since it is for FP unordered
q := gc.Prog(ppc64.ABVS) q := s.Prog(ppc64.ABVS)
q.To.Type = obj.TYPE_BRANCH q.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: q, B: b.Succs[1].Block()}) s.Branches = append(s.Branches, gc.Branch{P: q, B: b.Succs[1].Block()})
} }
case b.Succs[1].Block(): case b.Succs[1].Block():
p = gc.Prog(jmp.asm) p = s.Prog(jmp.asm)
p.To.Type = obj.TYPE_BRANCH p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()}) s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
if jmp.asmeq { if jmp.asmeq {
q := gc.Prog(ppc64.ABEQ) q := s.Prog(ppc64.ABEQ)
q.To.Type = obj.TYPE_BRANCH q.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: q, B: b.Succs[0].Block()}) s.Branches = append(s.Branches, gc.Branch{P: q, B: b.Succs[0].Block()})
} }
default: default:
p = gc.Prog(jmp.asm) p = s.Prog(jmp.asm)
p.To.Type = obj.TYPE_BRANCH p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()}) s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
if jmp.asmeq { if jmp.asmeq {
q := gc.Prog(ppc64.ABEQ) q := s.Prog(ppc64.ABEQ)
q.To.Type = obj.TYPE_BRANCH q.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: q, B: b.Succs[0].Block()}) s.Branches = append(s.Branches, gc.Branch{P: q, B: b.Succs[0].Block()})
} }
q := gc.Prog(obj.AJMP) q := s.Prog(obj.AJMP)
q.To.Type = obj.TYPE_BRANCH q.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: q, B: b.Succs[1].Block()}) s.Branches = append(s.Branches, gc.Branch{P: q, B: b.Succs[1].Block()})
} }
......
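The pattern in the hunks above repeats across every backend in this change: instruction creation moves off the package-level gc.Prog and onto the per-function *gc.SSAGenState, which already carries other codegen bookkeeping the diff touches, such as s.Branches, s.Pc(), s.AddrScratch and the SSEto387 map. A rough, hypothetical sketch of a state-owned Prog factory is below; the stand-in SSAGenState type and its internals are illustrative only, since the real method in cmd/compile/internal/gc may also set source positions and link the Prog through the obj context.

// Hypothetical sketch only — not the actual cmd/compile implementation.
// (cmd/internal/obj is internal to the toolchain, so this would only build
// inside that tree.)
package sketch

import "cmd/internal/obj"

// SSAGenState is a stand-in for gc.SSAGenState; only the Prog-creation
// piece is shown here.
type SSAGenState struct {
	progs []*obj.Prog // instructions emitted so far, in order
}

// Prog allocates a new instruction, records it on the state, and returns
// it so the caller can fill in operands — the same calling convention as
// the old package-level Prog constructor, minus the global.
func (s *SSAGenState) Prog(as obj.As) *obj.Prog {
	p := new(obj.Prog)
	p.As = as
	s.progs = append(s.progs, p)
	return p
}

With Prog hanging off the state, helpers such as opregreg and opregregimm only need the state threaded through as an explicit first argument, which is exactly what the signature changes in the hunks below do.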
...@@ -132,8 +132,8 @@ func moveByType(t ssa.Type) obj.As { ...@@ -132,8 +132,8 @@ func moveByType(t ssa.Type) obj.As {
// dest := dest(To) op src(From) // dest := dest(To) op src(From)
// and also returns the created obj.Prog so it // and also returns the created obj.Prog so it
// may be further adjusted (offset, scale, etc). // may be further adjusted (offset, scale, etc).
func opregreg(op obj.As, dest, src int16) *obj.Prog { func opregreg(s *gc.SSAGenState, op obj.As, dest, src int16) *obj.Prog {
p := gc.Prog(op) p := s.Prog(op)
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
p.To.Reg = dest p.To.Reg = dest
...@@ -145,8 +145,8 @@ func opregreg(op obj.As, dest, src int16) *obj.Prog { ...@@ -145,8 +145,8 @@ func opregreg(op obj.As, dest, src int16) *obj.Prog {
// dest := src(From) op off // dest := src(From) op off
// and also returns the created obj.Prog so it // and also returns the created obj.Prog so it
// may be further adjusted (offset, scale, etc). // may be further adjusted (offset, scale, etc).
func opregregimm(op obj.As, dest, src int16, off int64) *obj.Prog { func opregregimm(s *gc.SSAGenState, op obj.As, dest, src int16, off int64) *obj.Prog {
p := gc.Prog(op) p := s.Prog(op)
p.From.Type = obj.TYPE_CONST p.From.Type = obj.TYPE_CONST
p.From.Offset = off p.From.Offset = off
p.Reg = src p.Reg = src
...@@ -166,7 +166,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -166,7 +166,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
if r2 == s390x.REG_R0 { if r2 == s390x.REG_R0 {
v.Fatalf("cannot use R0 as shift value %s", v.LongString()) v.Fatalf("cannot use R0 as shift value %s", v.LongString())
} }
p := opregreg(v.Op.Asm(), r, r2) p := opregreg(s, v.Op.Asm(), r, r2)
if r != r1 { if r != r1 {
p.Reg = r1 p.Reg = r1
} }
...@@ -178,7 +178,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -178,7 +178,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
r := v.Reg() r := v.Reg()
r1 := v.Args[0].Reg() r1 := v.Args[0].Reg()
r2 := v.Args[1].Reg() r2 := v.Args[1].Reg()
p := opregreg(v.Op.Asm(), r, r2) p := opregreg(s, v.Op.Asm(), r, r2)
if r != r1 { if r != r1 {
p.Reg = r1 p.Reg = r1
} }
...@@ -191,7 +191,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -191,7 +191,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
if r != v.Args[0].Reg() { if r != v.Args[0].Reg() {
v.Fatalf("input[0] and output not in same register %s", v.LongString()) v.Fatalf("input[0] and output not in same register %s", v.LongString())
} }
opregreg(v.Op.Asm(), r, v.Args[1].Reg()) opregreg(s, v.Op.Asm(), r, v.Args[1].Reg())
case ssa.OpS390XFMADD, ssa.OpS390XFMADDS, case ssa.OpS390XFMADD, ssa.OpS390XFMADDS,
ssa.OpS390XFMSUB, ssa.OpS390XFMSUBS: ssa.OpS390XFMSUB, ssa.OpS390XFMSUBS:
r := v.Reg() r := v.Reg()
...@@ -200,7 +200,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -200,7 +200,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
} }
r1 := v.Args[1].Reg() r1 := v.Args[1].Reg()
r2 := v.Args[2].Reg() r2 := v.Args[2].Reg()
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = r1 p.From.Reg = r1
p.Reg = r2 p.Reg = r2
...@@ -222,8 +222,8 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -222,8 +222,8 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
v.Op == ssa.OpS390XMODD || v.Op == ssa.OpS390XMODW { v.Op == ssa.OpS390XMODD || v.Op == ssa.OpS390XMODW {
var c *obj.Prog var c *obj.Prog
c = gc.Prog(s390x.ACMP) c = s.Prog(s390x.ACMP)
j = gc.Prog(s390x.ABEQ) j = s.Prog(s390x.ABEQ)
c.From.Type = obj.TYPE_REG c.From.Type = obj.TYPE_REG
c.From.Reg = divisor c.From.Reg = divisor
...@@ -234,7 +234,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -234,7 +234,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
} }
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = divisor p.From.Reg = divisor
p.Reg = 0 p.Reg = 0
...@@ -243,18 +243,18 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -243,18 +243,18 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
// signed division, rest of the check for -1 case // signed division, rest of the check for -1 case
if j != nil { if j != nil {
j2 := gc.Prog(s390x.ABR) j2 := s.Prog(s390x.ABR)
j2.To.Type = obj.TYPE_BRANCH j2.To.Type = obj.TYPE_BRANCH
var n *obj.Prog var n *obj.Prog
if v.Op == ssa.OpS390XDIVD || v.Op == ssa.OpS390XDIVW { if v.Op == ssa.OpS390XDIVD || v.Op == ssa.OpS390XDIVW {
// n * -1 = -n // n * -1 = -n
n = gc.Prog(s390x.ANEG) n = s.Prog(s390x.ANEG)
n.To.Type = obj.TYPE_REG n.To.Type = obj.TYPE_REG
n.To.Reg = dividend n.To.Reg = dividend
} else { } else {
// n % -1 == 0 // n % -1 == 0
n = gc.Prog(s390x.AXOR) n = s.Prog(s390x.AXOR)
n.From.Type = obj.TYPE_REG n.From.Type = obj.TYPE_REG
n.From.Reg = dividend n.From.Reg = dividend
n.To.Type = obj.TYPE_REG n.To.Type = obj.TYPE_REG
...@@ -265,7 +265,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -265,7 +265,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
j2.To.Val = s.Pc() j2.To.Val = s.Pc()
} }
case ssa.OpS390XADDconst, ssa.OpS390XADDWconst: case ssa.OpS390XADDconst, ssa.OpS390XADDWconst:
opregregimm(v.Op.Asm(), v.Reg(), v.Args[0].Reg(), v.AuxInt) opregregimm(s, v.Op.Asm(), v.Reg(), v.Args[0].Reg(), v.AuxInt)
case ssa.OpS390XMULLDconst, ssa.OpS390XMULLWconst, case ssa.OpS390XMULLDconst, ssa.OpS390XMULLWconst,
ssa.OpS390XSUBconst, ssa.OpS390XSUBWconst, ssa.OpS390XSUBconst, ssa.OpS390XSUBWconst,
ssa.OpS390XANDconst, ssa.OpS390XANDWconst, ssa.OpS390XANDconst, ssa.OpS390XANDWconst,
...@@ -275,7 +275,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -275,7 +275,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
if r != v.Args[0].Reg() { if r != v.Args[0].Reg() {
v.Fatalf("input[0] and output not in same register %s", v.LongString()) v.Fatalf("input[0] and output not in same register %s", v.LongString())
} }
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_CONST p.From.Type = obj.TYPE_CONST
p.From.Offset = v.AuxInt p.From.Offset = v.AuxInt
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
...@@ -284,7 +284,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -284,7 +284,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
ssa.OpS390XSRDconst, ssa.OpS390XSRWconst, ssa.OpS390XSRDconst, ssa.OpS390XSRWconst,
ssa.OpS390XSRADconst, ssa.OpS390XSRAWconst, ssa.OpS390XSRADconst, ssa.OpS390XSRAWconst,
ssa.OpS390XRLLGconst, ssa.OpS390XRLLconst: ssa.OpS390XRLLGconst, ssa.OpS390XRLLconst:
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_CONST p.From.Type = obj.TYPE_CONST
p.From.Offset = v.AuxInt p.From.Offset = v.AuxInt
r := v.Reg() r := v.Reg()
...@@ -296,7 +296,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -296,7 +296,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.To.Reg = r p.To.Reg = r
case ssa.OpS390XSUBEcarrymask, ssa.OpS390XSUBEWcarrymask: case ssa.OpS390XSUBEcarrymask, ssa.OpS390XSUBEWcarrymask:
r := v.Reg() r := v.Reg()
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = r p.From.Reg = r
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
...@@ -304,7 +304,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -304,7 +304,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
case ssa.OpS390XMOVDaddridx: case ssa.OpS390XMOVDaddridx:
r := v.Args[0].Reg() r := v.Args[0].Reg()
i := v.Args[1].Reg() i := v.Args[1].Reg()
p := gc.Prog(s390x.AMOVD) p := s.Prog(s390x.AMOVD)
p.From.Scale = 1 p.From.Scale = 1
if i == s390x.REGSP { if i == s390x.REGSP {
r, i = i, r r, i = i, r
...@@ -316,32 +316,32 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -316,32 +316,32 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg() p.To.Reg = v.Reg()
case ssa.OpS390XMOVDaddr: case ssa.OpS390XMOVDaddr:
p := gc.Prog(s390x.AMOVD) p := s.Prog(s390x.AMOVD)
p.From.Type = obj.TYPE_ADDR p.From.Type = obj.TYPE_ADDR
p.From.Reg = v.Args[0].Reg() p.From.Reg = v.Args[0].Reg()
gc.AddAux(&p.From, v) gc.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg() p.To.Reg = v.Reg()
case ssa.OpS390XCMP, ssa.OpS390XCMPW, ssa.OpS390XCMPU, ssa.OpS390XCMPWU: case ssa.OpS390XCMP, ssa.OpS390XCMPW, ssa.OpS390XCMPU, ssa.OpS390XCMPWU:
opregreg(v.Op.Asm(), v.Args[1].Reg(), v.Args[0].Reg()) opregreg(s, v.Op.Asm(), v.Args[1].Reg(), v.Args[0].Reg())
case ssa.OpS390XFCMPS, ssa.OpS390XFCMP: case ssa.OpS390XFCMPS, ssa.OpS390XFCMP:
opregreg(v.Op.Asm(), v.Args[1].Reg(), v.Args[0].Reg()) opregreg(s, v.Op.Asm(), v.Args[1].Reg(), v.Args[0].Reg())
case ssa.OpS390XCMPconst, ssa.OpS390XCMPWconst, ssa.OpS390XCMPUconst, ssa.OpS390XCMPWUconst: case ssa.OpS390XCMPconst, ssa.OpS390XCMPWconst, ssa.OpS390XCMPUconst, ssa.OpS390XCMPWUconst:
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg() p.From.Reg = v.Args[0].Reg()
p.To.Type = obj.TYPE_CONST p.To.Type = obj.TYPE_CONST
p.To.Offset = v.AuxInt p.To.Offset = v.AuxInt
case ssa.OpS390XMOVDconst: case ssa.OpS390XMOVDconst:
x := v.Reg() x := v.Reg()
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_CONST p.From.Type = obj.TYPE_CONST
p.From.Offset = v.AuxInt p.From.Offset = v.AuxInt
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
p.To.Reg = x p.To.Reg = x
case ssa.OpS390XFMOVSconst, ssa.OpS390XFMOVDconst: case ssa.OpS390XFMOVSconst, ssa.OpS390XFMOVDconst:
x := v.Reg() x := v.Reg()
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_FCONST p.From.Type = obj.TYPE_FCONST
p.From.Val = math.Float64frombits(uint64(v.AuxInt)) p.From.Val = math.Float64frombits(uint64(v.AuxInt))
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
...@@ -356,7 +356,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -356,7 +356,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
if r != v.Args[0].Reg() { if r != v.Args[0].Reg() {
v.Fatalf("input[0] and output not in same register %s", v.LongString()) v.Fatalf("input[0] and output not in same register %s", v.LongString())
} }
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[1].Reg() p.From.Reg = v.Args[1].Reg()
gc.AddAux(&p.From, v) gc.AddAux(&p.From, v)
...@@ -367,7 +367,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -367,7 +367,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
ssa.OpS390XMOVDBRload, ssa.OpS390XMOVWBRload, ssa.OpS390XMOVHBRload, ssa.OpS390XMOVDBRload, ssa.OpS390XMOVWBRload, ssa.OpS390XMOVHBRload,
ssa.OpS390XMOVBload, ssa.OpS390XMOVHload, ssa.OpS390XMOVWload, ssa.OpS390XMOVBload, ssa.OpS390XMOVHload, ssa.OpS390XMOVWload,
ssa.OpS390XFMOVSload, ssa.OpS390XFMOVDload: ssa.OpS390XFMOVSload, ssa.OpS390XFMOVDload:
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg() p.From.Reg = v.Args[0].Reg()
gc.AddAux(&p.From, v) gc.AddAux(&p.From, v)
...@@ -381,7 +381,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -381,7 +381,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
if i == s390x.REGSP { if i == s390x.REGSP {
r, i = i, r r, i = i, r
} }
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM p.From.Type = obj.TYPE_MEM
p.From.Reg = r p.From.Reg = r
p.From.Scale = 1 p.From.Scale = 1
...@@ -392,7 +392,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -392,7 +392,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
case ssa.OpS390XMOVBstore, ssa.OpS390XMOVHstore, ssa.OpS390XMOVWstore, ssa.OpS390XMOVDstore, case ssa.OpS390XMOVBstore, ssa.OpS390XMOVHstore, ssa.OpS390XMOVWstore, ssa.OpS390XMOVDstore,
ssa.OpS390XMOVHBRstore, ssa.OpS390XMOVWBRstore, ssa.OpS390XMOVDBRstore, ssa.OpS390XMOVHBRstore, ssa.OpS390XMOVWBRstore, ssa.OpS390XMOVDBRstore,
ssa.OpS390XFMOVSstore, ssa.OpS390XFMOVDstore: ssa.OpS390XFMOVSstore, ssa.OpS390XFMOVDstore:
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[1].Reg() p.From.Reg = v.Args[1].Reg()
p.To.Type = obj.TYPE_MEM p.To.Type = obj.TYPE_MEM
...@@ -406,7 +406,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -406,7 +406,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
if i == s390x.REGSP { if i == s390x.REGSP {
r, i = i, r r, i = i, r
} }
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[2].Reg() p.From.Reg = v.Args[2].Reg()
p.To.Type = obj.TYPE_MEM p.To.Type = obj.TYPE_MEM
...@@ -415,7 +415,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -415,7 +415,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.To.Index = i p.To.Index = i
gc.AddAux(&p.To, v) gc.AddAux(&p.To, v)
case ssa.OpS390XMOVDstoreconst, ssa.OpS390XMOVWstoreconst, ssa.OpS390XMOVHstoreconst, ssa.OpS390XMOVBstoreconst: case ssa.OpS390XMOVDstoreconst, ssa.OpS390XMOVWstoreconst, ssa.OpS390XMOVHstoreconst, ssa.OpS390XMOVBstoreconst:
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_CONST p.From.Type = obj.TYPE_CONST
sc := v.AuxValAndOff() sc := v.AuxValAndOff()
p.From.Offset = sc.Val() p.From.Offset = sc.Val()
...@@ -428,9 +428,9 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -428,9 +428,9 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
ssa.OpS390XCFEBRA, ssa.OpS390XCFDBRA, ssa.OpS390XCGEBRA, ssa.OpS390XCGDBRA, ssa.OpS390XCFEBRA, ssa.OpS390XCFDBRA, ssa.OpS390XCGEBRA, ssa.OpS390XCGDBRA,
ssa.OpS390XLDEBR, ssa.OpS390XLEDBR, ssa.OpS390XLDEBR, ssa.OpS390XLEDBR,
ssa.OpS390XFNEG, ssa.OpS390XFNEGS: ssa.OpS390XFNEG, ssa.OpS390XFNEGS:
opregreg(v.Op.Asm(), v.Reg(), v.Args[0].Reg()) opregreg(s, v.Op.Asm(), v.Reg(), v.Args[0].Reg())
case ssa.OpS390XCLEAR: case ssa.OpS390XCLEAR:
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_CONST p.From.Type = obj.TYPE_CONST
sc := v.AuxValAndOff() sc := v.AuxValAndOff()
p.From.Offset = sc.Val() p.From.Offset = sc.Val()
...@@ -444,7 +444,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -444,7 +444,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
x := v.Args[0].Reg() x := v.Args[0].Reg()
y := v.Reg() y := v.Reg()
if x != y { if x != y {
opregreg(moveByType(v.Type), y, x) opregreg(s, moveByType(v.Type), y, x)
} }
case ssa.OpS390XMOVDnop: case ssa.OpS390XMOVDnop:
if v.Reg() != v.Args[0].Reg() { if v.Reg() != v.Args[0].Reg() {
...@@ -456,7 +456,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -456,7 +456,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
v.Fatalf("load flags not implemented: %v", v.LongString()) v.Fatalf("load flags not implemented: %v", v.LongString())
return return
} }
p := gc.Prog(loadByType(v.Type)) p := s.Prog(loadByType(v.Type))
gc.AddrAuto(&p.From, v.Args[0]) gc.AddrAuto(&p.From, v.Args[0])
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg() p.To.Reg = v.Reg()
...@@ -465,7 +465,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -465,7 +465,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
v.Fatalf("store flags not implemented: %v", v.LongString()) v.Fatalf("store flags not implemented: %v", v.LongString())
return return
} }
p := gc.Prog(storeByType(v.Type)) p := s.Prog(storeByType(v.Type))
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg() p.From.Reg = v.Args[0].Reg()
gc.AddrAuto(&p.To, v) gc.AddrAuto(&p.To, v)
...@@ -476,7 +476,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -476,7 +476,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
// input is already rounded // input is already rounded
case ssa.OpS390XLoweredGetG: case ssa.OpS390XLoweredGetG:
r := v.Reg() r := v.Reg()
p := gc.Prog(s390x.AMOVD) p := s.Prog(s390x.AMOVD)
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = s390x.REGG p.From.Reg = s390x.REGG
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
...@@ -485,7 +485,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -485,7 +485,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
s.Call(v) s.Call(v)
case ssa.OpS390XFLOGR, ssa.OpS390XNEG, ssa.OpS390XNEGW, case ssa.OpS390XFLOGR, ssa.OpS390XNEG, ssa.OpS390XNEGW,
ssa.OpS390XMOVWBR, ssa.OpS390XMOVDBR: ssa.OpS390XMOVWBR, ssa.OpS390XMOVDBR:
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg() p.From.Reg = v.Args[0].Reg()
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
...@@ -500,13 +500,13 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -500,13 +500,13 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
if r != v.Args[0].Reg() { if r != v.Args[0].Reg() {
v.Fatalf("input[0] and output not in same register %s", v.LongString()) v.Fatalf("input[0] and output not in same register %s", v.LongString())
} }
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[1].Reg() p.From.Reg = v.Args[1].Reg()
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
p.To.Reg = r p.To.Reg = r
case ssa.OpS390XFSQRT: case ssa.OpS390XFSQRT:
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg() p.From.Reg = v.Args[0].Reg()
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
...@@ -519,7 +519,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -519,7 +519,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
v.Fatalf("AddTupleFirst* should never make it to codegen %v", v.LongString()) v.Fatalf("AddTupleFirst* should never make it to codegen %v", v.LongString())
case ssa.OpS390XLoweredNilCheck: case ssa.OpS390XLoweredNilCheck:
// Issue a load which will fault if the input is nil. // Issue a load which will fault if the input is nil.
p := gc.Prog(s390x.AMOVBZ) p := s.Prog(s390x.AMOVBZ)
p.From.Type = obj.TYPE_MEM p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg() p.From.Reg = v.Args[0].Reg()
gc.AddAux(&p.From, v) gc.AddAux(&p.From, v)
...@@ -530,7 +530,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -530,7 +530,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
} }
case ssa.OpS390XMVC: case ssa.OpS390XMVC:
vo := v.AuxValAndOff() vo := v.AuxValAndOff()
p := gc.Prog(s390x.AMVC) p := s.Prog(s390x.AMVC)
p.From.Type = obj.TYPE_MEM p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[1].Reg() p.From.Reg = v.Args[1].Reg()
p.From.Offset = vo.Off() p.From.Offset = vo.Off()
...@@ -547,7 +547,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -547,7 +547,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
v.Fatalf("invalid store multiple %s", v.LongString()) v.Fatalf("invalid store multiple %s", v.LongString())
} }
} }
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[1].Reg() p.From.Reg = v.Args[1].Reg()
p.Reg = v.Args[len(v.Args)-2].Reg() p.Reg = v.Args[len(v.Args)-2].Reg()
...@@ -566,7 +566,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -566,7 +566,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
// BNE mvc // BNE mvc
// MVC $rem, 0(R2), 0(R1) // if rem > 0 // MVC $rem, 0(R2), 0(R1) // if rem > 0
// arg2 is the last address to move in the loop + 256 // arg2 is the last address to move in the loop + 256
mvc := gc.Prog(s390x.AMVC) mvc := s.Prog(s390x.AMVC)
mvc.From.Type = obj.TYPE_MEM mvc.From.Type = obj.TYPE_MEM
mvc.From.Reg = v.Args[1].Reg() mvc.From.Reg = v.Args[1].Reg()
mvc.To.Type = obj.TYPE_MEM mvc.To.Type = obj.TYPE_MEM
...@@ -576,7 +576,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -576,7 +576,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
mvc.From3.Offset = 256 mvc.From3.Offset = 256
for i := 0; i < 2; i++ { for i := 0; i < 2; i++ {
movd := gc.Prog(s390x.AMOVD) movd := s.Prog(s390x.AMOVD)
movd.From.Type = obj.TYPE_ADDR movd.From.Type = obj.TYPE_ADDR
movd.From.Reg = v.Args[i].Reg() movd.From.Reg = v.Args[i].Reg()
movd.From.Offset = 256 movd.From.Offset = 256
...@@ -584,18 +584,18 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -584,18 +584,18 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
movd.To.Reg = v.Args[i].Reg() movd.To.Reg = v.Args[i].Reg()
} }
cmpu := gc.Prog(s390x.ACMPU) cmpu := s.Prog(s390x.ACMPU)
cmpu.From.Reg = v.Args[1].Reg() cmpu.From.Reg = v.Args[1].Reg()
cmpu.From.Type = obj.TYPE_REG cmpu.From.Type = obj.TYPE_REG
cmpu.To.Reg = v.Args[2].Reg() cmpu.To.Reg = v.Args[2].Reg()
cmpu.To.Type = obj.TYPE_REG cmpu.To.Type = obj.TYPE_REG
bne := gc.Prog(s390x.ABLT) bne := s.Prog(s390x.ABLT)
bne.To.Type = obj.TYPE_BRANCH bne.To.Type = obj.TYPE_BRANCH
gc.Patch(bne, mvc) gc.Patch(bne, mvc)
if v.AuxInt > 0 { if v.AuxInt > 0 {
mvc := gc.Prog(s390x.AMVC) mvc := s.Prog(s390x.AMVC)
mvc.From.Type = obj.TYPE_MEM mvc.From.Type = obj.TYPE_MEM
mvc.From.Reg = v.Args[1].Reg() mvc.From.Reg = v.Args[1].Reg()
mvc.To.Type = obj.TYPE_MEM mvc.To.Type = obj.TYPE_MEM
...@@ -615,52 +615,52 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -615,52 +615,52 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
// BNE clear // BNE clear
// CLEAR $rem, 0(R1) // if rem > 0 // CLEAR $rem, 0(R1) // if rem > 0
// arg1 is the last address to zero in the loop + 256 // arg1 is the last address to zero in the loop + 256
clear := gc.Prog(s390x.ACLEAR) clear := s.Prog(s390x.ACLEAR)
clear.From.Type = obj.TYPE_CONST clear.From.Type = obj.TYPE_CONST
clear.From.Offset = 256 clear.From.Offset = 256
clear.To.Type = obj.TYPE_MEM clear.To.Type = obj.TYPE_MEM
clear.To.Reg = v.Args[0].Reg() clear.To.Reg = v.Args[0].Reg()
movd := gc.Prog(s390x.AMOVD) movd := s.Prog(s390x.AMOVD)
movd.From.Type = obj.TYPE_ADDR movd.From.Type = obj.TYPE_ADDR
movd.From.Reg = v.Args[0].Reg() movd.From.Reg = v.Args[0].Reg()
movd.From.Offset = 256 movd.From.Offset = 256
movd.To.Type = obj.TYPE_REG movd.To.Type = obj.TYPE_REG
movd.To.Reg = v.Args[0].Reg() movd.To.Reg = v.Args[0].Reg()
cmpu := gc.Prog(s390x.ACMPU) cmpu := s.Prog(s390x.ACMPU)
cmpu.From.Reg = v.Args[0].Reg() cmpu.From.Reg = v.Args[0].Reg()
cmpu.From.Type = obj.TYPE_REG cmpu.From.Type = obj.TYPE_REG
cmpu.To.Reg = v.Args[1].Reg() cmpu.To.Reg = v.Args[1].Reg()
cmpu.To.Type = obj.TYPE_REG cmpu.To.Type = obj.TYPE_REG
bne := gc.Prog(s390x.ABLT) bne := s.Prog(s390x.ABLT)
bne.To.Type = obj.TYPE_BRANCH bne.To.Type = obj.TYPE_BRANCH
gc.Patch(bne, clear) gc.Patch(bne, clear)
if v.AuxInt > 0 { if v.AuxInt > 0 {
clear := gc.Prog(s390x.ACLEAR) clear := s.Prog(s390x.ACLEAR)
clear.From.Type = obj.TYPE_CONST clear.From.Type = obj.TYPE_CONST
clear.From.Offset = v.AuxInt clear.From.Offset = v.AuxInt
clear.To.Type = obj.TYPE_MEM clear.To.Type = obj.TYPE_MEM
clear.To.Reg = v.Args[0].Reg() clear.To.Reg = v.Args[0].Reg()
} }
case ssa.OpS390XMOVWZatomicload, ssa.OpS390XMOVDatomicload: case ssa.OpS390XMOVWZatomicload, ssa.OpS390XMOVDatomicload:
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg() p.From.Reg = v.Args[0].Reg()
gc.AddAux(&p.From, v) gc.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg0() p.To.Reg = v.Reg0()
case ssa.OpS390XMOVWatomicstore, ssa.OpS390XMOVDatomicstore: case ssa.OpS390XMOVWatomicstore, ssa.OpS390XMOVDatomicstore:
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[1].Reg() p.From.Reg = v.Args[1].Reg()
p.To.Type = obj.TYPE_MEM p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg() p.To.Reg = v.Args[0].Reg()
gc.AddAux(&p.To, v) gc.AddAux(&p.To, v)
case ssa.OpS390XLAA, ssa.OpS390XLAAG: case ssa.OpS390XLAA, ssa.OpS390XLAAG:
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.Reg = v.Reg0() p.Reg = v.Reg0()
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[1].Reg() p.From.Reg = v.Args[1].Reg()
...@@ -676,7 +676,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -676,7 +676,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
// NOP (so the BNE has somewhere to land) // NOP (so the BNE has somewhere to land)
// CS{,G} arg1, arg2, arg0 // CS{,G} arg1, arg2, arg0
cs := gc.Prog(v.Op.Asm()) cs := s.Prog(v.Op.Asm())
cs.From.Type = obj.TYPE_REG cs.From.Type = obj.TYPE_REG
cs.From.Reg = v.Args[1].Reg() // old cs.From.Reg = v.Args[1].Reg() // old
cs.Reg = v.Args[2].Reg() // new cs.Reg = v.Args[2].Reg() // new
...@@ -685,25 +685,25 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -685,25 +685,25 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
gc.AddAux(&cs.To, v) gc.AddAux(&cs.To, v)
// MOVD $0, ret // MOVD $0, ret
movd := gc.Prog(s390x.AMOVD) movd := s.Prog(s390x.AMOVD)
movd.From.Type = obj.TYPE_CONST movd.From.Type = obj.TYPE_CONST
movd.From.Offset = 0 movd.From.Offset = 0
movd.To.Type = obj.TYPE_REG movd.To.Type = obj.TYPE_REG
movd.To.Reg = v.Reg0() movd.To.Reg = v.Reg0()
// BNE 2(PC) // BNE 2(PC)
bne := gc.Prog(s390x.ABNE) bne := s.Prog(s390x.ABNE)
bne.To.Type = obj.TYPE_BRANCH bne.To.Type = obj.TYPE_BRANCH
// MOVD $1, ret // MOVD $1, ret
movd = gc.Prog(s390x.AMOVD) movd = s.Prog(s390x.AMOVD)
movd.From.Type = obj.TYPE_CONST movd.From.Type = obj.TYPE_CONST
movd.From.Offset = 1 movd.From.Offset = 1
movd.To.Type = obj.TYPE_REG movd.To.Type = obj.TYPE_REG
movd.To.Reg = v.Reg0() movd.To.Reg = v.Reg0()
// NOP (so the BNE has somewhere to land) // NOP (so the BNE has somewhere to land)
nop := gc.Prog(obj.ANOP) nop := s.Prog(obj.ANOP)
gc.Patch(bne, nop) gc.Patch(bne, nop)
case ssa.OpS390XLoweredAtomicExchange32, ssa.OpS390XLoweredAtomicExchange64: case ssa.OpS390XLoweredAtomicExchange32, ssa.OpS390XLoweredAtomicExchange64:
// Loop until the CS{,G} succeeds. // Loop until the CS{,G} succeeds.
...@@ -712,7 +712,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -712,7 +712,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
// BNE cs // BNE cs
// MOV{WZ,D} arg0, ret // MOV{WZ,D} arg0, ret
load := gc.Prog(loadByType(v.Type.FieldType(0))) load := s.Prog(loadByType(v.Type.FieldType(0)))
load.From.Type = obj.TYPE_MEM load.From.Type = obj.TYPE_MEM
load.From.Reg = v.Args[0].Reg() load.From.Reg = v.Args[0].Reg()
load.To.Type = obj.TYPE_REG load.To.Type = obj.TYPE_REG
...@@ -720,7 +720,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -720,7 +720,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
gc.AddAux(&load.From, v) gc.AddAux(&load.From, v)
// CS{,G} ret, arg1, arg0 // CS{,G} ret, arg1, arg0
cs := gc.Prog(v.Op.Asm()) cs := s.Prog(v.Op.Asm())
cs.From.Type = obj.TYPE_REG cs.From.Type = obj.TYPE_REG
cs.From.Reg = v.Reg0() // old cs.From.Reg = v.Reg0() // old
cs.Reg = v.Args[1].Reg() // new cs.Reg = v.Args[1].Reg() // new
...@@ -729,7 +729,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -729,7 +729,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
gc.AddAux(&cs.To, v) gc.AddAux(&cs.To, v)
// BNE cs // BNE cs
bne := gc.Prog(s390x.ABNE) bne := s.Prog(s390x.ABNE)
bne.To.Type = obj.TYPE_BRANCH bne.To.Type = obj.TYPE_BRANCH
gc.Patch(bne, cs) gc.Patch(bne, cs)
default: default:
...@@ -754,7 +754,7 @@ func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) { ...@@ -754,7 +754,7 @@ func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
switch b.Kind { switch b.Kind {
case ssa.BlockPlain: case ssa.BlockPlain:
if b.Succs[0].Block() != next { if b.Succs[0].Block() != next {
p := gc.Prog(s390x.ABR) p := s.Prog(s390x.ABR)
p.To.Type = obj.TYPE_BRANCH p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()}) s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
} }
...@@ -762,25 +762,25 @@ func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) { ...@@ -762,25 +762,25 @@ func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
// defer returns in R3: // defer returns in R3:
// 0 if we should continue executing // 0 if we should continue executing
// 1 if we should jump to deferreturn call // 1 if we should jump to deferreturn call
p := gc.Prog(s390x.ACMPW) p := s.Prog(s390x.ACMPW)
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = s390x.REG_R3 p.From.Reg = s390x.REG_R3
p.To.Type = obj.TYPE_CONST p.To.Type = obj.TYPE_CONST
p.To.Offset = 0 p.To.Offset = 0
p = gc.Prog(s390x.ABNE) p = s.Prog(s390x.ABNE)
p.To.Type = obj.TYPE_BRANCH p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()}) s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
if b.Succs[0].Block() != next { if b.Succs[0].Block() != next {
p := gc.Prog(s390x.ABR) p := s.Prog(s390x.ABR)
p.To.Type = obj.TYPE_BRANCH p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()}) s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
} }
case ssa.BlockExit: case ssa.BlockExit:
gc.Prog(obj.AUNDEF) // tell plive.go that we never reach here s.Prog(obj.AUNDEF) // tell plive.go that we never reach here
case ssa.BlockRet: case ssa.BlockRet:
gc.Prog(obj.ARET) s.Prog(obj.ARET)
case ssa.BlockRetJmp: case ssa.BlockRetJmp:
p := gc.Prog(s390x.ABR) p := s.Prog(s390x.ABR)
p.To.Type = obj.TYPE_MEM p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN p.To.Name = obj.NAME_EXTERN
p.To.Sym = b.Aux.(*obj.LSym) p.To.Sym = b.Aux.(*obj.LSym)
...@@ -793,19 +793,19 @@ func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) { ...@@ -793,19 +793,19 @@ func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
var p *obj.Prog var p *obj.Prog
switch next { switch next {
case b.Succs[0].Block(): case b.Succs[0].Block():
p = gc.Prog(jmp.invasm) p = s.Prog(jmp.invasm)
likely *= -1 likely *= -1
p.To.Type = obj.TYPE_BRANCH p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()}) s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
case b.Succs[1].Block(): case b.Succs[1].Block():
p = gc.Prog(jmp.asm) p = s.Prog(jmp.asm)
p.To.Type = obj.TYPE_BRANCH p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()}) s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
default: default:
p = gc.Prog(jmp.asm) p = s.Prog(jmp.asm)
p.To.Type = obj.TYPE_BRANCH p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()}) s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
q := gc.Prog(s390x.ABR) q := s.Prog(s390x.ABR)
q.To.Type = obj.TYPE_BRANCH q.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: q, B: b.Succs[1].Block()}) s.Branches = append(s.Branches, gc.Branch{P: q, B: b.Succs[1].Block()})
} }
......
...@@ -21,7 +21,7 @@ func ssaGenValue387(s *gc.SSAGenState, v *ssa.Value) { ...@@ -21,7 +21,7 @@ func ssaGenValue387(s *gc.SSAGenState, v *ssa.Value) {
switch v.Op { switch v.Op {
case ssa.Op386MOVSSconst, ssa.Op386MOVSDconst: case ssa.Op386MOVSSconst, ssa.Op386MOVSDconst:
p := gc.Prog(loadPush(v.Type)) p := s.Prog(loadPush(v.Type))
p.From.Type = obj.TYPE_FCONST p.From.Type = obj.TYPE_FCONST
p.From.Val = math.Float64frombits(uint64(v.AuxInt)) p.From.Val = math.Float64frombits(uint64(v.AuxInt))
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
...@@ -29,7 +29,7 @@ func ssaGenValue387(s *gc.SSAGenState, v *ssa.Value) { ...@@ -29,7 +29,7 @@ func ssaGenValue387(s *gc.SSAGenState, v *ssa.Value) {
popAndSave(s, v) popAndSave(s, v)
case ssa.Op386MOVSSconst2, ssa.Op386MOVSDconst2: case ssa.Op386MOVSSconst2, ssa.Op386MOVSDconst2:
p := gc.Prog(loadPush(v.Type)) p := s.Prog(loadPush(v.Type))
p.From.Type = obj.TYPE_MEM p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg() p.From.Reg = v.Args[0].Reg()
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
...@@ -37,7 +37,7 @@ func ssaGenValue387(s *gc.SSAGenState, v *ssa.Value) { ...@@ -37,7 +37,7 @@ func ssaGenValue387(s *gc.SSAGenState, v *ssa.Value) {
popAndSave(s, v) popAndSave(s, v)
case ssa.Op386MOVSSload, ssa.Op386MOVSDload, ssa.Op386MOVSSloadidx1, ssa.Op386MOVSDloadidx1, ssa.Op386MOVSSloadidx4, ssa.Op386MOVSDloadidx8: case ssa.Op386MOVSSload, ssa.Op386MOVSDload, ssa.Op386MOVSSloadidx1, ssa.Op386MOVSDloadidx1, ssa.Op386MOVSSloadidx4, ssa.Op386MOVSDloadidx8:
p := gc.Prog(loadPush(v.Type)) p := s.Prog(loadPush(v.Type))
p.From.Type = obj.TYPE_MEM p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg() p.From.Reg = v.Args[0].Reg()
gc.AddAux(&p.From, v) gc.AddAux(&p.From, v)
...@@ -68,7 +68,7 @@ func ssaGenValue387(s *gc.SSAGenState, v *ssa.Value) { ...@@ -68,7 +68,7 @@ func ssaGenValue387(s *gc.SSAGenState, v *ssa.Value) {
case ssa.Op386MOVSDstore: case ssa.Op386MOVSDstore:
op = x86.AFMOVDP op = x86.AFMOVDP
} }
p := gc.Prog(op) p := s.Prog(op)
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = x86.REG_F0 p.From.Reg = x86.REG_F0
p.To.Type = obj.TYPE_MEM p.To.Type = obj.TYPE_MEM
...@@ -84,7 +84,7 @@ func ssaGenValue387(s *gc.SSAGenState, v *ssa.Value) { ...@@ -84,7 +84,7 @@ func ssaGenValue387(s *gc.SSAGenState, v *ssa.Value) {
case ssa.Op386MOVSDstoreidx1, ssa.Op386MOVSDstoreidx8: case ssa.Op386MOVSDstoreidx1, ssa.Op386MOVSDstoreidx8:
op = x86.AFMOVDP op = x86.AFMOVDP
} }
p := gc.Prog(op) p := s.Prog(op)
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = x86.REG_F0 p.From.Reg = x86.REG_F0
p.To.Type = obj.TYPE_MEM p.To.Type = obj.TYPE_MEM
...@@ -114,9 +114,9 @@ func ssaGenValue387(s *gc.SSAGenState, v *ssa.Value) { ...@@ -114,9 +114,9 @@ func ssaGenValue387(s *gc.SSAGenState, v *ssa.Value) {
// Set precision if needed. 64 bits is the default. // Set precision if needed. 64 bits is the default.
switch v.Op { switch v.Op {
case ssa.Op386ADDSS, ssa.Op386SUBSS, ssa.Op386MULSS, ssa.Op386DIVSS: case ssa.Op386ADDSS, ssa.Op386SUBSS, ssa.Op386MULSS, ssa.Op386DIVSS:
p := gc.Prog(x86.AFSTCW) p := s.Prog(x86.AFSTCW)
s.AddrScratch(&p.To) s.AddrScratch(&p.To)
p = gc.Prog(x86.AFLDCW) p = s.Prog(x86.AFLDCW)
p.From.Type = obj.TYPE_MEM p.From.Type = obj.TYPE_MEM
p.From.Name = obj.NAME_EXTERN p.From.Name = obj.NAME_EXTERN
p.From.Sym = gc.Sysfunc("controlWord32") p.From.Sym = gc.Sysfunc("controlWord32")
...@@ -133,7 +133,7 @@ func ssaGenValue387(s *gc.SSAGenState, v *ssa.Value) { ...@@ -133,7 +133,7 @@ func ssaGenValue387(s *gc.SSAGenState, v *ssa.Value) {
case ssa.Op386DIVSS, ssa.Op386DIVSD: case ssa.Op386DIVSS, ssa.Op386DIVSD:
op = x86.AFDIVDP op = x86.AFDIVDP
} }
p := gc.Prog(op) p := s.Prog(op)
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = x86.REG_F0 p.From.Reg = x86.REG_F0
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
...@@ -142,7 +142,7 @@ func ssaGenValue387(s *gc.SSAGenState, v *ssa.Value) { ...@@ -142,7 +142,7 @@ func ssaGenValue387(s *gc.SSAGenState, v *ssa.Value) {
// Restore precision if needed. // Restore precision if needed.
switch v.Op { switch v.Op {
case ssa.Op386ADDSS, ssa.Op386SUBSS, ssa.Op386MULSS, ssa.Op386DIVSS: case ssa.Op386ADDSS, ssa.Op386SUBSS, ssa.Op386MULSS, ssa.Op386DIVSS:
p := gc.Prog(x86.AFLDCW) p := s.Prog(x86.AFLDCW)
s.AddrScratch(&p.From) s.AddrScratch(&p.From)
} }
...@@ -150,48 +150,48 @@ func ssaGenValue387(s *gc.SSAGenState, v *ssa.Value) { ...@@ -150,48 +150,48 @@ func ssaGenValue387(s *gc.SSAGenState, v *ssa.Value) {
push(s, v.Args[0]) push(s, v.Args[0])
// Compare. // Compare.
p := gc.Prog(x86.AFUCOMP) p := s.Prog(x86.AFUCOMP)
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = x86.REG_F0 p.From.Reg = x86.REG_F0
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
p.To.Reg = s.SSEto387[v.Args[1].Reg()] + 1 p.To.Reg = s.SSEto387[v.Args[1].Reg()] + 1
// Save AX. // Save AX.
p = gc.Prog(x86.AMOVL) p = s.Prog(x86.AMOVL)
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = x86.REG_AX p.From.Reg = x86.REG_AX
s.AddrScratch(&p.To) s.AddrScratch(&p.To)
// Move status word into AX. // Move status word into AX.
p = gc.Prog(x86.AFSTSW) p = s.Prog(x86.AFSTSW)
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
p.To.Reg = x86.REG_AX p.To.Reg = x86.REG_AX
// Then move the flags we need to the integer flags. // Then move the flags we need to the integer flags.
gc.Prog(x86.ASAHF) s.Prog(x86.ASAHF)
// Restore AX. // Restore AX.
p = gc.Prog(x86.AMOVL) p = s.Prog(x86.AMOVL)
s.AddrScratch(&p.From) s.AddrScratch(&p.From)
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
p.To.Reg = x86.REG_AX p.To.Reg = x86.REG_AX
case ssa.Op386SQRTSD: case ssa.Op386SQRTSD:
push(s, v.Args[0]) push(s, v.Args[0])
gc.Prog(x86.AFSQRT) s.Prog(x86.AFSQRT)
popAndSave(s, v) popAndSave(s, v)
case ssa.Op386FCHS: case ssa.Op386FCHS:
push(s, v.Args[0]) push(s, v.Args[0])
gc.Prog(x86.AFCHS) s.Prog(x86.AFCHS)
popAndSave(s, v) popAndSave(s, v)
case ssa.Op386CVTSL2SS, ssa.Op386CVTSL2SD: case ssa.Op386CVTSL2SS, ssa.Op386CVTSL2SD:
p := gc.Prog(x86.AMOVL) p := s.Prog(x86.AMOVL)
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg() p.From.Reg = v.Args[0].Reg()
s.AddrScratch(&p.To) s.AddrScratch(&p.To)
p = gc.Prog(x86.AFMOVL) p = s.Prog(x86.AFMOVL)
s.AddrScratch(&p.From) s.AddrScratch(&p.From)
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
p.To.Reg = x86.REG_F0 p.To.Reg = x86.REG_F0
...@@ -201,28 +201,28 @@ func ssaGenValue387(s *gc.SSAGenState, v *ssa.Value) { ...@@ -201,28 +201,28 @@ func ssaGenValue387(s *gc.SSAGenState, v *ssa.Value) {
push(s, v.Args[0]) push(s, v.Args[0])
// Save control word. // Save control word.
p := gc.Prog(x86.AFSTCW) p := s.Prog(x86.AFSTCW)
s.AddrScratch(&p.To) s.AddrScratch(&p.To)
p.To.Offset += 4 p.To.Offset += 4
// Load control word which truncates (rounds towards zero). // Load control word which truncates (rounds towards zero).
p = gc.Prog(x86.AFLDCW) p = s.Prog(x86.AFLDCW)
p.From.Type = obj.TYPE_MEM p.From.Type = obj.TYPE_MEM
p.From.Name = obj.NAME_EXTERN p.From.Name = obj.NAME_EXTERN
p.From.Sym = gc.Sysfunc("controlWord64trunc") p.From.Sym = gc.Sysfunc("controlWord64trunc")
// Now do the conversion. // Now do the conversion.
p = gc.Prog(x86.AFMOVLP) p = s.Prog(x86.AFMOVLP)
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = x86.REG_F0 p.From.Reg = x86.REG_F0
s.AddrScratch(&p.To) s.AddrScratch(&p.To)
p = gc.Prog(x86.AMOVL) p = s.Prog(x86.AMOVL)
s.AddrScratch(&p.From) s.AddrScratch(&p.From)
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg() p.To.Reg = v.Reg()
// Restore control word. // Restore control word.
p = gc.Prog(x86.AFLDCW) p = s.Prog(x86.AFLDCW)
s.AddrScratch(&p.From) s.AddrScratch(&p.From)
p.From.Offset += 4 p.From.Offset += 4
...@@ -234,11 +234,11 @@ func ssaGenValue387(s *gc.SSAGenState, v *ssa.Value) { ...@@ -234,11 +234,11 @@ func ssaGenValue387(s *gc.SSAGenState, v *ssa.Value) {
case ssa.Op386CVTSD2SS: case ssa.Op386CVTSD2SS:
// Round to nearest float32. // Round to nearest float32.
push(s, v.Args[0]) push(s, v.Args[0])
p := gc.Prog(x86.AFMOVFP) p := s.Prog(x86.AFMOVFP)
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = x86.REG_F0 p.From.Reg = x86.REG_F0
s.AddrScratch(&p.To) s.AddrScratch(&p.To)
p = gc.Prog(x86.AFMOVF) p = s.Prog(x86.AFMOVF)
s.AddrScratch(&p.From) s.AddrScratch(&p.From)
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
p.To.Reg = x86.REG_F0 p.To.Reg = x86.REG_F0
...@@ -250,7 +250,7 @@ func ssaGenValue387(s *gc.SSAGenState, v *ssa.Value) { ...@@ -250,7 +250,7 @@ func ssaGenValue387(s *gc.SSAGenState, v *ssa.Value) {
return return
} }
// Load+push the value we need. // Load+push the value we need.
p := gc.Prog(loadPush(v.Type)) p := s.Prog(loadPush(v.Type))
gc.AddrAuto(&p.From, v.Args[0]) gc.AddrAuto(&p.From, v.Args[0])
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
p.To.Reg = x86.REG_F0 p.To.Reg = x86.REG_F0
...@@ -270,7 +270,7 @@ func ssaGenValue387(s *gc.SSAGenState, v *ssa.Value) { ...@@ -270,7 +270,7 @@ func ssaGenValue387(s *gc.SSAGenState, v *ssa.Value) {
case 8: case 8:
op = x86.AFMOVDP op = x86.AFMOVDP
} }
p := gc.Prog(op) p := s.Prog(op)
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = x86.REG_F0 p.From.Reg = x86.REG_F0
gc.AddrAuto(&p.To, v) gc.AddrAuto(&p.To, v)
...@@ -293,7 +293,7 @@ func ssaGenValue387(s *gc.SSAGenState, v *ssa.Value) { ...@@ -293,7 +293,7 @@ func ssaGenValue387(s *gc.SSAGenState, v *ssa.Value) {
// push pushes v onto the floating-point stack. v must be in a register. // push pushes v onto the floating-point stack. v must be in a register.
func push(s *gc.SSAGenState, v *ssa.Value) { func push(s *gc.SSAGenState, v *ssa.Value) {
p := gc.Prog(x86.AFMOVD) p := s.Prog(x86.AFMOVD)
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = s.SSEto387[v.Reg()] p.From.Reg = s.SSEto387[v.Reg()]
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
...@@ -306,7 +306,7 @@ func popAndSave(s *gc.SSAGenState, v *ssa.Value) { ...@@ -306,7 +306,7 @@ func popAndSave(s *gc.SSAGenState, v *ssa.Value) {
r := v.Reg() r := v.Reg()
if _, ok := s.SSEto387[r]; ok { if _, ok := s.SSEto387[r]; ok {
// Pop value, write to correct register. // Pop value, write to correct register.
p := gc.Prog(x86.AFMOVDP) p := s.Prog(x86.AFMOVDP)
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = x86.REG_F0 p.From.Reg = x86.REG_F0
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
...@@ -333,7 +333,7 @@ func loadPush(t ssa.Type) obj.As { ...@@ -333,7 +333,7 @@ func loadPush(t ssa.Type) obj.As {
// flush387 removes all entries from the 387 floating-point stack. // flush387 removes all entries from the 387 floating-point stack.
func flush387(s *gc.SSAGenState) { func flush387(s *gc.SSAGenState) {
for k := range s.SSEto387 { for k := range s.SSEto387 {
p := gc.Prog(x86.AFMOVDP) p := s.Prog(x86.AFMOVDP)
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = x86.REG_F0 p.From.Reg = x86.REG_F0
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
......
...@@ -104,8 +104,8 @@ func moveByType(t ssa.Type) obj.As { ...@@ -104,8 +104,8 @@ func moveByType(t ssa.Type) obj.As {
// dest := dest(To) op src(From) // dest := dest(To) op src(From)
// and also returns the created obj.Prog so it // and also returns the created obj.Prog so it
// may be further adjusted (offset, scale, etc). // may be further adjusted (offset, scale, etc).
func opregreg(op obj.As, dest, src int16) *obj.Prog { func opregreg(s *gc.SSAGenState, op obj.As, dest, src int16) *obj.Prog {
p := gc.Prog(op) p := s.Prog(op)
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
p.To.Reg = dest p.To.Reg = dest
...@@ -121,19 +121,19 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -121,19 +121,19 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
r2 := v.Args[1].Reg() r2 := v.Args[1].Reg()
switch { switch {
case r == r1: case r == r1:
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = r2 p.From.Reg = r2
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
p.To.Reg = r p.To.Reg = r
case r == r2: case r == r2:
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = r1 p.From.Reg = r1
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
p.To.Reg = r p.To.Reg = r
default: default:
p := gc.Prog(x86.ALEAL) p := s.Prog(x86.ALEAL)
p.From.Type = obj.TYPE_MEM p.From.Type = obj.TYPE_MEM
p.From.Reg = r1 p.From.Reg = r1
p.From.Scale = 1 p.From.Scale = 1
...@@ -160,7 +160,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -160,7 +160,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
if r != v.Args[0].Reg() { if r != v.Args[0].Reg() {
v.Fatalf("input[0] and output not in same register %s", v.LongString()) v.Fatalf("input[0] and output not in same register %s", v.LongString())
} }
opregreg(v.Op.Asm(), r, v.Args[1].Reg()) opregreg(s, v.Op.Asm(), r, v.Args[1].Reg())
case ssa.Op386ADDLcarry, ssa.Op386SUBLcarry: case ssa.Op386ADDLcarry, ssa.Op386SUBLcarry:
// output 0 is carry/borrow, output 1 is the low 32 bits. // output 0 is carry/borrow, output 1 is the low 32 bits.
...@@ -168,7 +168,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -168,7 +168,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
if r != v.Args[0].Reg() { if r != v.Args[0].Reg() {
v.Fatalf("input[0] and output[0] not in same register %s", v.LongString()) v.Fatalf("input[0] and output[0] not in same register %s", v.LongString())
} }
opregreg(v.Op.Asm(), r, v.Args[1].Reg()) opregreg(s, v.Op.Asm(), r, v.Args[1].Reg())
case ssa.Op386ADDLconstcarry, ssa.Op386SUBLconstcarry: case ssa.Op386ADDLconstcarry, ssa.Op386SUBLconstcarry:
// output 0 is carry/borrow, output 1 is the low 32 bits. // output 0 is carry/borrow, output 1 is the low 32 bits.
...@@ -176,7 +176,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -176,7 +176,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
if r != v.Args[0].Reg() { if r != v.Args[0].Reg() {
v.Fatalf("input[0] and output[0] not in same register %s", v.LongString()) v.Fatalf("input[0] and output[0] not in same register %s", v.LongString())
} }
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_CONST p.From.Type = obj.TYPE_CONST
p.From.Offset = v.AuxInt p.From.Offset = v.AuxInt
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
...@@ -200,14 +200,14 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -200,14 +200,14 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
var c *obj.Prog var c *obj.Prog
switch v.Op { switch v.Op {
case ssa.Op386DIVL, ssa.Op386MODL: case ssa.Op386DIVL, ssa.Op386MODL:
c = gc.Prog(x86.ACMPL) c = s.Prog(x86.ACMPL)
j = gc.Prog(x86.AJEQ) j = s.Prog(x86.AJEQ)
gc.Prog(x86.ACDQ) //TODO: fix s.Prog(x86.ACDQ) //TODO: fix
case ssa.Op386DIVW, ssa.Op386MODW: case ssa.Op386DIVW, ssa.Op386MODW:
c = gc.Prog(x86.ACMPW) c = s.Prog(x86.ACMPW)
j = gc.Prog(x86.AJEQ) j = s.Prog(x86.AJEQ)
gc.Prog(x86.ACWD) s.Prog(x86.ACWD)
} }
c.From.Type = obj.TYPE_REG c.From.Type = obj.TYPE_REG
c.From.Reg = x c.From.Reg = x
...@@ -221,31 +221,31 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -221,31 +221,31 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
// signed ints were sign extended above // signed ints were sign extended above
if v.Op == ssa.Op386DIVLU || v.Op == ssa.Op386MODLU || if v.Op == ssa.Op386DIVLU || v.Op == ssa.Op386MODLU ||
v.Op == ssa.Op386DIVWU || v.Op == ssa.Op386MODWU { v.Op == ssa.Op386DIVWU || v.Op == ssa.Op386MODWU {
c := gc.Prog(x86.AXORL) c := s.Prog(x86.AXORL)
c.From.Type = obj.TYPE_REG c.From.Type = obj.TYPE_REG
c.From.Reg = x86.REG_DX c.From.Reg = x86.REG_DX
c.To.Type = obj.TYPE_REG c.To.Type = obj.TYPE_REG
c.To.Reg = x86.REG_DX c.To.Reg = x86.REG_DX
} }
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = x p.From.Reg = x
// signed division, rest of the check for -1 case // signed division, rest of the check for -1 case
if j != nil { if j != nil {
j2 := gc.Prog(obj.AJMP) j2 := s.Prog(obj.AJMP)
j2.To.Type = obj.TYPE_BRANCH j2.To.Type = obj.TYPE_BRANCH
var n *obj.Prog var n *obj.Prog
if v.Op == ssa.Op386DIVL || v.Op == ssa.Op386DIVW { if v.Op == ssa.Op386DIVL || v.Op == ssa.Op386DIVW {
// n * -1 = -n // n * -1 = -n
n = gc.Prog(x86.ANEGL) n = s.Prog(x86.ANEGL)
n.To.Type = obj.TYPE_REG n.To.Type = obj.TYPE_REG
n.To.Reg = x86.REG_AX n.To.Reg = x86.REG_AX
} else { } else {
// n % -1 == 0 // n % -1 == 0
n = gc.Prog(x86.AXORL) n = s.Prog(x86.AXORL)
n.From.Type = obj.TYPE_REG n.From.Type = obj.TYPE_REG
n.From.Reg = x86.REG_DX n.From.Reg = x86.REG_DX
n.To.Type = obj.TYPE_REG n.To.Type = obj.TYPE_REG
...@@ -263,14 +263,14 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -263,14 +263,14 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
// Arg[0] is already in AX as it's the only register we allow // Arg[0] is already in AX as it's the only register we allow
// and DX is the only output we care about (the high bits) // and DX is the only output we care about (the high bits)
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[1].Reg() p.From.Reg = v.Args[1].Reg()
// IMULB puts the high portion in AH instead of DL, // IMULB puts the high portion in AH instead of DL,
// so move it to DL for consistency // so move it to DL for consistency
if v.Type.Size() == 1 { if v.Type.Size() == 1 {
m := gc.Prog(x86.AMOVB) m := s.Prog(x86.AMOVB)
m.From.Type = obj.TYPE_REG m.From.Type = obj.TYPE_REG
m.From.Reg = x86.REG_AH m.From.Reg = x86.REG_AH
m.To.Type = obj.TYPE_REG m.To.Type = obj.TYPE_REG
...@@ -279,7 +279,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -279,7 +279,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
case ssa.Op386MULLQU: case ssa.Op386MULLQU:
// AX * args[1], high 32 bits in DX (result[0]), low 32 bits in AX (result[1]). // AX * args[1], high 32 bits in DX (result[0]), low 32 bits in AX (result[1]).
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[1].Reg() p.From.Reg = v.Args[1].Reg()
...@@ -291,12 +291,12 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -291,12 +291,12 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
if r != v.Args[0].Reg() { if r != v.Args[0].Reg() {
v.Fatalf("input[0] and output not in same register %s", v.LongString()) v.Fatalf("input[0] and output not in same register %s", v.LongString())
} }
p := gc.Prog(x86.AADDL) p := s.Prog(x86.AADDL)
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
p.To.Reg = r p.To.Reg = r
p.From.Reg = v.Args[1].Reg() p.From.Reg = v.Args[1].Reg()
p = gc.Prog(x86.ARCRL) p = s.Prog(x86.ARCRL)
p.From.Type = obj.TYPE_CONST p.From.Type = obj.TYPE_CONST
p.From.Offset = 1 p.From.Offset = 1
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
...@@ -307,25 +307,25 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -307,25 +307,25 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
a := v.Args[0].Reg() a := v.Args[0].Reg()
if r == a { if r == a {
if v.AuxInt == 1 { if v.AuxInt == 1 {
p := gc.Prog(x86.AINCL) p := s.Prog(x86.AINCL)
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
p.To.Reg = r p.To.Reg = r
return return
} }
if v.AuxInt == -1 { if v.AuxInt == -1 {
p := gc.Prog(x86.ADECL) p := s.Prog(x86.ADECL)
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
p.To.Reg = r p.To.Reg = r
return return
} }
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_CONST p.From.Type = obj.TYPE_CONST
p.From.Offset = v.AuxInt p.From.Offset = v.AuxInt
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
p.To.Reg = r p.To.Reg = r
return return
} }
p := gc.Prog(x86.ALEAL) p := s.Prog(x86.ALEAL)
p.From.Type = obj.TYPE_MEM p.From.Type = obj.TYPE_MEM
p.From.Reg = a p.From.Reg = a
p.From.Offset = v.AuxInt p.From.Offset = v.AuxInt
...@@ -337,7 +337,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -337,7 +337,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
if r != v.Args[0].Reg() { if r != v.Args[0].Reg() {
v.Fatalf("input[0] and output not in same register %s", v.LongString()) v.Fatalf("input[0] and output not in same register %s", v.LongString())
} }
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_CONST p.From.Type = obj.TYPE_CONST
p.From.Offset = v.AuxInt p.From.Offset = v.AuxInt
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
...@@ -362,14 +362,14 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -362,14 +362,14 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
if r != v.Args[0].Reg() { if r != v.Args[0].Reg() {
v.Fatalf("input[0] and output not in same register %s", v.LongString()) v.Fatalf("input[0] and output not in same register %s", v.LongString())
} }
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_CONST p.From.Type = obj.TYPE_CONST
p.From.Offset = v.AuxInt p.From.Offset = v.AuxInt
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
p.To.Reg = r p.To.Reg = r
case ssa.Op386SBBLcarrymask: case ssa.Op386SBBLcarrymask:
r := v.Reg() r := v.Reg()
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = r p.From.Reg = r
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
...@@ -377,7 +377,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -377,7 +377,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
case ssa.Op386LEAL1, ssa.Op386LEAL2, ssa.Op386LEAL4, ssa.Op386LEAL8: case ssa.Op386LEAL1, ssa.Op386LEAL2, ssa.Op386LEAL4, ssa.Op386LEAL8:
r := v.Args[0].Reg() r := v.Args[0].Reg()
i := v.Args[1].Reg() i := v.Args[1].Reg()
p := gc.Prog(x86.ALEAL) p := s.Prog(x86.ALEAL)
switch v.Op { switch v.Op {
case ssa.Op386LEAL1: case ssa.Op386LEAL1:
p.From.Scale = 1 p.From.Scale = 1
...@@ -398,7 +398,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -398,7 +398,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg() p.To.Reg = v.Reg()
case ssa.Op386LEAL: case ssa.Op386LEAL:
p := gc.Prog(x86.ALEAL) p := s.Prog(x86.ALEAL)
p.From.Type = obj.TYPE_MEM p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg() p.From.Reg = v.Args[0].Reg()
gc.AddAux(&p.From, v) gc.AddAux(&p.From, v)
...@@ -406,26 +406,26 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -406,26 +406,26 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.To.Reg = v.Reg() p.To.Reg = v.Reg()
case ssa.Op386CMPL, ssa.Op386CMPW, ssa.Op386CMPB, case ssa.Op386CMPL, ssa.Op386CMPW, ssa.Op386CMPB,
ssa.Op386TESTL, ssa.Op386TESTW, ssa.Op386TESTB: ssa.Op386TESTL, ssa.Op386TESTW, ssa.Op386TESTB:
opregreg(v.Op.Asm(), v.Args[1].Reg(), v.Args[0].Reg()) opregreg(s, v.Op.Asm(), v.Args[1].Reg(), v.Args[0].Reg())
case ssa.Op386UCOMISS, ssa.Op386UCOMISD: case ssa.Op386UCOMISS, ssa.Op386UCOMISD:
// Go assembler has swapped operands for UCOMISx relative to CMP, // Go assembler has swapped operands for UCOMISx relative to CMP,
// must account for that right here. // must account for that right here.
opregreg(v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg()) opregreg(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg())
case ssa.Op386CMPLconst, ssa.Op386CMPWconst, ssa.Op386CMPBconst: case ssa.Op386CMPLconst, ssa.Op386CMPWconst, ssa.Op386CMPBconst:
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg() p.From.Reg = v.Args[0].Reg()
p.To.Type = obj.TYPE_CONST p.To.Type = obj.TYPE_CONST
p.To.Offset = v.AuxInt p.To.Offset = v.AuxInt
case ssa.Op386TESTLconst, ssa.Op386TESTWconst, ssa.Op386TESTBconst: case ssa.Op386TESTLconst, ssa.Op386TESTWconst, ssa.Op386TESTBconst:
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_CONST p.From.Type = obj.TYPE_CONST
p.From.Offset = v.AuxInt p.From.Offset = v.AuxInt
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
p.To.Reg = v.Args[0].Reg() p.To.Reg = v.Args[0].Reg()
case ssa.Op386MOVLconst: case ssa.Op386MOVLconst:
x := v.Reg() x := v.Reg()
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_CONST p.From.Type = obj.TYPE_CONST
p.From.Offset = v.AuxInt p.From.Offset = v.AuxInt
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
...@@ -437,7 +437,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -437,7 +437,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
} }
case ssa.Op386MOVSSconst, ssa.Op386MOVSDconst: case ssa.Op386MOVSSconst, ssa.Op386MOVSDconst:
x := v.Reg() x := v.Reg()
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_FCONST p.From.Type = obj.TYPE_FCONST
p.From.Val = math.Float64frombits(uint64(v.AuxInt)) p.From.Val = math.Float64frombits(uint64(v.AuxInt))
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
...@@ -449,7 +449,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -449,7 +449,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
} else { } else {
literal = fmt.Sprintf("$f32.%08x", math.Float32bits(float32(math.Float64frombits(uint64(v.AuxInt))))) literal = fmt.Sprintf("$f32.%08x", math.Float32bits(float32(math.Float64frombits(uint64(v.AuxInt)))))
} }
p := gc.Prog(x86.ALEAL) p := s.Prog(x86.ALEAL)
p.From.Type = obj.TYPE_MEM p.From.Type = obj.TYPE_MEM
p.From.Name = obj.NAME_EXTERN p.From.Name = obj.NAME_EXTERN
p.From.Sym = obj.Linklookup(gc.Ctxt, literal, 0) p.From.Sym = obj.Linklookup(gc.Ctxt, literal, 0)
...@@ -457,21 +457,21 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -457,21 +457,21 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg() p.To.Reg = v.Reg()
case ssa.Op386MOVSSconst2, ssa.Op386MOVSDconst2: case ssa.Op386MOVSSconst2, ssa.Op386MOVSDconst2:
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg() p.From.Reg = v.Args[0].Reg()
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg() p.To.Reg = v.Reg()
case ssa.Op386MOVSSload, ssa.Op386MOVSDload, ssa.Op386MOVLload, ssa.Op386MOVWload, ssa.Op386MOVBload, ssa.Op386MOVBLSXload, ssa.Op386MOVWLSXload: case ssa.Op386MOVSSload, ssa.Op386MOVSDload, ssa.Op386MOVLload, ssa.Op386MOVWload, ssa.Op386MOVBload, ssa.Op386MOVBLSXload, ssa.Op386MOVWLSXload:
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg() p.From.Reg = v.Args[0].Reg()
gc.AddAux(&p.From, v) gc.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg() p.To.Reg = v.Reg()
case ssa.Op386MOVSDloadidx8: case ssa.Op386MOVSDloadidx8:
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg() p.From.Reg = v.Args[0].Reg()
gc.AddAux(&p.From, v) gc.AddAux(&p.From, v)
...@@ -480,7 +480,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -480,7 +480,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg() p.To.Reg = v.Reg()
case ssa.Op386MOVLloadidx4, ssa.Op386MOVSSloadidx4: case ssa.Op386MOVLloadidx4, ssa.Op386MOVSSloadidx4:
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg() p.From.Reg = v.Args[0].Reg()
gc.AddAux(&p.From, v) gc.AddAux(&p.From, v)
...@@ -489,7 +489,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -489,7 +489,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg() p.To.Reg = v.Reg()
case ssa.Op386MOVWloadidx2: case ssa.Op386MOVWloadidx2:
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg() p.From.Reg = v.Args[0].Reg()
gc.AddAux(&p.From, v) gc.AddAux(&p.From, v)
...@@ -503,7 +503,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -503,7 +503,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
if i == x86.REG_SP { if i == x86.REG_SP {
r, i = i, r r, i = i, r
} }
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM p.From.Type = obj.TYPE_MEM
p.From.Reg = r p.From.Reg = r
p.From.Scale = 1 p.From.Scale = 1
...@@ -512,14 +512,14 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -512,14 +512,14 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg() p.To.Reg = v.Reg()
case ssa.Op386MOVSSstore, ssa.Op386MOVSDstore, ssa.Op386MOVLstore, ssa.Op386MOVWstore, ssa.Op386MOVBstore: case ssa.Op386MOVSSstore, ssa.Op386MOVSDstore, ssa.Op386MOVLstore, ssa.Op386MOVWstore, ssa.Op386MOVBstore:
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[1].Reg() p.From.Reg = v.Args[1].Reg()
p.To.Type = obj.TYPE_MEM p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg() p.To.Reg = v.Args[0].Reg()
gc.AddAux(&p.To, v) gc.AddAux(&p.To, v)
case ssa.Op386MOVSDstoreidx8: case ssa.Op386MOVSDstoreidx8:
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[2].Reg() p.From.Reg = v.Args[2].Reg()
p.To.Type = obj.TYPE_MEM p.To.Type = obj.TYPE_MEM
...@@ -528,7 +528,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -528,7 +528,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.To.Index = v.Args[1].Reg() p.To.Index = v.Args[1].Reg()
gc.AddAux(&p.To, v) gc.AddAux(&p.To, v)
case ssa.Op386MOVSSstoreidx4, ssa.Op386MOVLstoreidx4: case ssa.Op386MOVSSstoreidx4, ssa.Op386MOVLstoreidx4:
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[2].Reg() p.From.Reg = v.Args[2].Reg()
p.To.Type = obj.TYPE_MEM p.To.Type = obj.TYPE_MEM
...@@ -537,7 +537,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -537,7 +537,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.To.Index = v.Args[1].Reg() p.To.Index = v.Args[1].Reg()
gc.AddAux(&p.To, v) gc.AddAux(&p.To, v)
case ssa.Op386MOVWstoreidx2: case ssa.Op386MOVWstoreidx2:
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[2].Reg() p.From.Reg = v.Args[2].Reg()
p.To.Type = obj.TYPE_MEM p.To.Type = obj.TYPE_MEM
...@@ -551,7 +551,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -551,7 +551,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
if i == x86.REG_SP { if i == x86.REG_SP {
r, i = i, r r, i = i, r
} }
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[2].Reg() p.From.Reg = v.Args[2].Reg()
p.To.Type = obj.TYPE_MEM p.To.Type = obj.TYPE_MEM
...@@ -560,7 +560,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -560,7 +560,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.To.Index = i p.To.Index = i
gc.AddAux(&p.To, v) gc.AddAux(&p.To, v)
case ssa.Op386MOVLstoreconst, ssa.Op386MOVWstoreconst, ssa.Op386MOVBstoreconst: case ssa.Op386MOVLstoreconst, ssa.Op386MOVWstoreconst, ssa.Op386MOVBstoreconst:
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_CONST p.From.Type = obj.TYPE_CONST
sc := v.AuxValAndOff() sc := v.AuxValAndOff()
p.From.Offset = sc.Val() p.From.Offset = sc.Val()
...@@ -568,7 +568,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -568,7 +568,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.To.Reg = v.Args[0].Reg() p.To.Reg = v.Args[0].Reg()
gc.AddAux2(&p.To, v, sc.Off()) gc.AddAux2(&p.To, v, sc.Off())
case ssa.Op386MOVLstoreconstidx1, ssa.Op386MOVLstoreconstidx4, ssa.Op386MOVWstoreconstidx1, ssa.Op386MOVWstoreconstidx2, ssa.Op386MOVBstoreconstidx1: case ssa.Op386MOVLstoreconstidx1, ssa.Op386MOVLstoreconstidx4, ssa.Op386MOVWstoreconstidx1, ssa.Op386MOVWstoreconstidx2, ssa.Op386MOVBstoreconstidx1:
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_CONST p.From.Type = obj.TYPE_CONST
sc := v.AuxValAndOff() sc := v.AuxValAndOff()
p.From.Offset = sc.Val() p.From.Offset = sc.Val()
...@@ -593,14 +593,14 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -593,14 +593,14 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
ssa.Op386CVTSL2SS, ssa.Op386CVTSL2SD, ssa.Op386CVTSL2SS, ssa.Op386CVTSL2SD,
ssa.Op386CVTTSS2SL, ssa.Op386CVTTSD2SL, ssa.Op386CVTTSS2SL, ssa.Op386CVTTSD2SL,
ssa.Op386CVTSS2SD, ssa.Op386CVTSD2SS: ssa.Op386CVTSS2SD, ssa.Op386CVTSD2SS:
opregreg(v.Op.Asm(), v.Reg(), v.Args[0].Reg()) opregreg(s, v.Op.Asm(), v.Reg(), v.Args[0].Reg())
case ssa.Op386DUFFZERO: case ssa.Op386DUFFZERO:
p := gc.Prog(obj.ADUFFZERO) p := s.Prog(obj.ADUFFZERO)
p.To.Type = obj.TYPE_ADDR p.To.Type = obj.TYPE_ADDR
p.To.Sym = gc.Duffzero p.To.Sym = gc.Duffzero
p.To.Offset = v.AuxInt p.To.Offset = v.AuxInt
case ssa.Op386DUFFCOPY: case ssa.Op386DUFFCOPY:
p := gc.Prog(obj.ADUFFCOPY) p := s.Prog(obj.ADUFFCOPY)
p.To.Type = obj.TYPE_ADDR p.To.Type = obj.TYPE_ADDR
p.To.Sym = gc.Duffcopy p.To.Sym = gc.Duffcopy
p.To.Offset = v.AuxInt p.To.Offset = v.AuxInt
...@@ -612,14 +612,14 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -612,14 +612,14 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
x := v.Args[0].Reg() x := v.Args[0].Reg()
y := v.Reg() y := v.Reg()
if x != y { if x != y {
opregreg(moveByType(v.Type), y, x) opregreg(s, moveByType(v.Type), y, x)
} }
case ssa.OpLoadReg: case ssa.OpLoadReg:
if v.Type.IsFlags() { if v.Type.IsFlags() {
v.Fatalf("load flags not implemented: %v", v.LongString()) v.Fatalf("load flags not implemented: %v", v.LongString())
return return
} }
p := gc.Prog(loadByType(v.Type)) p := s.Prog(loadByType(v.Type))
gc.AddrAuto(&p.From, v.Args[0]) gc.AddrAuto(&p.From, v.Args[0])
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg() p.To.Reg = v.Reg()
...@@ -629,7 +629,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -629,7 +629,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
v.Fatalf("store flags not implemented: %v", v.LongString()) v.Fatalf("store flags not implemented: %v", v.LongString())
return return
} }
p := gc.Prog(storeByType(v.Type)) p := s.Prog(storeByType(v.Type))
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg() p.From.Reg = v.Args[0].Reg()
gc.AddrAuto(&p.To, v) gc.AddrAuto(&p.To, v)
...@@ -642,7 +642,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -642,7 +642,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
// near CanUse1InsnTLS for a detailed explanation of these instructions. // near CanUse1InsnTLS for a detailed explanation of these instructions.
if x86.CanUse1InsnTLS(gc.Ctxt) { if x86.CanUse1InsnTLS(gc.Ctxt) {
// MOVL (TLS), r // MOVL (TLS), r
p := gc.Prog(x86.AMOVL) p := s.Prog(x86.AMOVL)
p.From.Type = obj.TYPE_MEM p.From.Type = obj.TYPE_MEM
p.From.Reg = x86.REG_TLS p.From.Reg = x86.REG_TLS
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
...@@ -650,12 +650,12 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -650,12 +650,12 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
} else { } else {
// MOVL TLS, r // MOVL TLS, r
// MOVL (r)(TLS*1), r // MOVL (r)(TLS*1), r
p := gc.Prog(x86.AMOVL) p := s.Prog(x86.AMOVL)
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = x86.REG_TLS p.From.Reg = x86.REG_TLS
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
p.To.Reg = r p.To.Reg = r
q := gc.Prog(x86.AMOVL) q := s.Prog(x86.AMOVL)
q.From.Type = obj.TYPE_MEM q.From.Type = obj.TYPE_MEM
q.From.Reg = r q.From.Reg = r
q.From.Index = x86.REG_TLS q.From.Index = x86.REG_TLS
...@@ -672,13 +672,13 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -672,13 +672,13 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
if r != v.Args[0].Reg() { if r != v.Args[0].Reg() {
v.Fatalf("input[0] and output not in same register %s", v.LongString()) v.Fatalf("input[0] and output not in same register %s", v.LongString())
} }
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
p.To.Reg = r p.To.Reg = r
case ssa.Op386BSFL, ssa.Op386BSFW, case ssa.Op386BSFL, ssa.Op386BSFW,
ssa.Op386BSRL, ssa.Op386BSRW, ssa.Op386BSRL, ssa.Op386BSRW,
ssa.Op386SQRTSD: ssa.Op386SQRTSD:
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg() p.From.Reg = v.Args[0].Reg()
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
...@@ -690,38 +690,38 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -690,38 +690,38 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
ssa.Op386SETB, ssa.Op386SETBE, ssa.Op386SETB, ssa.Op386SETBE,
ssa.Op386SETORD, ssa.Op386SETNAN, ssa.Op386SETORD, ssa.Op386SETNAN,
ssa.Op386SETA, ssa.Op386SETAE: ssa.Op386SETA, ssa.Op386SETAE:
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg() p.To.Reg = v.Reg()
case ssa.Op386SETNEF: case ssa.Op386SETNEF:
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg() p.To.Reg = v.Reg()
q := gc.Prog(x86.ASETPS) q := s.Prog(x86.ASETPS)
q.To.Type = obj.TYPE_REG q.To.Type = obj.TYPE_REG
q.To.Reg = x86.REG_AX q.To.Reg = x86.REG_AX
opregreg(x86.AORL, v.Reg(), x86.REG_AX) opregreg(s, x86.AORL, v.Reg(), x86.REG_AX)
case ssa.Op386SETEQF: case ssa.Op386SETEQF:
p := gc.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg() p.To.Reg = v.Reg()
q := gc.Prog(x86.ASETPC) q := s.Prog(x86.ASETPC)
q.To.Type = obj.TYPE_REG q.To.Type = obj.TYPE_REG
q.To.Reg = x86.REG_AX q.To.Reg = x86.REG_AX
opregreg(x86.AANDL, v.Reg(), x86.REG_AX) opregreg(s, x86.AANDL, v.Reg(), x86.REG_AX)
case ssa.Op386InvertFlags: case ssa.Op386InvertFlags:
v.Fatalf("InvertFlags should never make it to codegen %v", v.LongString()) v.Fatalf("InvertFlags should never make it to codegen %v", v.LongString())
case ssa.Op386FlagEQ, ssa.Op386FlagLT_ULT, ssa.Op386FlagLT_UGT, ssa.Op386FlagGT_ULT, ssa.Op386FlagGT_UGT: case ssa.Op386FlagEQ, ssa.Op386FlagLT_ULT, ssa.Op386FlagLT_UGT, ssa.Op386FlagGT_ULT, ssa.Op386FlagGT_UGT:
v.Fatalf("Flag* ops should never make it to codegen %v", v.LongString()) v.Fatalf("Flag* ops should never make it to codegen %v", v.LongString())
case ssa.Op386REPSTOSL: case ssa.Op386REPSTOSL:
gc.Prog(x86.AREP) s.Prog(x86.AREP)
gc.Prog(x86.ASTOSL) s.Prog(x86.ASTOSL)
case ssa.Op386REPMOVSL: case ssa.Op386REPMOVSL:
gc.Prog(x86.AREP) s.Prog(x86.AREP)
gc.Prog(x86.AMOVSL) s.Prog(x86.AMOVSL)
case ssa.Op386LoweredNilCheck: case ssa.Op386LoweredNilCheck:
// Issue a load which will fault if the input is nil. // Issue a load which will fault if the input is nil.
// TODO: We currently use the 2-byte instruction TESTB AX, (reg). // TODO: We currently use the 2-byte instruction TESTB AX, (reg).
...@@ -729,7 +729,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -729,7 +729,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
// but it doesn't have false dependency on AX. // but it doesn't have false dependency on AX.
// Or maybe allocate an output register and use MOVL (reg),reg2 ? // Or maybe allocate an output register and use MOVL (reg),reg2 ?
// That trades clobbering flags for clobbering a register. // That trades clobbering flags for clobbering a register.
p := gc.Prog(x86.ATESTB) p := s.Prog(x86.ATESTB)
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = x86.REG_AX p.From.Reg = x86.REG_AX
p.To.Type = obj.TYPE_MEM p.To.Type = obj.TYPE_MEM
...@@ -775,7 +775,7 @@ func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) { ...@@ -775,7 +775,7 @@ func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
switch b.Kind { switch b.Kind {
case ssa.BlockPlain: case ssa.BlockPlain:
if b.Succs[0].Block() != next { if b.Succs[0].Block() != next {
p := gc.Prog(obj.AJMP) p := s.Prog(obj.AJMP)
p.To.Type = obj.TYPE_BRANCH p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()}) s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
} }
...@@ -783,25 +783,25 @@ func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) { ...@@ -783,25 +783,25 @@ func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
// defer returns in rax: // defer returns in rax:
// 0 if we should continue executing // 0 if we should continue executing
// 1 if we should jump to deferreturn call // 1 if we should jump to deferreturn call
p := gc.Prog(x86.ATESTL) p := s.Prog(x86.ATESTL)
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = x86.REG_AX p.From.Reg = x86.REG_AX
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
p.To.Reg = x86.REG_AX p.To.Reg = x86.REG_AX
p = gc.Prog(x86.AJNE) p = s.Prog(x86.AJNE)
p.To.Type = obj.TYPE_BRANCH p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()}) s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
if b.Succs[0].Block() != next { if b.Succs[0].Block() != next {
p := gc.Prog(obj.AJMP) p := s.Prog(obj.AJMP)
p.To.Type = obj.TYPE_BRANCH p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()}) s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
} }
case ssa.BlockExit: case ssa.BlockExit:
gc.Prog(obj.AUNDEF) // tell plive.go that we never reach here s.Prog(obj.AUNDEF) // tell plive.go that we never reach here
case ssa.BlockRet: case ssa.BlockRet:
gc.Prog(obj.ARET) s.Prog(obj.ARET)
case ssa.BlockRetJmp: case ssa.BlockRetJmp:
p := gc.Prog(obj.AJMP) p := s.Prog(obj.AJMP)
p.To.Type = obj.TYPE_MEM p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN p.To.Name = obj.NAME_EXTERN
p.To.Sym = b.Aux.(*obj.LSym) p.To.Sym = b.Aux.(*obj.LSym)
...@@ -822,19 +822,19 @@ func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) { ...@@ -822,19 +822,19 @@ func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
var p *obj.Prog var p *obj.Prog
switch next { switch next {
case b.Succs[0].Block(): case b.Succs[0].Block():
p = gc.Prog(jmp.invasm) p = s.Prog(jmp.invasm)
likely *= -1 likely *= -1
p.To.Type = obj.TYPE_BRANCH p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()}) s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
case b.Succs[1].Block(): case b.Succs[1].Block():
p = gc.Prog(jmp.asm) p = s.Prog(jmp.asm)
p.To.Type = obj.TYPE_BRANCH p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()}) s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
default: default:
p = gc.Prog(jmp.asm) p = s.Prog(jmp.asm)
p.To.Type = obj.TYPE_BRANCH p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()}) s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
q := gc.Prog(obj.AJMP) q := s.Prog(obj.AJMP)
q.To.Type = obj.TYPE_BRANCH q.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: q, B: b.Succs[1].Block()}) s.Branches = append(s.Branches, gc.Branch{P: q, B: b.Succs[1].Block()})
} }
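For context, a minimal sketch of the plumbing this diff presumes (added in the gc package rather than in this file): SSAGenState gains a Prog constructor so that each call site rewritten above can ask the pass state, instead of a global, for a new instruction. The method body shown here is an assumption inferred from the call sites visible in this diff, not a quote of the actual change; at this first step it would simply forward to the existing package-level helper, leaving the generated instruction stream unchanged.

// Sketch only: assumed shape of the new constructor on SSAGenState.
// For now it merely delegates to the package-level Prog; later steps
// can route construction through per-function state held in s.
func (s *SSAGenState) Prog(as obj.As) *obj.Prog {
	return Prog(as)
}

With that in place, the rewrite in this file is mechanical: gc.Prog(op) becomes s.Prog(op), and helpers such as opregreg take a *gc.SSAGenState parameter so they can reach the same constructor.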