Commit 1e692454 authored by Josh Bleecher Snyder

cmd/internal/obj/s390x: make assembler almost concurrency-safe

CL 39922 made the arm assembler concurrency-safe.
This CL does the same, but for s390x.
The approach is similar: introduce ctxtz to hold
function-local state and thread it through
the assembler as necessary.
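
For orientation, a condensed sketch of the pattern (type and field names are taken from the diff below; the wrapper body is illustrative only, not the real spanz):

	// Inside package s390x (which imports "cmd/internal/obj").
	// Per-function scratch state moves off the shared *obj.Link and into a
	// ctxtz created fresh for every function, so concurrently assembled
	// functions never write to the same fields.
	type ctxtz struct {
		ctxt       *obj.Link // shared, effectively read-only here
		newprog    obj.ProgAlloc
		cursym     *obj.LSym // function being assembled
		autosize   int32
		instoffset int64
		pc         int64
	}

	func spanz(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
		c := ctxtz{ctxt: ctxt, cursym: cursym, newprog: newprog}
		// Former helpers such as asmout and aclass become methods on c,
		// e.g. c.asmout(p, &buf), so per-function state stays local.
		_ = c
	}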

One race remains after this CL, similar to CL 40252.

That race is conceptually unrelated to this refactoring,
and will be addressed in a separate CL.

Passes toolstash-check -all.

Updates #15756

Change-Id: Iabf17aa242b70c0b078c2e85dae3d93a5e512372
Reviewed-on: https://go-review.googlesource.com/40371
Run-TryBot: Josh Bleecher Snyder <josharian@gmail.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Michael Munday <munday@ca.ibm.com>
parent f95de5c6
@@ -727,7 +727,6 @@ type Link struct {
 	InlTree       InlTree // global inlining tree used by gc/inl.go
 	Imports       []string
 	Plan9privates *LSym
-	Printp        *Prog
 	Instoffset    int64
 	Autosize      int32
 	Pc            int64
@@ -36,6 +36,18 @@ import (
 	"sort"
 )
 
+// ctxtz holds state while assembling a single function.
+// Each function gets a fresh ctxtz.
+// This allows for multiple functions to be safely concurrently assembled.
+type ctxtz struct {
+	ctxt       *obj.Link
+	newprog    obj.ProgAlloc
+	cursym     *obj.LSym
+	autosize   int32
+	instoffset int64
+	pc         int64
+}
+
 // instruction layout.
 const (
 	funcAlign = 16
@@ -390,50 +402,50 @@ func spanz(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
 	if p == nil || p.Link == nil { // handle external functions and ELF section symbols
 		return
 	}
-	ctxt.Cursym = cursym
-	ctxt.Autosize = int32(p.To.Offset)
 
 	if oprange[AORW&obj.AMask] == nil {
 		ctxt.Diag("s390x ops not initialized, call s390x.buildop first")
 	}
 
+	c := ctxtz{ctxt: ctxt, newprog: newprog, cursym: cursym, autosize: int32(p.To.Offset)}
+
 	buffer := make([]byte, 0)
 	changed := true
 	loop := 0
 	for changed {
 		if loop > 10 {
-			ctxt.Diag("stuck in spanz loop")
+			c.ctxt.Diag("stuck in spanz loop")
 			break
 		}
 		changed = false
 		buffer = buffer[:0]
-		ctxt.Cursym.R = make([]obj.Reloc, 0)
-		for p := cursym.Text; p != nil; p = p.Link {
+		c.cursym.R = make([]obj.Reloc, 0)
+		for p := c.cursym.Text; p != nil; p = p.Link {
 			pc := int64(len(buffer))
 			if pc != p.Pc {
 				changed = true
 			}
 			p.Pc = pc
-			ctxt.Pc = p.Pc
-			asmout(ctxt, p, &buffer)
+			c.pc = p.Pc
+			c.asmout(p, &buffer)
 			if pc == int64(len(buffer)) {
 				switch p.As {
 				case obj.ANOP, obj.AFUNCDATA, obj.APCDATA, obj.ATEXT:
 					// ok
 				default:
-					ctxt.Diag("zero-width instruction\n%v", p)
+					c.ctxt.Diag("zero-width instruction\n%v", p)
 				}
 			}
 		}
 		loop++
 	}
 
-	cursym.Size = int64(len(buffer))
-	if cursym.Size%funcAlign != 0 {
-		cursym.Size += funcAlign - (cursym.Size % funcAlign)
+	c.cursym.Size = int64(len(buffer))
+	if c.cursym.Size%funcAlign != 0 {
+		c.cursym.Size += funcAlign - (c.cursym.Size % funcAlign)
 	}
-	cursym.Grow(cursym.Size)
-	copy(cursym.P, buffer)
+	c.cursym.Grow(c.cursym.Size)
+	copy(c.cursym.P, buffer)
 }
 
 func isint32(v int64) bool {
@@ -444,7 +456,7 @@ func isuint32(v uint64) bool {
 	return uint64(uint32(v)) == v
 }
 
-func aclass(ctxt *obj.Link, a *obj.Addr) int {
+func (c *ctxtz) aclass(a *obj.Addr) int {
 	switch a.Type {
 	case obj.TYPE_NONE:
 		return C_NONE
@@ -472,9 +484,9 @@ func aclass(ctxt *obj.Link, a *obj.Addr) int {
 				// must have a symbol
 				break
 			}
-			ctxt.Instoffset = a.Offset
+			c.instoffset = a.Offset
 			if a.Sym.Type == obj.STLSBSS {
-				if ctxt.Flag_shared {
+				if c.ctxt.Flag_shared {
 					return C_TLS_IE // initial exec model
 				}
 				return C_TLS_LE // local exec model
@@ -485,25 +497,25 @@ func aclass(ctxt *obj.Link, a *obj.Addr) int {
 			return C_GOTADDR
 
 		case obj.NAME_AUTO:
-			ctxt.Instoffset = int64(ctxt.Autosize) + a.Offset
-			if ctxt.Instoffset >= -BIG && ctxt.Instoffset < BIG {
+			c.instoffset = int64(c.autosize) + a.Offset
+			if c.instoffset >= -BIG && c.instoffset < BIG {
 				return C_SAUTO
 			}
 			return C_LAUTO
 
 		case obj.NAME_PARAM:
-			ctxt.Instoffset = int64(ctxt.Autosize) + a.Offset + ctxt.FixedFrameSize()
-			if ctxt.Instoffset >= -BIG && ctxt.Instoffset < BIG {
+			c.instoffset = int64(c.autosize) + a.Offset + c.ctxt.FixedFrameSize()
+			if c.instoffset >= -BIG && c.instoffset < BIG {
 				return C_SAUTO
 			}
 			return C_LAUTO
 
 		case obj.NAME_NONE:
-			ctxt.Instoffset = a.Offset
-			if ctxt.Instoffset == 0 {
+			c.instoffset = a.Offset
+			if c.instoffset == 0 {
 				return C_ZOREG
 			}
-			if ctxt.Instoffset >= -BIG && ctxt.Instoffset < BIG {
+			if c.instoffset >= -BIG && c.instoffset < BIG {
 				return C_SOREG
 			}
 			return C_LOREG
@@ -518,18 +530,18 @@ func aclass(ctxt *obj.Link, a *obj.Addr) int {
 		if f64, ok := a.Val.(float64); ok && math.Float64bits(f64) == 0 {
 			return C_ZCON
 		}
-		ctxt.Diag("cannot handle the floating point constant %v", a.Val)
+		c.ctxt.Diag("cannot handle the floating point constant %v", a.Val)
 
 	case obj.TYPE_CONST,
 		obj.TYPE_ADDR:
 		switch a.Name {
 		case obj.NAME_NONE:
-			ctxt.Instoffset = a.Offset
+			c.instoffset = a.Offset
 			if a.Reg != 0 {
-				if -BIG <= ctxt.Instoffset && ctxt.Instoffset <= BIG {
+				if -BIG <= c.instoffset && c.instoffset <= BIG {
 					return C_SACON
 				}
-				if isint32(ctxt.Instoffset) {
+				if isint32(c.instoffset) {
 					return C_LACON
 				}
 				return C_DACON
@@ -542,7 +554,7 @@ func aclass(ctxt *obj.Link, a *obj.Addr) int {
 			if s == nil {
 				break
 			}
-			ctxt.Instoffset = a.Offset
+			c.instoffset = a.Offset
 			if s.Type == obj.SCONST {
 				goto consize
 			}
@@ -550,15 +562,15 @@ func aclass(ctxt *obj.Link, a *obj.Addr) int {
 			return C_SYMADDR
 
 		case obj.NAME_AUTO:
-			ctxt.Instoffset = int64(ctxt.Autosize) + a.Offset
-			if ctxt.Instoffset >= -BIG && ctxt.Instoffset < BIG {
+			c.instoffset = int64(c.autosize) + a.Offset
+			if c.instoffset >= -BIG && c.instoffset < BIG {
 				return C_SACON
 			}
 			return C_LACON
 
 		case obj.NAME_PARAM:
-			ctxt.Instoffset = int64(ctxt.Autosize) + a.Offset + ctxt.FixedFrameSize()
-			if ctxt.Instoffset >= -BIG && ctxt.Instoffset < BIG {
+			c.instoffset = int64(c.autosize) + a.Offset + c.ctxt.FixedFrameSize()
+			if c.instoffset >= -BIG && c.instoffset < BIG {
 				return C_SACON
 			}
 			return C_LACON
@@ -567,32 +579,32 @@ func aclass(ctxt *obj.Link, a *obj.Addr) int {
 		return C_GOK
 
consize:
-	if ctxt.Instoffset == 0 {
+	if c.instoffset == 0 {
 		return C_ZCON
 	}
-	if ctxt.Instoffset >= 0 {
-		if ctxt.Instoffset <= 0x7fff {
+	if c.instoffset >= 0 {
+		if c.instoffset <= 0x7fff {
 			return C_SCON
 		}
-		if ctxt.Instoffset <= 0xffff {
+		if c.instoffset <= 0xffff {
 			return C_ANDCON
 		}
-		if ctxt.Instoffset&0xffff == 0 && isuint32(uint64(ctxt.Instoffset)) { /* && (instoffset & (1<<31)) == 0) */
+		if c.instoffset&0xffff == 0 && isuint32(uint64(c.instoffset)) { /* && (instoffset & (1<<31)) == 0) */
 			return C_UCON
 		}
-		if isint32(ctxt.Instoffset) || isuint32(uint64(ctxt.Instoffset)) {
+		if isint32(c.instoffset) || isuint32(uint64(c.instoffset)) {
 			return C_LCON
 		}
 		return C_DCON
 	}
-	if ctxt.Instoffset >= -0x8000 {
+	if c.instoffset >= -0x8000 {
 		return C_ADDCON
 	}
-	if ctxt.Instoffset&0xffff == 0 && isint32(ctxt.Instoffset) {
+	if c.instoffset&0xffff == 0 && isint32(c.instoffset) {
 		return C_UCON
 	}
-	if isint32(ctxt.Instoffset) {
+	if isint32(c.instoffset) {
 		return C_LCON
 	}
 	return C_DCON
@@ -604,14 +616,14 @@ func aclass(ctxt *obj.Link, a *obj.Addr) int {
 	return C_GOK
 }
 
-func oplook(ctxt *obj.Link, p *obj.Prog) *Optab {
+func (c *ctxtz) oplook(p *obj.Prog) *Optab {
 	a1 := int(p.Optab)
 	if a1 != 0 {
 		return &optab[a1-1]
 	}
 	a1 = int(p.From.Class)
 	if a1 == 0 {
-		a1 = aclass(ctxt, &p.From) + 1
+		a1 = c.aclass(&p.From) + 1
 		p.From.Class = int8(a1)
 	}
@@ -620,7 +632,7 @@ func oplook(ctxt *obj.Link, p *obj.Prog) *Optab {
 	if p.From3 != nil {
 		a3 = int(p.From3.Class)
 		if a3 == 0 {
-			a3 = aclass(ctxt, p.From3) + 1
+			a3 = c.aclass(p.From3) + 1
 			p.From3.Class = int8(a3)
 		}
 	}
@@ -628,7 +640,7 @@ func oplook(ctxt *obj.Link, p *obj.Prog) *Optab {
 	a3--
 	a4 := int(p.To.Class)
 	if a4 == 0 {
-		a4 = aclass(ctxt, &p.To) + 1
+		a4 = c.aclass(&p.To) + 1
 		p.To.Class = int8(a4)
 	}
@@ -660,8 +672,8 @@ func oplook(ctxt *obj.Link, p *obj.Prog) *Optab {
 	}
 
 	// cannot find a case; abort
-	ctxt.Diag("illegal combination %v %v %v %v %v\n", p.As, DRconv(a1), DRconv(a2), DRconv(a3), DRconv(a4))
-	ctxt.Diag("prog: %v\n", p)
+	c.ctxt.Diag("illegal combination %v %v %v %v %v\n", p.As, DRconv(a1), DRconv(a2), DRconv(a3), DRconv(a4))
+	c.ctxt.Diag("prog: %v\n", p)
 	return nil
 }
@@ -2484,13 +2496,13 @@ func oclass(a *obj.Addr) int {
 
 // Add a relocation for the immediate in a RIL style instruction.
 // The addend will be adjusted as required.
-func addrilreloc(ctxt *obj.Link, sym *obj.LSym, add int64) *obj.Reloc {
+func (c *ctxtz) addrilreloc(sym *obj.LSym, add int64) *obj.Reloc {
 	if sym == nil {
-		ctxt.Diag("require symbol to apply relocation")
+		c.ctxt.Diag("require symbol to apply relocation")
 	}
 	offset := int64(2) // relocation offset from start of instruction
-	rel := obj.Addrel(ctxt.Cursym)
-	rel.Off = int32(ctxt.Pc + offset)
+	rel := obj.Addrel(c.cursym)
+	rel.Off = int32(c.pc + offset)
 	rel.Siz = 4
 	rel.Sym = sym
 	rel.Add = add + offset + int64(rel.Siz)
@@ -2498,13 +2510,13 @@ func addrilreloc(ctxt *obj.Link, sym *obj.LSym, add int64) *obj.Reloc {
 	return rel
 }
 
-func addrilrelocoffset(ctxt *obj.Link, sym *obj.LSym, add, offset int64) *obj.Reloc {
+func (c *ctxtz) addrilrelocoffset(sym *obj.LSym, add, offset int64) *obj.Reloc {
 	if sym == nil {
-		ctxt.Diag("require symbol to apply relocation")
+		c.ctxt.Diag("require symbol to apply relocation")
 	}
 	offset += int64(2) // relocation offset from start of instruction
-	rel := obj.Addrel(ctxt.Cursym)
-	rel.Off = int32(ctxt.Pc + offset)
+	rel := obj.Addrel(c.cursym)
+	rel.Off = int32(c.pc + offset)
 	rel.Siz = 4
 	rel.Sym = sym
 	rel.Add = add + offset + int64(rel.Siz)
@@ -2514,13 +2526,13 @@ func addrilrelocoffset(ctxt *obj.Link, sym *obj.LSym, add, offset int64) *obj.Re
 
 // Add a CALL relocation for the immediate in a RIL style instruction.
 // The addend will be adjusted as required.
-func addcallreloc(ctxt *obj.Link, sym *obj.LSym, add int64) *obj.Reloc {
+func (c *ctxtz) addcallreloc(sym *obj.LSym, add int64) *obj.Reloc {
 	if sym == nil {
-		ctxt.Diag("require symbol to apply relocation")
+		c.ctxt.Diag("require symbol to apply relocation")
 	}
 	offset := int64(2) // relocation offset from start of instruction
-	rel := obj.Addrel(ctxt.Cursym)
-	rel.Off = int32(ctxt.Pc + offset)
+	rel := obj.Addrel(c.cursym)
+	rel.Off = int32(c.pc + offset)
 	rel.Siz = 4
 	rel.Sym = sym
 	rel.Add = add + offset + int64(rel.Siz)
@@ -2528,7 +2540,7 @@ func addcallreloc(ctxt *obj.Link, sym *obj.LSym, add int64) *obj.Reloc {
 	return rel
 }
 
-func branchMask(ctxt *obj.Link, p *obj.Prog) uint32 {
+func (c *ctxtz) branchMask(p *obj.Prog) uint32 {
 	switch p.As {
 	case ABEQ, ACMPBEQ, ACMPUBEQ, AMOVDEQ:
 		return 0x8
@@ -2551,17 +2563,16 @@ func branchMask(ctxt *obj.Link, p *obj.Prog) uint32 {
 	case ABVS:
 		return 0x1 // unordered
 	}
-	ctxt.Diag("unknown conditional branch %v", p.As)
+	c.ctxt.Diag("unknown conditional branch %v", p.As)
 	return 0xF
 }
 
-func asmout(ctxt *obj.Link, p *obj.Prog, asm *[]byte) {
-	o := oplook(ctxt, p)
-	ctxt.Printp = p
+func (c *ctxtz) asmout(p *obj.Prog, asm *[]byte) {
+	o := c.oplook(p)
 	switch o.type_ {
 	default:
-		ctxt.Diag("unknown type %d", o.type_)
+		c.ctxt.Diag("unknown type %d", o.type_)
 
 	case 0: // PSEUDO OPS
 		break
@@ -2569,7 +2580,7 @@ func asmout(ctxt *obj.Link, p *obj.Prog, asm *[]byte) {
 	case 1: // mov reg reg
 		switch p.As {
 		default:
-			ctxt.Diag("unhandled operation: %v", p.As)
+			c.ctxt.Diag("unhandled operation: %v", p.As)
 		case AMOVD:
 			zRRE(op_LGR, uint32(p.To.Reg), uint32(p.From.Reg), asm)
 		// sign extend
@@ -2606,7 +2617,7 @@ func asmout(ctxt *obj.Link, p *obj.Prog, asm *[]byte) {
 		switch p.As {
 		default:
-			ctxt.Diag("invalid opcode")
+			c.ctxt.Diag("invalid opcode")
 		case AADD:
 			opcode = op_AGRK
 		case AADDC:
@@ -2668,7 +2679,7 @@ func asmout(ctxt *obj.Link, p *obj.Prog, asm *[]byte) {
 		}
 
 	case 3: // mov $constant reg
-		v := vregoff(ctxt, &p.From)
+		v := c.vregoff(&p.From)
 		switch p.As {
 		case AMOVBZ:
 			v = int64(uint8(v))
@@ -2760,7 +2771,7 @@ func asmout(ctxt *obj.Link, p *obj.Prog, asm *[]byte) {
 		}
 
 	case 7: // shift/rotate reg [reg] reg
-		d2 := vregoff(ctxt, &p.From)
+		d2 := c.vregoff(&p.From)
 		b2 := p.From.Reg
 		r3 := p.Reg
 		if r3 == 0 {
@@ -2791,7 +2802,7 @@ func asmout(ctxt *obj.Link, p *obj.Prog, asm *[]byte) {
 	case 8: // find leftmost one
 		if p.To.Reg&1 != 0 {
-			ctxt.Diag("target must be an even-numbered register")
+			c.ctxt.Diag("target must be an even-numbered register")
 		}
 		// FLOGR also writes a mask to p.To.Reg+1.
 		zRRE(op_FLOGR, uint32(p.To.Reg), uint32(p.From.Reg), asm)
@@ -2851,13 +2862,13 @@ func asmout(ctxt *obj.Link, p *obj.Prog, asm *[]byte) {
 				zRIL(_c, op_BRCL, 0xF, uint32(v), asm)
 			}
 			if p.To.Sym != nil {
-				addcallreloc(ctxt, p.To.Sym, p.To.Offset)
+				c.addcallreloc(p.To.Sym, p.To.Offset)
 			}
 		}
 
 	case 12:
 		r1 := p.To.Reg
-		d2 := vregoff(ctxt, &p.From)
+		d2 := c.vregoff(&p.From)
 		b2 := p.From.Reg
 		if b2 == 0 {
 			b2 = o.param
@@ -2929,18 +2940,18 @@ func asmout(ctxt *obj.Link, p *obj.Prog, asm *[]byte) {
 		if p.Pcond != nil {
 			v = int32((p.Pcond.Pc - p.Pc) >> 1)
 		}
-		mask := branchMask(ctxt, p)
+		mask := c.branchMask(p)
 		if p.To.Sym == nil && int32(int16(v)) == v {
 			zRI(op_BRC, mask, uint32(v), asm)
 		} else {
 			zRIL(_c, op_BRCL, mask, uint32(v), asm)
 		}
 		if p.To.Sym != nil {
-			addrilreloc(ctxt, p.To.Sym, p.To.Offset)
+			c.addrilreloc(p.To.Sym, p.To.Offset)
 		}
 
 	case 17: // move on condition
-		m3 := branchMask(ctxt, p)
+		m3 := c.branchMask(p)
 		zRRF(op_LOCGR, m3, 0, uint32(p.To.Reg), uint32(p.From.Reg), asm)
 
 	case 18: // br/bl reg
@@ -2951,16 +2962,16 @@ func asmout(ctxt *obj.Link, p *obj.Prog, asm *[]byte) {
 		}
 
 	case 19: // mov $sym+n(SB) reg
-		d := vregoff(ctxt, &p.From)
+		d := c.vregoff(&p.From)
 		zRIL(_b, op_LARL, uint32(p.To.Reg), 0, asm)
 		if d&1 != 0 {
 			zRX(op_LA, uint32(p.To.Reg), uint32(p.To.Reg), 0, 1, asm)
 			d -= 1
 		}
-		addrilreloc(ctxt, p.From.Sym, d)
+		c.addrilreloc(p.From.Sym, d)
 
 	case 21: // subtract $constant [reg] reg
-		v := vregoff(ctxt, &p.From)
+		v := c.vregoff(&p.From)
 		r := p.Reg
 		if r == 0 {
 			r = p.To.Reg
@@ -2982,7 +2993,7 @@ func asmout(ctxt *obj.Link, p *obj.Prog, asm *[]byte) {
 		}
 
 	case 22: // add/multiply $constant [reg] reg
-		v := vregoff(ctxt, &p.From)
+		v := c.vregoff(&p.From)
 		r := p.Reg
 		if r == 0 {
 			r = p.To.Reg
@@ -3028,10 +3039,10 @@ func asmout(ctxt *obj.Link, p *obj.Prog, asm *[]byte) {
 	case 23: // 64-bit logical op $constant reg
 		// TODO(mundaym): merge with case 24.
-		v := vregoff(ctxt, &p.From)
+		v := c.vregoff(&p.From)
 		switch p.As {
 		default:
-			ctxt.Diag("%v is not supported", p)
+			c.ctxt.Diag("%v is not supported", p)
 		case AAND:
 			if v >= 0 { // needs zero extend
 				zRIL(_a, op_LGFI, REGTMP, uint32(v), asm)
@@ -3060,7 +3071,7 @@ func asmout(ctxt *obj.Link, p *obj.Prog, asm *[]byte) {
 		}
 
 	case 24: // 32-bit logical op $constant reg
-		v := vregoff(ctxt, &p.From)
+		v := c.vregoff(&p.From)
 		switch p.As {
 		case AANDW:
 			if uint32(v&0xffff0000) == 0xffff0000 {
@@ -3083,7 +3094,7 @@ func asmout(ctxt *obj.Link, p *obj.Prog, asm *[]byte) {
 		}
 
 	case 26: // MOVD $offset(base)(index), reg
-		v := regoff(ctxt, &p.From)
+		v := c.regoff(&p.From)
 		r := p.From.Reg
 		if r == 0 {
 			r = o.param
@@ -3099,7 +3110,7 @@ func asmout(ctxt *obj.Link, p *obj.Prog, asm *[]byte) {
 		}
 
 	case 31: // dword
-		wd := uint64(vregoff(ctxt, &p.From))
+		wd := uint64(c.vregoff(&p.From))
 		*asm = append(*asm,
 			uint8(wd>>56),
 			uint8(wd>>48),
@@ -3114,7 +3125,7 @@ func asmout(ctxt *obj.Link, p *obj.Prog, asm *[]byte) {
 		var opcode uint32
 		switch p.As {
 		default:
-			ctxt.Diag("invalid opcode")
+			c.ctxt.Diag("invalid opcode")
 		case AFADD:
 			opcode = op_ADBR
 		case AFADDS:
@@ -3165,7 +3176,7 @@ func asmout(ctxt *obj.Link, p *obj.Prog, asm *[]byte) {
 		var opcode uint32
 		switch p.As {
 		default:
-			ctxt.Diag("invalid opcode")
+			c.ctxt.Diag("invalid opcode")
 		case AFMADD:
 			opcode = op_MADBR
 		case AFMADDS:
@@ -3178,7 +3189,7 @@ func asmout(ctxt *obj.Link, p *obj.Prog, asm *[]byte) {
 		zRRD(opcode, uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), asm)
 
 	case 35: // mov reg mem (no relocation)
-		d2 := regoff(ctxt, &p.To)
+		d2 := c.regoff(&p.To)
 		b2 := p.To.Reg
 		if b2 == 0 {
 			b2 = o.param
@@ -3192,10 +3203,10 @@ func asmout(ctxt *obj.Link, p *obj.Prog, asm *[]byte) {
 			x2 = REGTMP
 			d2 = 0
 		}
-		zRXY(zopstore(ctxt, p.As), uint32(p.From.Reg), uint32(x2), uint32(b2), uint32(d2), asm)
+		zRXY(c.zopstore(p.As), uint32(p.From.Reg), uint32(x2), uint32(b2), uint32(d2), asm)
 
 	case 36: // mov mem reg (no relocation)
-		d2 := regoff(ctxt, &p.From)
+		d2 := c.regoff(&p.From)
 		b2 := p.From.Reg
 		if b2 == 0 {
 			b2 = o.param
@@ -3209,10 +3220,10 @@ func asmout(ctxt *obj.Link, p *obj.Prog, asm *[]byte) {
 			x2 = REGTMP
 			d2 = 0
 		}
-		zRXY(zopload(ctxt, p.As), uint32(p.To.Reg), uint32(x2), uint32(b2), uint32(d2), asm)
+		zRXY(c.zopload(p.As), uint32(p.To.Reg), uint32(x2), uint32(b2), uint32(d2), asm)
 
 	case 40: // word/byte
-		wd := uint32(regoff(ctxt, &p.From))
+		wd := uint32(c.regoff(&p.From))
 		if p.As == AWORD { //WORD
 			*asm = append(*asm, uint8(wd>>24), uint8(wd>>16), uint8(wd>>8), uint8(wd))
 		} else { //BYTE
@@ -3232,9 +3243,9 @@ func asmout(ctxt *obj.Link, p *obj.Prog, asm *[]byte) {
 		}
 
 	case 48: // floating-point round to integer
-		m3 := vregoff(ctxt, &p.From)
+		m3 := c.vregoff(&p.From)
 		if 0 > m3 || m3 > 7 {
-			ctxt.Diag("mask (%v) must be in the range [0, 7]", m3)
+			c.ctxt.Diag("mask (%v) must be in the range [0, 7]", m3)
 		}
 		var opcode uint32
 		switch p.As {
@@ -3263,21 +3274,21 @@ func asmout(ctxt *obj.Link, p *obj.Prog, asm *[]byte) {
 	case 70: // cmp reg reg
 		if p.As == ACMPW || p.As == ACMPWU {
-			zRR(zoprr(ctxt, p.As), uint32(p.From.Reg), uint32(p.To.Reg), asm)
+			zRR(c.zoprr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), asm)
 		} else {
-			zRRE(zoprre(ctxt, p.As), uint32(p.From.Reg), uint32(p.To.Reg), asm)
+			zRRE(c.zoprre(p.As), uint32(p.From.Reg), uint32(p.To.Reg), asm)
 		}
 
 	case 71: // cmp reg $constant
-		v := vregoff(ctxt, &p.To)
+		v := c.vregoff(&p.To)
 		switch p.As {
 		case ACMP, ACMPW:
 			if int64(int32(v)) != v {
-				ctxt.Diag("%v overflows an int32", v)
+				c.ctxt.Diag("%v overflows an int32", v)
 			}
 		case ACMPU, ACMPWU:
 			if int64(uint32(v)) != v {
-				ctxt.Diag("%v overflows a uint32", v)
+				c.ctxt.Diag("%v overflows a uint32", v)
 			}
 		}
 		if p.As == ACMP && int64(int16(v)) == v {
@@ -3285,12 +3296,12 @@ func asmout(ctxt *obj.Link, p *obj.Prog, asm *[]byte) {
 		} else if p.As == ACMPW && int64(int16(v)) == v {
 			zRI(op_CHI, uint32(p.From.Reg), uint32(v), asm)
 		} else {
-			zRIL(_a, zopril(ctxt, p.As), uint32(p.From.Reg), uint32(v), asm)
+			zRIL(_a, c.zopril(p.As), uint32(p.From.Reg), uint32(v), asm)
 		}
 
 	case 72: // mov $constant mem
-		v := regoff(ctxt, &p.From)
-		d := regoff(ctxt, &p.To)
+		v := c.regoff(&p.From)
+		d := c.regoff(&p.To)
 		r := p.To.Reg
 		x := p.To.Index
 		if r == 0 {
@@ -3337,19 +3348,19 @@ func asmout(ctxt *obj.Link, p *obj.Prog, asm *[]byte) {
 				}
 				d = 0
 			}
-			zRXY(zopstore(ctxt, p.As), REGTMP2, uint32(x), uint32(r), uint32(d), asm)
+			zRXY(c.zopstore(p.As), REGTMP2, uint32(x), uint32(r), uint32(d), asm)
 		}
 
 	case 73: // mov $constant addr (including relocation)
-		v := regoff(ctxt, &p.From)
-		d := regoff(ctxt, &p.To)
+		v := c.regoff(&p.From)
+		d := c.regoff(&p.To)
 		a := uint32(0)
 		if d&1 != 0 {
 			d -= 1
 			a = 1
 		}
 		zRIL(_b, op_LARL, REGTMP, uint32(d), asm)
-		addrilreloc(ctxt, p.To.Sym, int64(d))
+		c.addrilreloc(p.To.Sym, int64(d))
 		if int32(int16(v)) == v {
 			var opcode uint32
 			switch p.As {
@@ -3369,11 +3380,11 @@ func asmout(ctxt *obj.Link, p *obj.Prog, asm *[]byte) {
 			}
 		} else {
 			zRIL(_a, op_LGFI, REGTMP2, uint32(v), asm)
-			zRXY(zopstore(ctxt, p.As), REGTMP2, 0, REGTMP, a, asm)
+			zRXY(c.zopstore(p.As), REGTMP2, 0, REGTMP, a, asm)
 		}
 
 	case 74: // mov reg addr (including relocation)
-		i2 := regoff(ctxt, &p.To)
+		i2 := c.regoff(&p.To)
 		switch p.As {
 		case AMOVD:
 			zRIL(_b, op_STGRL, uint32(p.From.Reg), 0, asm)
@@ -3396,10 +3407,10 @@ func asmout(ctxt *obj.Link, p *obj.Prog, asm *[]byte) {
 			zRIL(_b, op_LARL, REGTMP, 0, asm)
 			zRX(op_STE, uint32(p.From.Reg), 0, REGTMP, 0, asm)
 		}
-		addrilreloc(ctxt, p.To.Sym, int64(i2))
+		c.addrilreloc(p.To.Sym, int64(i2))
 
 	case 75: // mov addr reg (including relocation)
-		i2 := regoff(ctxt, &p.From)
+		i2 := c.regoff(&p.From)
 		switch p.As {
 		case AMOVD:
 			if i2&1 != 0 {
@@ -3437,11 +3448,11 @@ func asmout(ctxt *obj.Link, p *obj.Prog, asm *[]byte) {
 			zRIL(_a, op_LARL, REGTMP, 0, asm)
 			zRX(op_LE, uint32(p.To.Reg), 0, REGTMP, 0, asm)
 		}
-		addrilreloc(ctxt, p.From.Sym, int64(i2))
+		c.addrilreloc(p.From.Sym, int64(i2))
 
 	case 77: // syscall $constant
 		if p.From.Offset > 255 || p.From.Offset < 1 {
-			ctxt.Diag("illegal system call; system call number out of range: %v", p)
+			c.ctxt.Diag("illegal system call; system call number out of range: %v", p)
 			zE(op_TRAP2, asm) // trap always
 		} else {
 			zI(op_SVC, uint32(p.From.Offset), asm)
@@ -3453,7 +3464,7 @@ func asmout(ctxt *obj.Link, p *obj.Prog, asm *[]byte) {
 		*asm = append(*asm, 0, 0, 0, 0)
 
 	case 79: // compare and swap reg reg reg
-		v := regoff(ctxt, &p.To)
+		v := c.regoff(&p.To)
 		if v < 0 {
 			v = 0
 		}
@@ -3521,12 +3532,12 @@ func asmout(ctxt *obj.Link, p *obj.Prog, asm *[]byte) {
 		zRRF(opcode, 5, 0, uint32(p.To.Reg), uint32(p.From.Reg), asm)
 
 	case 84: // storage-and-storage operations $length mem mem (length in From3)
-		l := regoff(ctxt, p.From3)
+		l := c.regoff(p.From3)
 		if l < 1 || l > 256 {
-			ctxt.Diag("number of bytes (%v) not in range [1,256]", l)
+			c.ctxt.Diag("number of bytes (%v) not in range [1,256]", l)
 		}
 		if p.From.Index != 0 || p.To.Index != 0 {
-			ctxt.Diag("cannot use index reg")
+			c.ctxt.Diag("cannot use index reg")
 		}
 		b1 := p.To.Reg
 		b2 := p.From.Reg
@@ -3536,11 +3547,11 @@ func asmout(ctxt *obj.Link, p *obj.Prog, asm *[]byte) {
 		if b2 == 0 {
 			b2 = o.param
 		}
-		d1 := regoff(ctxt, &p.To)
-		d2 := regoff(ctxt, &p.From)
+		d1 := c.regoff(&p.To)
+		d2 := c.regoff(&p.From)
 		if d1 < 0 || d1 >= DISP12 {
 			if b2 == REGTMP {
-				ctxt.Diag("REGTMP conflict")
+				c.ctxt.Diag("REGTMP conflict")
 			}
 			if b1 != REGTMP {
 				zRRE(op_LGR, REGTMP, uint32(b1), asm)
@@ -3555,7 +3566,7 @@ func asmout(ctxt *obj.Link, p *obj.Prog, asm *[]byte) {
 		}
 		if d2 < 0 || d2 >= DISP12 {
 			if b1 == REGTMP2 {
-				ctxt.Diag("REGTMP2 conflict")
+				c.ctxt.Diag("REGTMP2 conflict")
 			}
 			if b2 != REGTMP2 {
 				zRRE(op_LGR, REGTMP2, uint32(b2), asm)
@@ -3567,7 +3578,7 @@ func asmout(ctxt *obj.Link, p *obj.Prog, asm *[]byte) {
 		var opcode uint32
 		switch p.As {
 		default:
-			ctxt.Diag("unexpected opcode %v", p.As)
+			c.ctxt.Diag("unexpected opcode %v", p.As)
 		case AMVC:
 			opcode = op_MVC
 		case ACLC:
@@ -3585,19 +3596,19 @@ func asmout(ctxt *obj.Link, p *obj.Prog, asm *[]byte) {
 		zSS(_a, opcode, uint32(l-1), 0, uint32(b1), uint32(d1), uint32(b2), uint32(d2), asm)
 
 	case 85: // load address relative long
-		v := regoff(ctxt, &p.From)
+		v := c.regoff(&p.From)
 		if p.From.Sym == nil {
 			if (v & 1) != 0 {
-				ctxt.Diag("cannot use LARL with odd offset: %v", v)
+				c.ctxt.Diag("cannot use LARL with odd offset: %v", v)
 			}
 		} else {
-			addrilreloc(ctxt, p.From.Sym, int64(v))
+			c.addrilreloc(p.From.Sym, int64(v))
 			v = 0
 		}
 		zRIL(_b, op_LARL, uint32(p.To.Reg), uint32(v>>1), asm)
 
 	case 86: // load address
-		d := vregoff(ctxt, &p.From)
+		d := c.vregoff(&p.From)
 		x := p.From.Index
 		b := p.From.Reg
 		if b == 0 {
@@ -3611,13 +3622,13 @@ func asmout(ctxt *obj.Link, p *obj.Prog, asm *[]byte) {
 		}
 
 	case 87: // execute relative long
-		v := vregoff(ctxt, &p.From)
+		v := c.vregoff(&p.From)
 		if p.From.Sym == nil {
 			if v&1 != 0 {
-				ctxt.Diag("cannot use EXRL with odd offset: %v", v)
+				c.ctxt.Diag("cannot use EXRL with odd offset: %v", v)
 			}
 		} else {
-			addrilreloc(ctxt, p.From.Sym, v)
+			c.addrilreloc(p.From.Sym, v)
 			v = 0
 		}
 		zRIL(_b, op_EXRL, uint32(p.To.Reg), uint32(v>>1), asm)
@@ -3634,7 +3645,7 @@ func asmout(ctxt *obj.Link, p *obj.Prog, asm *[]byte) {
 		case ASTCKF:
 			opcode = op_STCKF
 		}
-		v := vregoff(ctxt, &p.To)
+		v := c.vregoff(&p.To)
 		r := int(p.To.Reg)
 		if r == 0 {
 			r = int(o.param)
@@ -3655,7 +3666,7 @@ func asmout(ctxt *obj.Link, p *obj.Prog, asm *[]byte) {
 			opcode = op_CLGRJ
 			opcode2 = op_CLGR
 		}
-		mask := branchMask(ctxt, p)
+		mask := c.branchMask(p)
 		if int32(int16(v)) != v {
 			zRRE(opcode2, uint32(p.From.Reg), uint32(p.Reg), asm)
 			zRIL(_c, op_BRCL, mask, uint32(v-sizeRRE/2), asm)
@@ -3677,22 +3688,22 @@ func asmout(ctxt *obj.Link, p *obj.Prog, asm *[]byte) {
 			opcode = op_CLGIJ
 			opcode2 = op_CLGFI
 		}
-		mask := branchMask(ctxt, p)
+		mask := c.branchMask(p)
 		if int32(int16(v)) != v {
-			zRIL(_a, opcode2, uint32(p.From.Reg), uint32(regoff(ctxt, p.From3)), asm)
+			zRIL(_a, opcode2, uint32(p.From.Reg), uint32(c.regoff(p.From3)), asm)
 			zRIL(_c, op_BRCL, mask, uint32(v-sizeRIL/2), asm)
 		} else {
-			zRIE(_c, opcode, uint32(p.From.Reg), mask, uint32(v), 0, 0, 0, uint32(regoff(ctxt, p.From3)), asm)
+			zRIE(_c, opcode, uint32(p.From.Reg), mask, uint32(v), 0, 0, 0, uint32(c.regoff(p.From3)), asm)
 		}
 
 	case 93: // GOT lookup
-		v := vregoff(ctxt, &p.To)
+		v := c.vregoff(&p.To)
 		if v != 0 {
-			ctxt.Diag("invalid offset against GOT slot %v", p)
+			c.ctxt.Diag("invalid offset against GOT slot %v", p)
 		}
 		zRIL(_b, op_LGRL, uint32(p.To.Reg), 0, asm)
-		rel := obj.Addrel(ctxt.Cursym)
-		rel.Off = int32(ctxt.Pc + 2)
+		rel := obj.Addrel(c.cursym)
+		rel.Off = int32(c.pc + 2)
 		rel.Siz = 4
 		rel.Sym = p.From.Sym
 		rel.Type = obj.R_GOTPCREL
@@ -3703,8 +3714,8 @@ func asmout(ctxt *obj.Link, p *obj.Prog, asm *[]byte) {
 		zRXY(op_LG, uint32(p.To.Reg), REGTMP, 0, 0, asm)
 		zRI(op_BRC, 0xF, (sizeRI+8)>>1, asm)
 		*asm = append(*asm, 0, 0, 0, 0, 0, 0, 0, 0)
-		rel := obj.Addrel(ctxt.Cursym)
-		rel.Off = int32(ctxt.Pc + sizeRIL + sizeRXY + sizeRI)
+		rel := obj.Addrel(c.cursym)
+		rel.Off = int32(c.pc + sizeRIL + sizeRXY + sizeRI)
 		rel.Siz = 8
 		rel.Sym = p.From.Sym
 		rel.Type = obj.R_TLS_LE
@@ -3723,8 +3734,8 @@ func asmout(ctxt *obj.Link, p *obj.Prog, asm *[]byte) {
 		// R_390_TLS_IEENT
 		zRIL(_b, op_LARL, REGTMP, 0, asm)
-		ieent := obj.Addrel(ctxt.Cursym)
-		ieent.Off = int32(ctxt.Pc + 2)
+		ieent := obj.Addrel(c.cursym)
+		ieent.Off = int32(c.pc + 2)
 		ieent.Siz = 4
 		ieent.Sym = p.From.Sym
 		ieent.Type = obj.R_TLS_IE
@@ -3736,14 +3747,14 @@ func asmout(ctxt *obj.Link, p *obj.Prog, asm *[]byte) {
 		// not strictly required but might allow the linker to optimize
 
 	case 96: // clear macro
-		length := vregoff(ctxt, &p.From)
-		offset := vregoff(ctxt, &p.To)
+		length := c.vregoff(&p.From)
+		offset := c.vregoff(&p.To)
 		reg := p.To.Reg
 		if reg == 0 {
 			reg = o.param
 		}
 		if length <= 0 {
-			ctxt.Diag("cannot CLEAR %d bytes, must be greater than 0", length)
+			c.ctxt.Diag("cannot CLEAR %d bytes, must be greater than 0", length)
 		}
 		for length > 0 {
 			if offset < 0 || offset >= DISP12 {
@@ -3783,7 +3794,7 @@ func asmout(ctxt *obj.Link, p *obj.Prog, asm *[]byte) {
 	case 97: // store multiple
 		rstart := p.From.Reg
 		rend := p.Reg
-		offset := regoff(ctxt, &p.To)
+		offset := c.regoff(&p.To)
 		reg := p.To.Reg
 		if reg == 0 {
 			reg = o.param
@@ -3810,7 +3821,7 @@ func asmout(ctxt *obj.Link, p *obj.Prog, asm *[]byte) {
 	case 98: // load multiple
 		rstart := p.Reg
 		rend := p.To.Reg
-		offset := regoff(ctxt, &p.From)
+		offset := c.regoff(&p.From)
 		reg := p.From.Reg
 		if reg == 0 {
 			reg = o.param
@@ -3836,11 +3847,11 @@ func asmout(ctxt *obj.Link, p *obj.Prog, asm *[]byte) {
 	case 99: // interlocked load and op
 		if p.To.Index != 0 {
-			ctxt.Diag("cannot use indexed address")
+			c.ctxt.Diag("cannot use indexed address")
 		}
-		offset := regoff(ctxt, &p.To)
+		offset := c.regoff(&p.To)
 		if offset < -DISP20/2 || offset >= DISP20/2 {
-			ctxt.Diag("%v does not fit into 20-bit signed integer", offset)
+			c.ctxt.Diag("%v does not fit into 20-bit signed integer", offset)
 		}
 		var opcode uint32
 		switch p.As {
@@ -3870,49 +3881,49 @@ func asmout(ctxt *obj.Link, p *obj.Prog, asm *[]byte) {
 	case 100: // VRX STORE
 		op, m3, _ := vop(p.As)
 		if p.From3 != nil {
-			m3 = uint32(vregoff(ctxt, p.From3))
+			m3 = uint32(c.vregoff(p.From3))
 		}
 		b2 := p.To.Reg
 		if b2 == 0 {
 			b2 = o.param
 		}
-		d2 := uint32(vregoff(ctxt, &p.To))
+		d2 := uint32(c.vregoff(&p.To))
 		zVRX(op, uint32(p.From.Reg), uint32(p.To.Index), uint32(b2), d2, m3, asm)
 
 	case 101: // VRX LOAD
 		op, m3, _ := vop(p.As)
 		if p.From3 != nil {
-			m3 = uint32(vregoff(ctxt, p.From3))
+			m3 = uint32(c.vregoff(p.From3))
 		}
 		b2 := p.From.Reg
 		if b2 == 0 {
 			b2 = o.param
 		}
-		d2 := uint32(vregoff(ctxt, &p.From))
+		d2 := uint32(c.vregoff(&p.From))
 		zVRX(op, uint32(p.To.Reg), uint32(p.From.Index), uint32(b2), d2, m3, asm)
 
 	case 102: // VRV SCATTER
 		op, m3, _ := vop(p.As)
 		if p.From3 != nil {
-			m3 = uint32(vregoff(ctxt, p.From3))
+			m3 = uint32(c.vregoff(p.From3))
 		}
 		b2 := p.To.Reg
 		if b2 == 0 {
 			b2 = o.param
 		}
-		d2 := uint32(vregoff(ctxt, &p.To))
+		d2 := uint32(c.vregoff(&p.To))
 		zVRV(op, uint32(p.From.Reg), uint32(p.To.Index), uint32(b2), d2, m3, asm)
 
 	case 103: // VRV GATHER
 		op, m3, _ := vop(p.As)
 		if p.From3 != nil {
-			m3 = uint32(vregoff(ctxt, p.From3))
+			m3 = uint32(c.vregoff(p.From3))
 		}
 		b2 := p.From.Reg
 		if b2 == 0 {
 			b2 = o.param
 		}
-		d2 := uint32(vregoff(ctxt, &p.From))
+		d2 := uint32(c.vregoff(&p.From))
 		zVRV(op, uint32(p.To.Reg), uint32(p.From.Index), uint32(b2), d2, m3, asm)
 
 	case 104: // VRS SHIFT/ROTATE and LOAD GR FROM VR ELEMENT
@@ -3921,12 +3932,12 @@ func asmout(ctxt *obj.Link, p *obj.Prog, asm *[]byte) {
 		if fr == 0 {
 			fr = p.To.Reg
 		}
-		bits := uint32(vregoff(ctxt, &p.From))
+		bits := uint32(c.vregoff(&p.From))
 		zVRS(op, uint32(p.To.Reg), uint32(fr), uint32(p.From.Reg), bits, m4, asm)
 
 	case 105: // VRS STORE MULTIPLE
 		op, _, _ := vop(p.As)
-		offset := uint32(vregoff(ctxt, &p.To))
+		offset := uint32(c.vregoff(&p.To))
 		reg := p.To.Reg
 		if reg == 0 {
 			reg = o.param
@@ -3935,7 +3946,7 @@ func asmout(ctxt *obj.Link, p *obj.Prog, asm *[]byte) {
 	case 106: // VRS LOAD MULTIPLE
 		op, _, _ := vop(p.As)
-		offset := uint32(vregoff(ctxt, &p.From))
+		offset := uint32(c.vregoff(&p.From))
 		reg := p.From.Reg
 		if reg == 0 {
 			reg = o.param
@@ -3944,7 +3955,7 @@ func asmout(ctxt *obj.Link, p *obj.Prog, asm *[]byte) {
 	case 107: // VRS STORE WITH LENGTH
 		op, _, _ := vop(p.As)
-		offset := uint32(vregoff(ctxt, &p.To))
+		offset := uint32(c.vregoff(&p.To))
 		reg := p.To.Reg
 		if reg == 0 {
 			reg = o.param
@@ -3953,7 +3964,7 @@ func asmout(ctxt *obj.Link, p *obj.Prog, asm *[]byte) {
 	case 108: // VRS LOAD WITH LENGTH
 		op, _, _ := vop(p.As)
-		offset := uint32(vregoff(ctxt, &p.From))
+		offset := uint32(c.vregoff(&p.From))
 		reg := p.From.Reg
 		if reg == 0 {
 			reg = o.param
@@ -3962,7 +3973,7 @@ func asmout(ctxt *obj.Link, p *obj.Prog, asm *[]byte) {
 	case 109: // VRI-a
 		op, m3, _ := vop(p.As)
-		i2 := uint32(vregoff(ctxt, &p.From))
+		i2 := uint32(c.vregoff(&p.From))
 		switch p.As {
 		case AVZERO:
 			i2 = 0
@@ -3970,30 +3981,30 @@ func asmout(ctxt *obj.Link, p *obj.Prog, asm *[]byte) {
 			i2 = 0xffff
 		}
 		if p.From3 != nil {
-			m3 = uint32(vregoff(ctxt, p.From3))
+			m3 = uint32(c.vregoff(p.From3))
 		}
 		zVRIa(op, uint32(p.To.Reg), i2, m3, asm)
 
 	case 110:
 		op, m4, _ := vop(p.As)
-		i2 := uint32(vregoff(ctxt, p.From3))
-		i3 := uint32(vregoff(ctxt, &p.From))
+		i2 := uint32(c.vregoff(p.From3))
+		i3 := uint32(c.vregoff(&p.From))
 		zVRIb(op, uint32(p.To.Reg), i2, i3, m4, asm)
 
 	case 111:
 		op, m4, _ := vop(p.As)
-		i2 := uint32(vregoff(ctxt, &p.From))
+		i2 := uint32(c.vregoff(&p.From))
 		zVRIc(op, uint32(p.To.Reg), uint32(p.Reg), i2, m4, asm)
 
 	case 112:
 		op, m5, _ := vop(p.As)
-		i4 := uint32(vregoff(ctxt, p.From3))
+		i4 := uint32(c.vregoff(p.From3))
 		zVRId(op, uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), i4, m5, asm)
 
 	case 113:
 		op, m4, _ := vop(p.As)
 		m5 := singleElementMask(p.As)
-		i3 := uint32(vregoff(ctxt, &p.From))
+		i3 := uint32(c.vregoff(&p.From))
 		zVRIe(op, uint32(p.To.Reg), uint32(p.Reg), i3, m5, m4, asm)
 
 	case 114: // VRR-a
@@ -4054,25 +4065,25 @@ func asmout(ctxt *obj.Link, p *obj.Prog, asm *[]byte) {
 	case 123: // VPDI $m4, V2, V3, V1
 		op, _, _ := vop(p.As)
-		m4 := regoff(ctxt, p.From3)
+		m4 := c.regoff(p.From3)
 		zVRRc(op, uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), 0, 0, uint32(m4), asm)
 	}
 }
 
-func vregoff(ctxt *obj.Link, a *obj.Addr) int64 {
-	ctxt.Instoffset = 0
+func (c *ctxtz) vregoff(a *obj.Addr) int64 {
+	c.instoffset = 0
 	if a != nil {
-		aclass(ctxt, a)
+		c.aclass(a)
 	}
-	return ctxt.Instoffset
+	return c.instoffset
 }
 
-func regoff(ctxt *obj.Link, a *obj.Addr) int32 {
-	return int32(vregoff(ctxt, a))
+func (c *ctxtz) regoff(a *obj.Addr) int32 {
+	return int32(c.vregoff(a))
 }
 
 // zopload returns the RXY op for the given load
-func zopload(ctxt *obj.Link, a obj.As) uint32 {
+func (c *ctxtz) zopload(a obj.As) uint32 {
 	switch a {
 	// fixed point load
 	case AMOVD:
@@ -4105,12 +4116,12 @@ func zopload(ctxt *obj.Link, a obj.As) uint32 {
 		return op_LRVH
 	}
 
-	ctxt.Diag("unknown store opcode %v", a)
+	c.ctxt.Diag("unknown store opcode %v", a)
 	return 0
 }
 
 // zopstore returns the RXY op for the given store
-func zopstore(ctxt *obj.Link, a obj.As) uint32 {
+func (c *ctxtz) zopstore(a obj.As) uint32 {
 	switch a {
 	// fixed point store
 	case AMOVD:
@@ -4137,12 +4148,12 @@ func zopstore(ctxt *obj.Link, a obj.As) uint32 {
 		return op_STRVH
 	}
 
-	ctxt.Diag("unknown store opcode %v", a)
+	c.ctxt.Diag("unknown store opcode %v", a)
 	return 0
 }
 
 // zoprre returns the RRE op for the given a
-func zoprre(ctxt *obj.Link, a obj.As) uint32 {
+func (c *ctxtz) zoprre(a obj.As) uint32 {
 	switch a {
 	case ACMP:
 		return op_CGR
@@ -4155,24 +4166,24 @@ func zoprre(ctxt *obj.Link, a obj.As) uint32 {
 	case ACEBR:
 		return op_CEBR
 	}
-	ctxt.Diag("unknown rre opcode %v", a)
+	c.ctxt.Diag("unknown rre opcode %v", a)
 	return 0
 }
 
 // zoprr returns the RR op for the given a
-func zoprr(ctxt *obj.Link, a obj.As) uint32 {
+func (c *ctxtz) zoprr(a obj.As) uint32 {
 	switch a {
 	case ACMPW:
 		return op_CR
 	case ACMPWU:
 		return op_CLR
 	}
-	ctxt.Diag("unknown rr opcode %v", a)
+	c.ctxt.Diag("unknown rr opcode %v", a)
 	return 0
 }
 
 // zopril returns the RIL op for the given a
-func zopril(ctxt *obj.Link, a obj.As) uint32 {
+func (c *ctxtz) zopril(a obj.As) uint32 {
 	switch a {
 	case ACMP:
 		return op_CGFI
@@ -4183,7 +4194,7 @@ func zopril(ctxt *obj.Link, a obj.As) uint32 {
 	case ACMPWU:
 		return op_CLFI
 	}
-	ctxt.Diag("unknown ril opcode %v", a)
+	c.ctxt.Diag("unknown ril opcode %v", a)
 	return 0
 }
@@ -39,13 +39,11 @@ func progedit(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc) {
 	p.From.Class = 0
 	p.To.Class = 0
 
+	c := ctxtz{ctxt: ctxt, newprog: newprog}
+
 	// Rewrite BR/BL to symbol as TYPE_BRANCH.
 	switch p.As {
-	case ABR,
-		ABL,
-		obj.ARET,
-		obj.ADUFFZERO,
-		obj.ADUFFCOPY:
+	case ABR, ABL, obj.ARET, obj.ADUFFZERO, obj.ADUFFCOPY:
 		if p.To.Sym != nil {
 			p.To.Type = obj.TYPE_BRANCH
 		}
@@ -107,13 +105,13 @@ func progedit(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc) {
 		}
 	}
 
-	if ctxt.Flag_dynlink {
-		rewriteToUseGot(ctxt, p, newprog)
+	if c.ctxt.Flag_dynlink {
+		c.rewriteToUseGot(p)
 	}
 }
 
 // Rewrite p, if necessary, to access global data via the global offset table.
-func rewriteToUseGot(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc) {
+func (c *ctxtz) rewriteToUseGot(p *obj.Prog) {
 	// At the moment EXRL instructions are not emitted by the compiler and only reference local symbols in
 	// assembly code.
 	if p.As == AEXRL {
@@ -127,13 +125,13 @@ func rewriteToUseGot(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc) {
 		// MOVD $sym, Rx becomes MOVD sym@GOT, Rx
 		// MOVD $sym+<off>, Rx becomes MOVD sym@GOT, Rx; ADD <off>, Rx
 		if p.To.Type != obj.TYPE_REG || p.As != AMOVD {
-			ctxt.Diag("do not know how to handle LEA-type insn to non-register in %v with -dynlink", p)
+			c.ctxt.Diag("do not know how to handle LEA-type insn to non-register in %v with -dynlink", p)
 		}
 		p.From.Type = obj.TYPE_MEM
 		p.From.Name = obj.NAME_GOTREF
 		q := p
 		if p.From.Offset != 0 {
-			q = obj.Appendp(p, newprog)
+			q = obj.Appendp(p, c.newprog)
 			q.As = AADD
 			q.From.Type = obj.TYPE_CONST
 			q.From.Offset = p.From.Offset
@@ -142,7 +140,7 @@ func rewriteToUseGot(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc) {
 		}
 	}
 	if p.From3 != nil && p.From3.Name == obj.NAME_EXTERN {
-		ctxt.Diag("don't know how to handle %v with -dynlink", p)
+		c.ctxt.Diag("don't know how to handle %v with -dynlink", p)
 	}
 	var source *obj.Addr
 	// MOVD sym, Ry becomes MOVD sym@GOT, REGTMP; MOVD (REGTMP), Ry
@@ -150,7 +148,7 @@ func rewriteToUseGot(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc) {
 	// An addition may be inserted between the two MOVs if there is an offset.
 	if p.From.Name == obj.NAME_EXTERN && !p.From.Sym.Local() {
 		if p.To.Name == obj.NAME_EXTERN && !p.To.Sym.Local() {
-			ctxt.Diag("cannot handle NAME_EXTERN on both sides in %v with -dynlink", p)
+			c.ctxt.Diag("cannot handle NAME_EXTERN on both sides in %v with -dynlink", p)
 		}
 		source = &p.From
 	} else if p.To.Name == obj.NAME_EXTERN && !p.To.Sym.Local() {
@@ -165,10 +163,10 @@ func rewriteToUseGot(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc) {
 		return
 	}
 	if source.Type != obj.TYPE_MEM {
-		ctxt.Diag("don't know how to handle %v with -dynlink", p)
+		c.ctxt.Diag("don't know how to handle %v with -dynlink", p)
 	}
-	p1 := obj.Appendp(p, newprog)
-	p2 := obj.Appendp(p1, newprog)
+	p1 := obj.Appendp(p, c.newprog)
+	p2 := obj.Appendp(p1, c.newprog)
 	p1.As = AMOVD
 	p1.From.Type = obj.TYPE_MEM
@@ -196,13 +194,13 @@ func rewriteToUseGot(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc) {
 
 func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
 	// TODO(minux): add morestack short-cuts with small fixed frame-size.
-	ctxt.Cursym = cursym
 
 	if cursym.Text == nil || cursym.Text.Link == nil {
 		return
 	}
 
-	p := cursym.Text
+	c := ctxtz{ctxt: ctxt, cursym: cursym, newprog: newprog}
+
+	p := c.cursym.Text
 	textstksiz := p.To.Offset
 	if textstksiz == -8 {
 		// Compatibility hack.
@@ -210,16 +208,16 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
 		textstksiz = 0
 	}
 	if textstksiz%8 != 0 {
-		ctxt.Diag("frame size %d not a multiple of 8", textstksiz)
+		c.ctxt.Diag("frame size %d not a multiple of 8", textstksiz)
 	}
 	if p.From3.Offset&obj.NOFRAME != 0 {
 		if textstksiz != 0 {
-			ctxt.Diag("NOFRAME functions must have a frame size of 0, not %d", textstksiz)
+			c.ctxt.Diag("NOFRAME functions must have a frame size of 0, not %d", textstksiz)
 		}
 	}
 
-	cursym.Args = p.To.Val.(int32)
-	cursym.Locals = int32(textstksiz)
+	c.cursym.Args = p.To.Val.(int32)
+	c.cursym.Locals = int32(textstksiz)
 
 	/*
 	 * find leaf subroutines
@@ -228,7 +226,7 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
 	 */
 
 	var q *obj.Prog
-	for p := cursym.Text; p != nil; p = p.Link {
+	for p := c.cursym.Text; p != nil; p = p.Link {
 		switch p.As {
 		case obj.ATEXT:
 			q = p
@@ -236,7 +234,7 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
 		case ABL, ABCL:
 			q = p
-			cursym.Text.Mark &^= LEAF
+			c.cursym.Text.Mark &^= LEAF
 			fallthrough
 
 		case ABC,
@@ -287,7 +285,7 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
 	var pPre *obj.Prog
 	var pPreempt *obj.Prog
 	wasSplit := false
-	for p := cursym.Text; p != nil; p = p.Link {
+	for p := c.cursym.Text; p != nil; p = p.Link {
 		pLast = p
 		switch p.As {
 		case obj.ATEXT:
@@ -301,7 +299,7 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
 			if p.From3.Offset&obj.NOFRAME == 0 {
 				// If there is a stack frame at all, it includes
 				// space to save the LR.
-				autosize += int32(ctxt.FixedFrameSize())
+				autosize += int32(c.ctxt.FixedFrameSize())
 			}
 
 			if p.Mark&LEAF != 0 && autosize < obj.StackSmall {
@@ -315,7 +313,7 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
 			q := p
 			if p.From3.Offset&obj.NOSPLIT == 0 {
-				p, pPreempt = stacksplitPre(ctxt, p, newprog, autosize) // emit pre part of split check
+				p, pPreempt = c.stacksplitPre(p, autosize) // emit pre part of split check
 				pPre = p
 				wasSplit = true //need post part of split
 			}
@@ -326,7 +324,7 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
 			// Store link register before decrementing SP, so if a signal comes
 			// during the execution of the function prologue, the traceback
 			// code will not see a half-updated stack frame.
-			q = obj.Appendp(p, newprog)
+			q = obj.Appendp(p, c.newprog)
 			q.As = AMOVD
 			q.From.Type = obj.TYPE_REG
 			q.From.Reg = REG_LR
@@ -334,7 +332,7 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
 			q.To.Reg = REGSP
 			q.To.Offset = int64(-autosize)
 
-			q = obj.Appendp(q, newprog)
+			q = obj.Appendp(q, c.newprog)
 			q.As = AMOVD
 			q.From.Type = obj.TYPE_ADDR
 			q.From.Offset = int64(-autosize)
@@ -342,19 +340,19 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
q.To.Type = obj.TYPE_REG q.To.Type = obj.TYPE_REG
q.To.Reg = REGSP q.To.Reg = REGSP
q.Spadj = autosize q.Spadj = autosize
} else if cursym.Text.Mark&LEAF == 0 { } else if c.cursym.Text.Mark&LEAF == 0 {
// A very few functions that do not return to their caller // A very few functions that do not return to their caller
// (e.g. gogo) are not identified as leaves but still have // (e.g. gogo) are not identified as leaves but still have
// no frame. // no frame.
cursym.Text.Mark |= LEAF c.cursym.Text.Mark |= LEAF
} }
if cursym.Text.Mark&LEAF != 0 { if c.cursym.Text.Mark&LEAF != 0 {
cursym.Set(obj.AttrLeaf, true) c.cursym.Set(obj.AttrLeaf, true)
break break
} }
if cursym.Text.From3.Offset&obj.WRAPPER != 0 { if c.cursym.Text.From3.Offset&obj.WRAPPER != 0 {
// if(g->panic != nil && g->panic->argp == FP) g->panic->argp = bottom-of-frame // if(g->panic != nil && g->panic->argp == FP) g->panic->argp = bottom-of-frame
// //
// MOVD g_panic(g), R3 // MOVD g_panic(g), R3
...@@ -372,28 +370,28 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { ...@@ -372,28 +370,28 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
// The NOP is needed to give the jumps somewhere to land. // The NOP is needed to give the jumps somewhere to land.
// It is a liblink NOP, not a s390x NOP: it encodes to 0 instruction bytes. // It is a liblink NOP, not a s390x NOP: it encodes to 0 instruction bytes.
q = obj.Appendp(q, newprog) q = obj.Appendp(q, c.newprog)
q.As = AMOVD q.As = AMOVD
q.From.Type = obj.TYPE_MEM q.From.Type = obj.TYPE_MEM
q.From.Reg = REGG q.From.Reg = REGG
q.From.Offset = 4 * int64(ctxt.Arch.PtrSize) // G.panic q.From.Offset = 4 * int64(c.ctxt.Arch.PtrSize) // G.panic
q.To.Type = obj.TYPE_REG q.To.Type = obj.TYPE_REG
q.To.Reg = REG_R3 q.To.Reg = REG_R3
q = obj.Appendp(q, newprog) q = obj.Appendp(q, c.newprog)
q.As = ACMP q.As = ACMP
q.From.Type = obj.TYPE_REG q.From.Type = obj.TYPE_REG
q.From.Reg = REG_R3 q.From.Reg = REG_R3
q.To.Type = obj.TYPE_CONST q.To.Type = obj.TYPE_CONST
q.To.Offset = 0 q.To.Offset = 0
q = obj.Appendp(q, newprog) q = obj.Appendp(q, c.newprog)
q.As = ABEQ q.As = ABEQ
q.To.Type = obj.TYPE_BRANCH q.To.Type = obj.TYPE_BRANCH
p1 := q p1 := q
q = obj.Appendp(q, newprog) q = obj.Appendp(q, c.newprog)
q.As = AMOVD q.As = AMOVD
q.From.Type = obj.TYPE_MEM q.From.Type = obj.TYPE_MEM
q.From.Reg = REG_R3 q.From.Reg = REG_R3
...@@ -401,35 +399,35 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { ...@@ -401,35 +399,35 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
q.To.Type = obj.TYPE_REG q.To.Type = obj.TYPE_REG
q.To.Reg = REG_R4 q.To.Reg = REG_R4
q = obj.Appendp(q, newprog) q = obj.Appendp(q, c.newprog)
q.As = AADD q.As = AADD
q.From.Type = obj.TYPE_CONST q.From.Type = obj.TYPE_CONST
q.From.Offset = int64(autosize) + ctxt.FixedFrameSize() q.From.Offset = int64(autosize) + c.ctxt.FixedFrameSize()
q.Reg = REGSP q.Reg = REGSP
q.To.Type = obj.TYPE_REG q.To.Type = obj.TYPE_REG
q.To.Reg = REG_R5 q.To.Reg = REG_R5
q = obj.Appendp(q, newprog) q = obj.Appendp(q, c.newprog)
q.As = ACMP q.As = ACMP
q.From.Type = obj.TYPE_REG q.From.Type = obj.TYPE_REG
q.From.Reg = REG_R4 q.From.Reg = REG_R4
q.To.Type = obj.TYPE_REG q.To.Type = obj.TYPE_REG
q.To.Reg = REG_R5 q.To.Reg = REG_R5
q = obj.Appendp(q, newprog) q = obj.Appendp(q, c.newprog)
q.As = ABNE q.As = ABNE
q.To.Type = obj.TYPE_BRANCH q.To.Type = obj.TYPE_BRANCH
p2 := q p2 := q
q = obj.Appendp(q, newprog) q = obj.Appendp(q, c.newprog)
q.As = AADD q.As = AADD
q.From.Type = obj.TYPE_CONST q.From.Type = obj.TYPE_CONST
q.From.Offset = ctxt.FixedFrameSize() q.From.Offset = c.ctxt.FixedFrameSize()
q.Reg = REGSP q.Reg = REGSP
q.To.Type = obj.TYPE_REG q.To.Type = obj.TYPE_REG
q.To.Reg = REG_R6 q.To.Reg = REG_R6
q = obj.Appendp(q, newprog) q = obj.Appendp(q, c.newprog)
q.As = AMOVD q.As = AMOVD
q.From.Type = obj.TYPE_REG q.From.Type = obj.TYPE_REG
q.From.Reg = REG_R6 q.From.Reg = REG_R6
...@@ -437,7 +435,7 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { ...@@ -437,7 +435,7 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
q.To.Reg = REG_R3 q.To.Reg = REG_R3
q.To.Offset = 0 // Panic.argp q.To.Offset = 0 // Panic.argp
q = obj.Appendp(q, newprog) q = obj.Appendp(q, c.newprog)
q.As = obj.ANOP q.As = obj.ANOP
p1.Pcond = q p1.Pcond = q
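As a reading aid, the WRAPPER sequence above is the machine-code form of the pseudo-code in its leading comment. Here is a minimal Go sketch of that check, with invented stand-in types for the runtime's g and panic records (an illustration only, not runtime code):

package main

// Stand-in types for the runtime structures referenced by the comment above.
type panicRecord struct{ argp uintptr }
type goroutine struct{ curPanic *panicRecord }

// adjustPanicArgp mirrors the emitted check: if the active panic's argp is the
// caller's FP, repoint it at the bottom of the wrapper's new frame, as the
// comment above describes.
func adjustPanicArgp(gp *goroutine, callerFP, bottomOfFrame uintptr) {
	if gp.curPanic != nil && gp.curPanic.argp == callerFP {
		gp.curPanic.argp = bottomOfFrame
	}
}

func main() {
	g := &goroutine{curPanic: &panicRecord{argp: 0x1000}}
	adjustPanicArgp(g, 0x1000, 0x0f80)
}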
...@@ -447,7 +445,7 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { ...@@ -447,7 +445,7 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
case obj.ARET: case obj.ARET:
retTarget := p.To.Sym retTarget := p.To.Sym
if cursym.Text.Mark&LEAF != 0 { if c.cursym.Text.Mark&LEAF != 0 {
if autosize == 0 { if autosize == 0 {
p.As = ABR p.As = ABR
p.From = obj.Addr{} p.From = obj.Addr{}
...@@ -469,7 +467,7 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { ...@@ -469,7 +467,7 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
p.To.Reg = REGSP p.To.Reg = REGSP
p.Spadj = -autosize p.Spadj = -autosize
q = obj.Appendp(p, newprog) q = obj.Appendp(p, c.newprog)
q.As = ABR q.As = ABR
q.From = obj.Addr{} q.From = obj.Addr{}
q.To.Type = obj.TYPE_REG q.To.Type = obj.TYPE_REG
...@@ -489,7 +487,7 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { ...@@ -489,7 +487,7 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
q = p q = p
if autosize != 0 { if autosize != 0 {
q = obj.Appendp(q, newprog) q = obj.Appendp(q, c.newprog)
q.As = AADD q.As = AADD
q.From.Type = obj.TYPE_CONST q.From.Type = obj.TYPE_CONST
q.From.Offset = int64(autosize) q.From.Offset = int64(autosize)
...@@ -498,7 +496,7 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { ...@@ -498,7 +496,7 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
q.Spadj = -autosize q.Spadj = -autosize
} }
q = obj.Appendp(q, newprog) q = obj.Appendp(q, c.newprog)
q.As = ABR q.As = ABR
q.From = obj.Addr{} q.From = obj.Addr{}
if retTarget == nil { if retTarget == nil {
...@@ -518,22 +516,22 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { ...@@ -518,22 +516,22 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
} }
} }
if wasSplit { if wasSplit {
stacksplitPost(ctxt, pLast, pPre, pPreempt, newprog, autosize) // emit post part of split check c.stacksplitPost(pLast, pPre, pPreempt, autosize) // emit post part of split check
} }
} }
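The prologue and epilogue handling above hinges on autosize: the declared stack size plus room for the saved LR. A small sketch of that derivation under simplifying assumptions (frameSize is a hypothetical helper; the real preprocess also special-cases small leaf frames):

package main

import "fmt"

// frameSize is a hypothetical, simplified view of how preprocess arrives at the
// frame size it allocates: NOFRAME functions get exactly what they declared,
// everything else also reserves FixedFrameSize bytes to save the LR.
func frameSize(textstksiz int64, noframe bool, fixedFrameSize int64) int32 {
	autosize := int32(textstksiz)
	if !noframe {
		// If there is a stack frame at all, it includes space to save the LR.
		autosize += int32(fixedFrameSize)
	}
	return autosize
}

func main() {
	fmt.Println(frameSize(64, false, 8)) // 72, assuming FixedFrameSize is 8 on s390x
}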
func stacksplitPre(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc, framesize int32) (*obj.Prog, *obj.Prog) { func (c *ctxtz) stacksplitPre(p *obj.Prog, framesize int32) (*obj.Prog, *obj.Prog) {
var q *obj.Prog var q *obj.Prog
// MOVD g_stackguard(g), R3 // MOVD g_stackguard(g), R3
p = obj.Appendp(p, newprog) p = obj.Appendp(p, c.newprog)
p.As = AMOVD p.As = AMOVD
p.From.Type = obj.TYPE_MEM p.From.Type = obj.TYPE_MEM
p.From.Reg = REGG p.From.Reg = REGG
p.From.Offset = 2 * int64(ctxt.Arch.PtrSize) // G.stackguard0 p.From.Offset = 2 * int64(c.ctxt.Arch.PtrSize) // G.stackguard0
if ctxt.Cursym.CFunc() { if c.cursym.CFunc() {
p.From.Offset = 3 * int64(ctxt.Arch.PtrSize) // G.stackguard1 p.From.Offset = 3 * int64(c.ctxt.Arch.PtrSize) // G.stackguard1
} }
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
p.To.Reg = REG_R3 p.To.Reg = REG_R3
...@@ -548,7 +546,7 @@ func stacksplitPre(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc, framesize ...@@ -548,7 +546,7 @@ func stacksplitPre(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc, framesize
// q1: BLT done // q1: BLT done
p = obj.Appendp(p, newprog) p = obj.Appendp(p, c.newprog)
//q1 = p //q1 = p
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = REG_R3 p.From.Reg = REG_R3
...@@ -571,7 +569,7 @@ func stacksplitPre(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc, framesize ...@@ -571,7 +569,7 @@ func stacksplitPre(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc, framesize
// large stack: SP-framesize < stackguard-StackSmall // large stack: SP-framesize < stackguard-StackSmall
// ADD $-(framesize-StackSmall), SP, R4 // ADD $-(framesize-StackSmall), SP, R4
// CMP stackguard, R4 // CMP stackguard, R4
p = obj.Appendp(p, newprog) p = obj.Appendp(p, c.newprog)
p.As = AADD p.As = AADD
p.From.Type = obj.TYPE_CONST p.From.Type = obj.TYPE_CONST
...@@ -580,7 +578,7 @@ func stacksplitPre(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc, framesize ...@@ -580,7 +578,7 @@ func stacksplitPre(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc, framesize
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
p.To.Reg = REG_R4 p.To.Reg = REG_R4
p = obj.Appendp(p, newprog) p = obj.Appendp(p, c.newprog)
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = REG_R3 p.From.Reg = REG_R3
p.Reg = REG_R4 p.Reg = REG_R4
...@@ -603,7 +601,7 @@ func stacksplitPre(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc, framesize ...@@ -603,7 +601,7 @@ func stacksplitPre(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc, framesize
// SUB R3, R4 // SUB R3, R4
// MOVD $(framesize+(StackGuard-StackSmall)), TEMP // MOVD $(framesize+(StackGuard-StackSmall)), TEMP
// CMPUBGE TEMP, R4 // CMPUBGE TEMP, R4
p = obj.Appendp(p, newprog) p = obj.Appendp(p, c.newprog)
p.As = ACMP p.As = ACMP
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
...@@ -611,12 +609,12 @@ func stacksplitPre(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc, framesize ...@@ -611,12 +609,12 @@ func stacksplitPre(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc, framesize
p.To.Type = obj.TYPE_CONST p.To.Type = obj.TYPE_CONST
p.To.Offset = obj.StackPreempt p.To.Offset = obj.StackPreempt
p = obj.Appendp(p, newprog) p = obj.Appendp(p, c.newprog)
q = p q = p
p.As = ABEQ p.As = ABEQ
p.To.Type = obj.TYPE_BRANCH p.To.Type = obj.TYPE_BRANCH
p = obj.Appendp(p, newprog) p = obj.Appendp(p, c.newprog)
p.As = AADD p.As = AADD
p.From.Type = obj.TYPE_CONST p.From.Type = obj.TYPE_CONST
p.From.Offset = obj.StackGuard p.From.Offset = obj.StackGuard
...@@ -624,21 +622,21 @@ func stacksplitPre(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc, framesize ...@@ -624,21 +622,21 @@ func stacksplitPre(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc, framesize
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
p.To.Reg = REG_R4 p.To.Reg = REG_R4
p = obj.Appendp(p, newprog) p = obj.Appendp(p, c.newprog)
p.As = ASUB p.As = ASUB
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = REG_R3 p.From.Reg = REG_R3
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
p.To.Reg = REG_R4 p.To.Reg = REG_R4
p = obj.Appendp(p, newprog) p = obj.Appendp(p, c.newprog)
p.As = AMOVD p.As = AMOVD
p.From.Type = obj.TYPE_CONST p.From.Type = obj.TYPE_CONST
p.From.Offset = int64(framesize) + obj.StackGuard - obj.StackSmall p.From.Offset = int64(framesize) + obj.StackGuard - obj.StackSmall
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
p.To.Reg = REGTMP p.To.Reg = REGTMP
p = obj.Appendp(p, newprog) p = obj.Appendp(p, c.newprog)
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = REGTMP p.From.Reg = REGTMP
p.Reg = REG_R4 p.Reg = REG_R4
...@@ -649,16 +647,16 @@ func stacksplitPre(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc, framesize ...@@ -649,16 +647,16 @@ func stacksplitPre(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc, framesize
return p, q return p, q
} }
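The instruction sequences emitted by stacksplitPre encode the usual Go stack-split conditions. A rough Go sketch of what they compute, assuming sp and stackguard are the raw register values and that the constants stand in for obj.StackSmall, obj.StackBig and obj.StackGuard (needsSplit is an invented name; the huge-stack case is approximate and elides the StackPreempt check shown above):

package main

import "fmt"

const (
	stackSmall = 128  // assumed stand-in for obj.StackSmall
	stackBig   = 4096 // assumed stand-in for obj.StackBig
	stackGuard = 880  // assumed stand-in for obj.StackGuard
)

// needsSplit is a hypothetical restatement of the checks emitted above.
func needsSplit(sp, stackguard uint64, framesize int64) bool {
	switch {
	case framesize <= stackSmall:
		// small stack: SP < stackguard
		return sp < stackguard
	case framesize <= stackBig:
		// large stack: SP-framesize < stackguard-StackSmall
		return sp-uint64(framesize) < stackguard-stackSmall
	default:
		// huge stack: add StackGuard to both sides first so the subtraction
		// cannot wrap; roughly SP-stackguard+StackGuard < framesize+(StackGuard-StackSmall)
		return sp-stackguard+stackGuard < uint64(framesize)+(stackGuard-stackSmall)
	}
}

func main() {
	fmt.Println(needsSplit(0x7000, 0x7400, 64)) // true: SP is already below the guard
}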
func stacksplitPost(ctxt *obj.Link, p *obj.Prog, pPre *obj.Prog, pPreempt *obj.Prog, newprog obj.ProgAlloc, framesize int32) *obj.Prog { func (c *ctxtz) stacksplitPost(p *obj.Prog, pPre *obj.Prog, pPreempt *obj.Prog, framesize int32) *obj.Prog {
// Now we are at the end of the function, but logically // Now we are at the end of the function, but logically
// we are still in function prologue. We need to fix the // we are still in function prologue. We need to fix the
// SP data and PCDATA. // SP data and PCDATA.
spfix := obj.Appendp(p, newprog) spfix := obj.Appendp(p, c.newprog)
spfix.As = obj.ANOP spfix.As = obj.ANOP
spfix.Spadj = -framesize spfix.Spadj = -framesize
pcdata := obj.Appendp(spfix, newprog) pcdata := obj.Appendp(spfix, c.newprog)
pcdata.Pos = ctxt.Cursym.Text.Pos pcdata.Pos = c.cursym.Text.Pos
pcdata.As = obj.APCDATA pcdata.As = obj.APCDATA
pcdata.From.Type = obj.TYPE_CONST pcdata.From.Type = obj.TYPE_CONST
pcdata.From.Offset = obj.PCDATA_StackMapIndex pcdata.From.Offset = obj.PCDATA_StackMapIndex
...@@ -666,7 +664,7 @@ func stacksplitPost(ctxt *obj.Link, p *obj.Prog, pPre *obj.Prog, pPreempt *obj.P ...@@ -666,7 +664,7 @@ func stacksplitPost(ctxt *obj.Link, p *obj.Prog, pPre *obj.Prog, pPreempt *obj.P
pcdata.To.Offset = -1 // pcdata starts at -1 at function entry pcdata.To.Offset = -1 // pcdata starts at -1 at function entry
// MOVD LR, R5 // MOVD LR, R5
p = obj.Appendp(pcdata, newprog) p = obj.Appendp(pcdata, c.newprog)
pPre.Pcond = p pPre.Pcond = p
p.As = AMOVD p.As = AMOVD
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
...@@ -678,24 +676,24 @@ func stacksplitPost(ctxt *obj.Link, p *obj.Prog, pPre *obj.Prog, pPreempt *obj.P ...@@ -678,24 +676,24 @@ func stacksplitPost(ctxt *obj.Link, p *obj.Prog, pPre *obj.Prog, pPreempt *obj.P
} }
// BL runtime.morestack(SB) // BL runtime.morestack(SB)
p = obj.Appendp(p, newprog) p = obj.Appendp(p, c.newprog)
p.As = ABL p.As = ABL
p.To.Type = obj.TYPE_BRANCH p.To.Type = obj.TYPE_BRANCH
if ctxt.Cursym.CFunc() { if c.cursym.CFunc() {
p.To.Sym = ctxt.Lookup("runtime.morestackc", 0) p.To.Sym = c.ctxt.Lookup("runtime.morestackc", 0)
} else if ctxt.Cursym.Text.From3.Offset&obj.NEEDCTXT == 0 { } else if c.cursym.Text.From3.Offset&obj.NEEDCTXT == 0 {
p.To.Sym = ctxt.Lookup("runtime.morestack_noctxt", 0) p.To.Sym = c.ctxt.Lookup("runtime.morestack_noctxt", 0)
} else { } else {
p.To.Sym = ctxt.Lookup("runtime.morestack", 0) p.To.Sym = c.ctxt.Lookup("runtime.morestack", 0)
} }
// BR start // BR start
p = obj.Appendp(p, newprog) p = obj.Appendp(p, c.newprog)
p.As = ABR p.As = ABR
p.To.Type = obj.TYPE_BRANCH p.To.Type = obj.TYPE_BRANCH
p.Pcond = ctxt.Cursym.Text.Link p.Pcond = c.cursym.Text.Link
return p return p
} }
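The morestack target chosen above depends only on whether the function is a C function and whether it needs a context register. A tiny sketch of that selection (morestackName is an invented helper; the symbol names are the ones looked up above):

package main

import "fmt"

// morestackName restates the selection made in stacksplitPost: C functions get
// runtime.morestackc, functions that do not need a context register get
// runtime.morestack_noctxt, and everything else gets runtime.morestack.
func morestackName(cFunc, needCtxt bool) string {
	switch {
	case cFunc:
		return "runtime.morestackc"
	case !needCtxt:
		return "runtime.morestack_noctxt"
	default:
		return "runtime.morestack"
	}
}

func main() {
	fmt.Println(morestackName(false, false)) // runtime.morestack_noctxt
}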
......