Commit 3134ab3c authored by Keith Randall

cmd/compile: redo nil checks

Get rid of BlockCheck. Josh goaded me into it, and I went
down a rabbit hole making it happen.

NilCheck now panics if the pointer is nil and returns void, as before.
BlockCheck is gone, and NilCheck is no longer a Control value for
any block. It just exists (and deadcode knows not to throw it away).
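
In practice the builder side now just emits the check as an ordinary
value in the current block; no block gets split. (This mirrors the
gc/ssa.go hunk further down; the comments here are added for
explanation.)

    // nilCheck emits an automatically inserted nil check.
    func (s *state) nilCheck(ptr *ssa.Value) {
        if Disable_checknil != 0 {
            return
        }
        // Void-typed and not a block control; deadcode knows to keep
        // Void values alive.
        s.newValue2(ssa.OpNilCheck, ssa.TypeVoid, ptr, s.mem())
    }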

I rewrote the nilcheckelim pass to handle this case.  In particular,
there can now be multiple NilCheck ops per block.
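
Roughly, the per-block part of that scan looks like this (a minimal
sketch written as if inside package ssa, not the literal code in
nilcheck.go; the real pass walks the dominator tree so the nonNil
facts flow into dominated blocks):

    func nilCheckScanBlock(b *Block, nonNil map[ID]bool) {
        for _, v := range b.Values {
            if v.Op != OpNilCheck {
                continue
            }
            ptr := v.Args[0]
            if nonNil[ptr.ID] {
                // ptr was already proven non-nil by an earlier check
                // in this block or in a dominating block, so this
                // check is redundant; the pass rewrites it away and
                // lets deadcode delete it.
                continue
            }
            // Once this check executes, ptr is known to be non-nil.
            nonNil[ptr.ID] = true
        }
    }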

I moved all of the arch-dependent nil check elimination done as
part of ssaGenValue into its own proper pass, so we don't have to
duplicate that code for every architecture.
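
The deleted per-arch blocks below all implement the same trick: after
scheduling, an explicit nil check is unnecessary if a following memory
op in the same block is guaranteed to fault on the same nil pointer
first. A rough sketch of how the shared pass ("late nilcheck",
nilcheckelim2 in the compile.go hunk) can express that once via the
new opcode flags (illustrative only, not the literal pass body):

    const minZeroPage = 4096 // assumption: offsets below this fault on a nil base

    func lateNilCheckBlock(b *Block) {
        for i, v := range b.Values {
            if !opcodeTable[v.Op].nilCheck {
                continue
            }
            ptr := v.Args[0]
            for _, w := range b.Values[i+1:] {
                if opcodeTable[w.Op].faultOnNilArg0 && w.Args[0] == ptr &&
                    w.Aux == nil && w.AuxInt >= 0 && w.AuxInt < minZeroPage {
                    // w faults if ptr is nil, so the explicit check can
                    // be dropped. (The real pass also stops once it hits
                    // a store it cannot move the check past.)
                    break
                }
            }
        }
    }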

Making the arch-dependent nil check elimination its own pass means I
needed to add a bunch of flags to the opcode table so I could write
that code without referring to arch-dependent ops everywhere.
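
Concretely, the flags are set in the gen/*Ops.go tables (these two
lines are taken from the hunks below): nilCheck marks the op that
performs the check, and faultOnNilArg0/faultOnNilArg1 mark memory ops
that will fault anyway if the corresponding pointer argument is nil.

    {name: "LoweredNilCheck", argLength: 2, reg: regInfo{inputs: []regMask{gpsp}}, clobberFlags: true, nilCheck: true},
    {name: "MOVLstore", argLength: 3, reg: gpstore, asm: "MOVL", aux: "SymOff", typ: "Mem", faultOnNilArg0: true}, // store 4 bytes in arg1 to arg0+auxint+aux. arg2=mem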

Change-Id: I419f891ac9b0de313033ff09115c374163416a9f
Reviewed-on: https://go-review.googlesource.com/29120
Run-TryBot: Keith Randall <khr@golang.org>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: David Chase <drchase@google.com>
parent f9e9412c
@@ -905,64 +905,6 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
case ssa.OpKeepAlive:
gc.KeepAlive(v)
case ssa.OpAMD64LoweredNilCheck:
// Optimization - if the subsequent block has a load or store
// at the same address, we don't need to issue this instruction.
mem := v.Args[1]
for _, w := range v.Block.Succs[0].Block().Values {
if w.Op == ssa.OpPhi {
if w.Type.IsMemory() {
mem = w
}
continue
}
if len(w.Args) == 0 || !w.Args[len(w.Args)-1].Type.IsMemory() {
// w doesn't use a store - can't be a memory op.
continue
}
if w.Args[len(w.Args)-1] != mem {
v.Fatalf("wrong store after nilcheck v=%s w=%s", v, w)
}
switch w.Op {
case ssa.OpAMD64MOVQload, ssa.OpAMD64MOVLload, ssa.OpAMD64MOVWload, ssa.OpAMD64MOVBload,
ssa.OpAMD64MOVQstore, ssa.OpAMD64MOVLstore, ssa.OpAMD64MOVWstore, ssa.OpAMD64MOVBstore,
ssa.OpAMD64MOVBQSXload, ssa.OpAMD64MOVWQSXload, ssa.OpAMD64MOVLQSXload,
ssa.OpAMD64MOVSSload, ssa.OpAMD64MOVSDload, ssa.OpAMD64MOVOload,
ssa.OpAMD64MOVSSstore, ssa.OpAMD64MOVSDstore, ssa.OpAMD64MOVOstore,
ssa.OpAMD64MOVQatomicload, ssa.OpAMD64MOVLatomicload,
ssa.OpAMD64CMPXCHGQlock, ssa.OpAMD64CMPXCHGLlock,
ssa.OpAMD64ANDBlock, ssa.OpAMD64ORBlock:
if w.Args[0] == v.Args[0] && w.Aux == nil && w.AuxInt >= 0 && w.AuxInt < minZeroPage {
if gc.Debug_checknil != 0 && int(v.Line) > 1 {
gc.Warnl(v.Line, "removed nil check")
}
return
}
case ssa.OpAMD64XCHGL, ssa.OpAMD64XCHGQ, ssa.OpAMD64XADDLlock, ssa.OpAMD64XADDQlock:
if w.Args[1] == v.Args[0] && w.Aux == nil && w.AuxInt >= 0 && w.AuxInt < minZeroPage {
if gc.Debug_checknil != 0 && int(v.Line) > 1 {
gc.Warnl(v.Line, "removed nil check")
}
return
}
case ssa.OpAMD64MOVQstoreconst, ssa.OpAMD64MOVLstoreconst, ssa.OpAMD64MOVWstoreconst, ssa.OpAMD64MOVBstoreconst:
off := ssa.ValAndOff(v.AuxInt).Off()
if w.Args[0] == v.Args[0] && w.Aux == nil && off >= 0 && off < minZeroPage {
if gc.Debug_checknil != 0 && int(v.Line) > 1 {
gc.Warnl(v.Line, "removed nil check")
}
return
}
}
if w.Type.IsMemory() || w.Type.IsTuple() && w.Type.FieldType(1).IsMemory() {
if w.Op == ssa.OpVarDef || w.Op == ssa.OpVarKill || w.Op == ssa.OpVarLive {
// these ops are OK
mem = w
continue
}
// We can't delay the nil check past the next store.
break
}
}
// Issue a load which will fault if the input is nil.
// TODO: We currently use the 2-byte instruction TESTB AX, (reg).
// Should we use the 3-byte TESTB $0, (reg) instead? It is larger
@@ -1065,7 +1007,7 @@ func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
s.SetLineno(b.Line)
switch b.Kind {
case ssa.BlockPlain, ssa.BlockCheck:
case ssa.BlockPlain:
if b.Succs[0].Block() != next {
p := gc.Prog(obj.AJMP)
p.To.Type = obj.TYPE_BRANCH
...
@@ -771,63 +771,6 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.To.Sym = gc.Linksym(gc.Pkglookup("duffcopy", gc.Runtimepkg))
p.To.Offset = v.AuxInt
case ssa.OpARMLoweredNilCheck:
// Optimization - if the subsequent block has a load or store
// at the same address, we don't need to issue this instruction.
mem := v.Args[1]
for _, w := range v.Block.Succs[0].Block().Values {
if w.Op == ssa.OpPhi {
if w.Type.IsMemory() {
mem = w
}
continue
}
if len(w.Args) == 0 || !w.Args[len(w.Args)-1].Type.IsMemory() {
// w doesn't use a store - can't be a memory op.
continue
}
if w.Args[len(w.Args)-1] != mem {
v.Fatalf("wrong store after nilcheck v=%s w=%s", v, w)
}
switch w.Op {
case ssa.OpARMMOVBload, ssa.OpARMMOVBUload, ssa.OpARMMOVHload, ssa.OpARMMOVHUload,
ssa.OpARMMOVWload, ssa.OpARMMOVFload, ssa.OpARMMOVDload,
ssa.OpARMMOVBstore, ssa.OpARMMOVHstore, ssa.OpARMMOVWstore,
ssa.OpARMMOVFstore, ssa.OpARMMOVDstore:
// arg0 is ptr, auxint is offset
if w.Args[0] == v.Args[0] && w.Aux == nil && w.AuxInt >= 0 && w.AuxInt < minZeroPage {
if gc.Debug_checknil != 0 && int(v.Line) > 1 {
gc.Warnl(v.Line, "removed nil check")
}
return
}
case ssa.OpARMDUFFZERO, ssa.OpARMLoweredZero:
// arg0 is ptr
if w.Args[0] == v.Args[0] {
if gc.Debug_checknil != 0 && int(v.Line) > 1 {
gc.Warnl(v.Line, "removed nil check")
}
return
}
case ssa.OpARMDUFFCOPY, ssa.OpARMLoweredMove:
// arg0 is dst ptr, arg1 is src ptr
if w.Args[0] == v.Args[0] || w.Args[1] == v.Args[0] {
if gc.Debug_checknil != 0 && int(v.Line) > 1 {
gc.Warnl(v.Line, "removed nil check")
}
return
}
default:
}
if w.Type.IsMemory() {
if w.Op == ssa.OpVarDef || w.Op == ssa.OpVarKill || w.Op == ssa.OpVarLive {
// these ops are OK
mem = w
continue
}
// We can't delay the nil check past the next store.
break
}
}
// Issue a load which will fault if arg is nil.
p := gc.Prog(arm.AMOVB)
p.From.Type = obj.TYPE_MEM
@@ -994,7 +937,7 @@ func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
s.SetLineno(b.Line)
switch b.Kind {
case ssa.BlockPlain, ssa.BlockCheck:
case ssa.BlockPlain:
if b.Succs[0].Block() != next {
p := gc.Prog(obj.AJMP)
p.To.Type = obj.TYPE_BRANCH
...
@@ -771,71 +771,6 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
gc.Maxarg = v.AuxInt
}
case ssa.OpARM64LoweredNilCheck:
// Optimization - if the subsequent block has a load or store
// at the same address, we don't need to issue this instruction.
mem := v.Args[1]
for _, w := range v.Block.Succs[0].Block().Values {
if w.Op == ssa.OpPhi {
if w.Type.IsMemory() {
mem = w
}
continue
}
if len(w.Args) == 0 || !w.Args[len(w.Args)-1].Type.IsMemory() {
// w doesn't use a store - can't be a memory op.
continue
}
if w.Args[len(w.Args)-1] != mem {
v.Fatalf("wrong store after nilcheck v=%s w=%s", v, w)
}
switch w.Op {
case ssa.OpARM64MOVBload, ssa.OpARM64MOVBUload, ssa.OpARM64MOVHload, ssa.OpARM64MOVHUload,
ssa.OpARM64MOVWload, ssa.OpARM64MOVWUload, ssa.OpARM64MOVDload,
ssa.OpARM64FMOVSload, ssa.OpARM64FMOVDload,
ssa.OpARM64LDAR, ssa.OpARM64LDARW,
ssa.OpARM64MOVBstore, ssa.OpARM64MOVHstore, ssa.OpARM64MOVWstore, ssa.OpARM64MOVDstore,
ssa.OpARM64FMOVSstore, ssa.OpARM64FMOVDstore,
ssa.OpARM64MOVBstorezero, ssa.OpARM64MOVHstorezero, ssa.OpARM64MOVWstorezero, ssa.OpARM64MOVDstorezero,
ssa.OpARM64STLR, ssa.OpARM64STLRW,
ssa.OpARM64LoweredAtomicExchange64, ssa.OpARM64LoweredAtomicExchange32,
ssa.OpARM64LoweredAtomicAdd64, ssa.OpARM64LoweredAtomicAdd32,
ssa.OpARM64LoweredAtomicCas64, ssa.OpARM64LoweredAtomicCas32,
ssa.OpARM64LoweredAtomicAnd8, ssa.OpARM64LoweredAtomicOr8:
// arg0 is ptr, auxint is offset (atomic ops have auxint 0)
if w.Args[0] == v.Args[0] && w.Aux == nil && w.AuxInt >= 0 && w.AuxInt < minZeroPage {
if gc.Debug_checknil != 0 && int(v.Line) > 1 {
gc.Warnl(v.Line, "removed nil check")
}
return
}
case ssa.OpARM64DUFFZERO, ssa.OpARM64LoweredZero:
// arg0 is ptr
if w.Args[0] == v.Args[0] {
if gc.Debug_checknil != 0 && int(v.Line) > 1 {
gc.Warnl(v.Line, "removed nil check")
}
return
}
case ssa.OpARM64LoweredMove:
// arg0 is dst ptr, arg1 is src ptr
if w.Args[0] == v.Args[0] || w.Args[1] == v.Args[0] {
if gc.Debug_checknil != 0 && int(v.Line) > 1 {
gc.Warnl(v.Line, "removed nil check")
}
return
}
default:
}
if w.Type.IsMemory() || w.Type.IsTuple() && w.Type.FieldType(1).IsMemory() {
if w.Op == ssa.OpVarDef || w.Op == ssa.OpVarKill || w.Op == ssa.OpVarLive {
// these ops are OK
mem = w
continue
}
// We can't delay the nil check past the next store.
break
}
}
// Issue a load which will fault if arg is nil.
p := gc.Prog(arm64.AMOVB)
p.From.Type = obj.TYPE_MEM
@@ -920,7 +855,7 @@ func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
s.SetLineno(b.Line)
switch b.Kind {
case ssa.BlockPlain, ssa.BlockCheck:
case ssa.BlockPlain:
if b.Succs[0].Block() != next {
p := gc.Prog(obj.AJMP)
p.To.Type = obj.TYPE_BRANCH
...
@@ -3129,20 +3129,13 @@ func (s *state) exprPtr(n *Node, bounded bool, lineno int32) *ssa.Value {
}
// nilCheck generates nil pointer checking code.
// Starts a new block on return, unless nil checks are disabled.
// Used only for automatically inserted nil checks,
// not for user code like 'x != nil'.
func (s *state) nilCheck(ptr *ssa.Value) {
if Disable_checknil != 0 {
return
}
chk := s.newValue2(ssa.OpNilCheck, ssa.TypeVoid, ptr, s.mem())
s.newValue2(ssa.OpNilCheck, ssa.TypeVoid, ptr, s.mem())
b := s.endBlock()
b.Kind = ssa.BlockCheck
b.SetControl(chk)
bNext := s.f.NewBlock(ssa.BlockPlain)
b.AddEdgeTo(bNext)
s.startBlock(bNext)
}
// boundsCheck generates bounds checking code. Checks if 0 <= idx < len, branches to exit if not.
...
@@ -638,65 +638,6 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
gc.Maxarg = v.AuxInt
}
case ssa.OpMIPS64LoweredNilCheck:
// Optimization - if the subsequent block has a load or store
// at the same address, we don't need to issue this instruction.
mem := v.Args[1]
for _, w := range v.Block.Succs[0].Block().Values {
if w.Op == ssa.OpPhi {
if w.Type.IsMemory() {
mem = w
}
continue
}
if len(w.Args) == 0 || !w.Args[len(w.Args)-1].Type.IsMemory() {
// w doesn't use a store - can't be a memory op.
continue
}
if w.Args[len(w.Args)-1] != mem {
v.Fatalf("wrong store after nilcheck v=%s w=%s", v, w)
}
switch w.Op {
case ssa.OpMIPS64MOVBload, ssa.OpMIPS64MOVBUload, ssa.OpMIPS64MOVHload, ssa.OpMIPS64MOVHUload,
ssa.OpMIPS64MOVWload, ssa.OpMIPS64MOVWUload, ssa.OpMIPS64MOVVload,
ssa.OpMIPS64MOVFload, ssa.OpMIPS64MOVDload,
ssa.OpMIPS64MOVBstore, ssa.OpMIPS64MOVHstore, ssa.OpMIPS64MOVWstore, ssa.OpMIPS64MOVVstore,
ssa.OpMIPS64MOVFstore, ssa.OpMIPS64MOVDstore,
ssa.OpMIPS64MOVBstorezero, ssa.OpMIPS64MOVHstorezero, ssa.OpMIPS64MOVWstorezero, ssa.OpMIPS64MOVVstorezero:
// arg0 is ptr, auxint is offset
if w.Args[0] == v.Args[0] && w.Aux == nil && w.AuxInt >= 0 && w.AuxInt < minZeroPage {
if gc.Debug_checknil != 0 && int(v.Line) > 1 {
gc.Warnl(v.Line, "removed nil check")
}
return
}
case ssa.OpMIPS64DUFFZERO, ssa.OpMIPS64LoweredZero:
// arg0 is ptr
if w.Args[0] == v.Args[0] {
if gc.Debug_checknil != 0 && int(v.Line) > 1 {
gc.Warnl(v.Line, "removed nil check")
}
return
}
case ssa.OpMIPS64LoweredMove:
// arg0 is dst ptr, arg1 is src ptr
if w.Args[0] == v.Args[0] || w.Args[1] == v.Args[0] {
if gc.Debug_checknil != 0 && int(v.Line) > 1 {
gc.Warnl(v.Line, "removed nil check")
}
return
}
default:
}
if w.Type.IsMemory() {
if w.Op == ssa.OpVarDef || w.Op == ssa.OpVarKill || w.Op == ssa.OpVarLive {
// these ops are OK
mem = w
continue
}
// We can't delay the nil check past the next store.
break
}
}
// Issue a load which will fault if arg is nil.
p := gc.Prog(mips.AMOVB)
p.From.Type = obj.TYPE_MEM
@@ -765,7 +706,7 @@ func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
s.SetLineno(b.Line)
switch b.Kind {
case ssa.BlockPlain, ssa.BlockCheck:
case ssa.BlockPlain:
if b.Succs[0].Block() != next {
p := gc.Prog(obj.AJMP)
p.To.Type = obj.TYPE_BRANCH
...
@@ -850,64 +850,6 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
gc.CheckLoweredPhi(v)
case ssa.OpPPC64LoweredNilCheck:
// Optimization - if the subsequent block has a load or store
// at the same address, we don't need to issue this instruction.
mem := v.Args[1]
for _, w := range v.Block.Succs[0].Block().Values {
if w.Op == ssa.OpPhi {
if w.Type.IsMemory() {
mem = w
}
continue
}
if len(w.Args) == 0 || !w.Args[len(w.Args)-1].Type.IsMemory() {
// w doesn't use a store - can't be a memory op.
continue
}
if w.Args[len(w.Args)-1] != mem {
v.Fatalf("wrong store after nilcheck v=%s w=%s", v, w)
}
switch w.Op {
case ssa.OpPPC64MOVBload, ssa.OpPPC64MOVBZload, ssa.OpPPC64MOVHload, ssa.OpPPC64MOVHZload,
ssa.OpPPC64MOVWload, ssa.OpPPC64MOVWZload, ssa.OpPPC64MOVDload, ssa.OpPPC64FMOVDload, ssa.OpPPC64FMOVSload,
ssa.OpPPC64MOVBstore, ssa.OpPPC64MOVHstore, ssa.OpPPC64MOVWstore,
ssa.OpPPC64MOVDstore, ssa.OpPPC64FMOVSstore, ssa.OpPPC64FMOVDstore,
ssa.OpPPC64MOVDstorezero, ssa.OpPPC64MOVWstorezero, ssa.OpPPC64MOVHstorezero, ssa.OpPPC64MOVBstorezero:
// arg0 is ptr, auxint is offset
if w.Args[0] == v.Args[0] && w.Aux == nil && w.AuxInt >= 0 && w.AuxInt < minZeroPage {
if gc.Debug_checknil != 0 && int(v.Line) > 1 {
gc.Warnl(v.Line, "removed nil check")
}
return
}
case ssa.OpPPC64LoweredZero: // ssa.OpPPC64DUFFZERO,
// arg0 is ptr
if w.Args[0] == v.Args[0] {
if gc.Debug_checknil != 0 && int(v.Line) > 1 {
gc.Warnl(v.Line, "removed nil check")
}
return
}
case ssa.OpPPC64LoweredMove: // ssa.OpPPC64DUFFCOPY,
// arg0 is dst ptr, arg1 is src ptr
if w.Args[0] == v.Args[0] || w.Args[1] == v.Args[0] {
if gc.Debug_checknil != 0 && int(v.Line) > 1 {
gc.Warnl(v.Line, "removed nil check")
}
return
}
default:
}
if w.Type.IsMemory() {
if w.Op == ssa.OpVarDef || w.Op == ssa.OpVarKill || w.Op == ssa.OpVarLive {
// these ops are OK
mem = w
continue
}
// We can't delay the nil check past the next store.
break
}
}
// Issue a load which will fault if arg is nil.
p := gc.Prog(ppc64.AMOVB)
p.From.Type = obj.TYPE_MEM
@@ -972,7 +914,7 @@ func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
}
case ssa.BlockPlain, ssa.BlockCheck:
case ssa.BlockPlain:
if b.Succs[0].Block() != next {
p := gc.Prog(obj.AJMP)
p.To.Type = obj.TYPE_BRANCH
...
@@ -611,67 +611,6 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
case ssa.OpS390XFlagEQ, ssa.OpS390XFlagLT, ssa.OpS390XFlagGT:
v.Fatalf("Flag* ops should never make it to codegen %v", v.LongString())
case ssa.OpS390XLoweredNilCheck:
// Optimization - if the subsequent block has a load or store
// at the same address, we don't need to issue this instruction.
mem := v.Args[1]
for _, w := range v.Block.Succs[0].Block().Values {
if w.Op == ssa.OpPhi {
if w.Type.IsMemory() {
mem = w
}
continue
}
if len(w.Args) == 0 || !w.Args[len(w.Args)-1].Type.IsMemory() {
// w doesn't use a store - can't be a memory op.
continue
}
if w.Args[len(w.Args)-1] != mem {
v.Fatalf("wrong store after nilcheck v=%s w=%s", v, w)
}
switch w.Op {
case ssa.OpS390XMOVDload,
ssa.OpS390XMOVBload, ssa.OpS390XMOVHload, ssa.OpS390XMOVWload,
ssa.OpS390XMOVBZload, ssa.OpS390XMOVHZload, ssa.OpS390XMOVWZload,
ssa.OpS390XMOVHBRload, ssa.OpS390XMOVWBRload, ssa.OpS390XMOVDBRload,
ssa.OpS390XMOVBstore, ssa.OpS390XMOVHstore, ssa.OpS390XMOVWstore, ssa.OpS390XMOVDstore,
ssa.OpS390XFMOVSload, ssa.OpS390XFMOVDload,
ssa.OpS390XFMOVSstore, ssa.OpS390XFMOVDstore,
ssa.OpS390XSTMG2, ssa.OpS390XSTMG3, ssa.OpS390XSTMG4,
ssa.OpS390XSTM2, ssa.OpS390XSTM3, ssa.OpS390XSTM4:
if w.Args[0] == v.Args[0] && w.Aux == nil && w.AuxInt >= 0 && w.AuxInt < minZeroPage {
if gc.Debug_checknil != 0 && int(v.Line) > 1 {
gc.Warnl(v.Line, "removed nil check")
}
return
}
case ssa.OpS390XMOVDstoreconst, ssa.OpS390XMOVWstoreconst, ssa.OpS390XMOVHstoreconst, ssa.OpS390XMOVBstoreconst,
ssa.OpS390XCLEAR:
off := ssa.ValAndOff(v.AuxInt).Off()
if w.Args[0] == v.Args[0] && w.Aux == nil && off >= 0 && off < minZeroPage {
if gc.Debug_checknil != 0 && int(v.Line) > 1 {
gc.Warnl(v.Line, "removed nil check")
}
return
}
case ssa.OpS390XMVC:
off := ssa.ValAndOff(v.AuxInt).Off()
if (w.Args[0] == v.Args[0] || w.Args[1] == v.Args[0]) && w.Aux == nil && off >= 0 && off < minZeroPage {
if gc.Debug_checknil != 0 && int(v.Line) > 1 {
gc.Warnl(v.Line, "removed nil check")
}
return
}
}
if w.Type.IsMemory() {
if w.Op == ssa.OpVarDef || w.Op == ssa.OpVarKill || w.Op == ssa.OpVarLive {
// these ops are OK
mem = w
continue
}
// We can't delay the nil check past the next store.
break
}
}
// Issue a load which will fault if the input is nil.
p := gc.Prog(s390x.AMOVBZ)
p.From.Type = obj.TYPE_MEM
@@ -821,7 +760,7 @@ func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
s.SetLineno(b.Line)
switch b.Kind {
case ssa.BlockPlain, ssa.BlockCheck:
case ssa.BlockPlain:
if b.Succs[0].Block() != next {
p := gc.Prog(s390x.ABR)
p.To.Type = obj.TYPE_BRANCH
...
@@ -90,16 +90,6 @@ func checkFunc(f *Func) {
if !b.Control.Type.IsMemory() {
f.Fatalf("defer block %s has non-memory control value %s", b, b.Control.LongString())
}
case BlockCheck:
if len(b.Succs) != 1 {
f.Fatalf("check block %s len(Succs)==%d, want 1", b, len(b.Succs))
}
if b.Control == nil {
f.Fatalf("check block %s has no control value", b)
}
if !b.Control.Type.IsVoid() {
f.Fatalf("check block %s has non-void control value %s", b, b.Control.LongString())
}
case BlockFirst:
if len(b.Succs) != 2 {
f.Fatalf("plain/dead block %s len(Succs)==%d, want 2", b, len(b.Succs))
...
@@ -274,8 +274,9 @@ var passes = [...]pass{
{name: "late deadcode", fn: deadcode},
{name: "critical", fn: critical, required: true}, // remove critical edges
{name: "likelyadjust", fn: likelyadjust},
{name: "layout", fn: layout, required: true}, // schedule blocks
{name: "schedule", fn: schedule, required: true}, // schedule values
{name: "late nilcheck", fn: nilcheckelim2},
{name: "flagalloc", fn: flagalloc, required: true}, // allocate flags register {name: "flagalloc", fn: flagalloc, required: true}, // allocate flags register
{name: "regalloc", fn: regalloc, required: true}, // allocate int & float registers + stack slots {name: "regalloc", fn: regalloc, required: true}, // allocate int & float registers + stack slots
{name: "trim", fn: trim}, // remove empty blocks {name: "trim", fn: trim}, // remove empty blocks
...@@ -329,6 +330,8 @@ var passOrder = [...]constraint{ ...@@ -329,6 +330,8 @@ var passOrder = [...]constraint{
// checkLower must run after lowering & subsequent dead code elim // checkLower must run after lowering & subsequent dead code elim
{"lower", "checkLower"}, {"lower", "checkLower"},
{"lowered deadcode", "checkLower"}, {"lowered deadcode", "checkLower"},
// late nilcheck needs instructions to be scheduled.
{"schedule", "late nilcheck"},
// flagalloc needs instructions to be scheduled.
{"schedule", "flagalloc"},
// regalloc needs flags to be allocated first.
...
@@ -68,6 +68,11 @@ func liveValues(f *Func, reachable []bool) []bool {
live[v.ID] = true
q = append(q, v)
}
if v.Type.IsVoid() && !live[v.ID] {
// The only Void ops are nil checks. We must keep these.
live[v.ID] = true
q = append(q, v)
}
}
}
...
@@ -157,21 +157,21 @@ func init() {
{name: "DIVSS", argLength: 2, reg: fp21, asm: "DIVSS", resultInArg0: true}, // fp32 div {name: "DIVSS", argLength: 2, reg: fp21, asm: "DIVSS", resultInArg0: true}, // fp32 div
{name: "DIVSD", argLength: 2, reg: fp21, asm: "DIVSD", resultInArg0: true}, // fp64 div {name: "DIVSD", argLength: 2, reg: fp21, asm: "DIVSD", resultInArg0: true}, // fp64 div
{name: "MOVSSload", argLength: 2, reg: fpload, asm: "MOVSS", aux: "SymOff"}, // fp32 load {name: "MOVSSload", argLength: 2, reg: fpload, asm: "MOVSS", aux: "SymOff", faultOnNilArg0: true}, // fp32 load
{name: "MOVSDload", argLength: 2, reg: fpload, asm: "MOVSD", aux: "SymOff"}, // fp64 load {name: "MOVSDload", argLength: 2, reg: fpload, asm: "MOVSD", aux: "SymOff", faultOnNilArg0: true}, // fp64 load
{name: "MOVSSconst", reg: fp01, asm: "MOVSS", aux: "Float32", rematerializeable: true}, // fp32 constant {name: "MOVSSconst", reg: fp01, asm: "MOVSS", aux: "Float32", rematerializeable: true}, // fp32 constant
{name: "MOVSDconst", reg: fp01, asm: "MOVSD", aux: "Float64", rematerializeable: true}, // fp64 constant {name: "MOVSDconst", reg: fp01, asm: "MOVSD", aux: "Float64", rematerializeable: true}, // fp64 constant
{name: "MOVSSloadidx1", argLength: 3, reg: fploadidx, asm: "MOVSS", aux: "SymOff"}, // fp32 load indexed by i {name: "MOVSSloadidx1", argLength: 3, reg: fploadidx, asm: "MOVSS", aux: "SymOff"}, // fp32 load indexed by i
{name: "MOVSSloadidx4", argLength: 3, reg: fploadidx, asm: "MOVSS", aux: "SymOff"}, // fp32 load indexed by 4*i {name: "MOVSSloadidx4", argLength: 3, reg: fploadidx, asm: "MOVSS", aux: "SymOff"}, // fp32 load indexed by 4*i
{name: "MOVSDloadidx1", argLength: 3, reg: fploadidx, asm: "MOVSD", aux: "SymOff"}, // fp64 load indexed by i {name: "MOVSDloadidx1", argLength: 3, reg: fploadidx, asm: "MOVSD", aux: "SymOff"}, // fp64 load indexed by i
{name: "MOVSDloadidx8", argLength: 3, reg: fploadidx, asm: "MOVSD", aux: "SymOff"}, // fp64 load indexed by 8*i {name: "MOVSDloadidx8", argLength: 3, reg: fploadidx, asm: "MOVSD", aux: "SymOff"}, // fp64 load indexed by 8*i
{name: "MOVSSstore", argLength: 3, reg: fpstore, asm: "MOVSS", aux: "SymOff"}, // fp32 store {name: "MOVSSstore", argLength: 3, reg: fpstore, asm: "MOVSS", aux: "SymOff", faultOnNilArg0: true}, // fp32 store
{name: "MOVSDstore", argLength: 3, reg: fpstore, asm: "MOVSD", aux: "SymOff"}, // fp64 store {name: "MOVSDstore", argLength: 3, reg: fpstore, asm: "MOVSD", aux: "SymOff", faultOnNilArg0: true}, // fp64 store
{name: "MOVSSstoreidx1", argLength: 4, reg: fpstoreidx, asm: "MOVSS", aux: "SymOff"}, // fp32 indexed by i store {name: "MOVSSstoreidx1", argLength: 4, reg: fpstoreidx, asm: "MOVSS", aux: "SymOff"}, // fp32 indexed by i store
{name: "MOVSSstoreidx4", argLength: 4, reg: fpstoreidx, asm: "MOVSS", aux: "SymOff"}, // fp32 indexed by 4i store {name: "MOVSSstoreidx4", argLength: 4, reg: fpstoreidx, asm: "MOVSS", aux: "SymOff"}, // fp32 indexed by 4i store
{name: "MOVSDstoreidx1", argLength: 4, reg: fpstoreidx, asm: "MOVSD", aux: "SymOff"}, // fp64 indexed by i store {name: "MOVSDstoreidx1", argLength: 4, reg: fpstoreidx, asm: "MOVSD", aux: "SymOff"}, // fp64 indexed by i store
{name: "MOVSDstoreidx8", argLength: 4, reg: fpstoreidx, asm: "MOVSD", aux: "SymOff"}, // fp64 indexed by 8i store {name: "MOVSDstoreidx8", argLength: 4, reg: fpstoreidx, asm: "MOVSD", aux: "SymOff"}, // fp64 indexed by 8i store
// binary ops // binary ops
{name: "ADDL", argLength: 2, reg: gp21sp, asm: "ADDL", commutative: true, clobberFlags: true}, // arg0 + arg1 {name: "ADDL", argLength: 2, reg: gp21sp, asm: "ADDL", commutative: true, clobberFlags: true}, // arg0 + arg1
@@ -323,14 +323,14 @@ func init() {
// Note: LEAL{1,2,4,8} must not have OpSB as either argument. // Note: LEAL{1,2,4,8} must not have OpSB as either argument.
// auxint+aux == add auxint and the offset of the symbol in aux (if any) to the effective address // auxint+aux == add auxint and the offset of the symbol in aux (if any) to the effective address
{name: "MOVBload", argLength: 2, reg: gpload, asm: "MOVBLZX", aux: "SymOff", typ: "UInt8"}, // load byte from arg0+auxint+aux. arg1=mem. Zero extend. {name: "MOVBload", argLength: 2, reg: gpload, asm: "MOVBLZX", aux: "SymOff", typ: "UInt8", faultOnNilArg0: true}, // load byte from arg0+auxint+aux. arg1=mem. Zero extend.
{name: "MOVBLSXload", argLength: 2, reg: gpload, asm: "MOVBLSX", aux: "SymOff"}, // ditto, sign extend to int32 {name: "MOVBLSXload", argLength: 2, reg: gpload, asm: "MOVBLSX", aux: "SymOff", faultOnNilArg0: true}, // ditto, sign extend to int32
{name: "MOVWload", argLength: 2, reg: gpload, asm: "MOVWLZX", aux: "SymOff", typ: "UInt16"}, // load 2 bytes from arg0+auxint+aux. arg1=mem. Zero extend. {name: "MOVWload", argLength: 2, reg: gpload, asm: "MOVWLZX", aux: "SymOff", typ: "UInt16", faultOnNilArg0: true}, // load 2 bytes from arg0+auxint+aux. arg1=mem. Zero extend.
{name: "MOVWLSXload", argLength: 2, reg: gpload, asm: "MOVWLSX", aux: "SymOff"}, // ditto, sign extend to int32 {name: "MOVWLSXload", argLength: 2, reg: gpload, asm: "MOVWLSX", aux: "SymOff", faultOnNilArg0: true}, // ditto, sign extend to int32
{name: "MOVLload", argLength: 2, reg: gpload, asm: "MOVL", aux: "SymOff", typ: "UInt32"}, // load 4 bytes from arg0+auxint+aux. arg1=mem. Zero extend. {name: "MOVLload", argLength: 2, reg: gpload, asm: "MOVL", aux: "SymOff", typ: "UInt32", faultOnNilArg0: true}, // load 4 bytes from arg0+auxint+aux. arg1=mem. Zero extend.
{name: "MOVBstore", argLength: 3, reg: gpstore, asm: "MOVB", aux: "SymOff", typ: "Mem"}, // store byte in arg1 to arg0+auxint+aux. arg2=mem {name: "MOVBstore", argLength: 3, reg: gpstore, asm: "MOVB", aux: "SymOff", typ: "Mem", faultOnNilArg0: true}, // store byte in arg1 to arg0+auxint+aux. arg2=mem
{name: "MOVWstore", argLength: 3, reg: gpstore, asm: "MOVW", aux: "SymOff", typ: "Mem"}, // store 2 bytes in arg1 to arg0+auxint+aux. arg2=mem {name: "MOVWstore", argLength: 3, reg: gpstore, asm: "MOVW", aux: "SymOff", typ: "Mem", faultOnNilArg0: true}, // store 2 bytes in arg1 to arg0+auxint+aux. arg2=mem
{name: "MOVLstore", argLength: 3, reg: gpstore, asm: "MOVL", aux: "SymOff", typ: "Mem"}, // store 4 bytes in arg1 to arg0+auxint+aux. arg2=mem {name: "MOVLstore", argLength: 3, reg: gpstore, asm: "MOVL", aux: "SymOff", typ: "Mem", faultOnNilArg0: true}, // store 4 bytes in arg1 to arg0+auxint+aux. arg2=mem
// indexed loads/stores // indexed loads/stores
{name: "MOVBloadidx1", argLength: 3, reg: gploadidx, asm: "MOVBLZX", aux: "SymOff"}, // load a byte from arg0+arg1+auxint+aux. arg2=mem {name: "MOVBloadidx1", argLength: 3, reg: gploadidx, asm: "MOVBLZX", aux: "SymOff"}, // load a byte from arg0+arg1+auxint+aux. arg2=mem
@@ -349,9 +349,9 @@ func init() {
// For storeconst ops, the AuxInt field encodes both
// the value to store and an address offset of the store.
// Cast AuxInt to a ValAndOff to extract Val and Off fields.
{name: "MOVBstoreconst", argLength: 2, reg: gpstoreconst, asm: "MOVB", aux: "SymValAndOff", typ: "Mem"}, // store low byte of ValAndOff(AuxInt).Val() to arg0+ValAndOff(AuxInt).Off()+aux. arg1=mem {name: "MOVBstoreconst", argLength: 2, reg: gpstoreconst, asm: "MOVB", aux: "SymValAndOff", typ: "Mem", faultOnNilArg0: true}, // store low byte of ValAndOff(AuxInt).Val() to arg0+ValAndOff(AuxInt).Off()+aux. arg1=mem
{name: "MOVWstoreconst", argLength: 2, reg: gpstoreconst, asm: "MOVW", aux: "SymValAndOff", typ: "Mem"}, // store low 2 bytes of ... {name: "MOVWstoreconst", argLength: 2, reg: gpstoreconst, asm: "MOVW", aux: "SymValAndOff", typ: "Mem", faultOnNilArg0: true}, // store low 2 bytes of ...
{name: "MOVLstoreconst", argLength: 2, reg: gpstoreconst, asm: "MOVL", aux: "SymValAndOff", typ: "Mem"}, // store low 4 bytes of ... {name: "MOVLstoreconst", argLength: 2, reg: gpstoreconst, asm: "MOVL", aux: "SymValAndOff", typ: "Mem", faultOnNilArg0: true}, // store low 4 bytes of ...
{name: "MOVBstoreconstidx1", argLength: 3, reg: gpstoreconstidx, asm: "MOVB", aux: "SymValAndOff", typ: "Mem"}, // store low byte of ValAndOff(AuxInt).Val() to arg0+1*arg1+ValAndOff(AuxInt).Off()+aux. arg2=mem {name: "MOVBstoreconstidx1", argLength: 3, reg: gpstoreconstidx, asm: "MOVB", aux: "SymValAndOff", typ: "Mem"}, // store low byte of ValAndOff(AuxInt).Val() to arg0+1*arg1+ValAndOff(AuxInt).Off()+aux. arg2=mem
{name: "MOVWstoreconstidx1", argLength: 3, reg: gpstoreconstidx, asm: "MOVW", aux: "SymValAndOff", typ: "Mem"}, // store low 2 bytes of ... arg1 ... {name: "MOVWstoreconstidx1", argLength: 3, reg: gpstoreconstidx, asm: "MOVW", aux: "SymValAndOff", typ: "Mem"}, // store low 2 bytes of ... arg1 ...
@@ -439,7 +439,7 @@ func init() {
// use of DX (the closure pointer)
{name: "LoweredGetClosurePtr", reg: regInfo{outputs: []regMask{buildReg("DX")}}},
//arg0=ptr,arg1=mem, returns void. Faults if ptr is nil.
{name: "LoweredNilCheck", argLength: 2, reg: regInfo{inputs: []regMask{gpsp}}, clobberFlags: true},
{name: "LoweredNilCheck", argLength: 2, reg: regInfo{inputs: []regMask{gpsp}}, clobberFlags: true, nilCheck: true},
// MOVLconvert converts between pointers and integers.
// We have a special op for this so as to not confuse GC
...
@@ -314,19 +314,19 @@ func init() {
{name: "MOVWaddr", argLength: 1, reg: regInfo{inputs: []regMask{buildReg("SP") | buildReg("SB")}, outputs: []regMask{gp}}, aux: "SymOff", asm: "MOVW", rematerializeable: true}, // arg0 + auxInt + aux.(*gc.Sym), arg0=SP/SB {name: "MOVWaddr", argLength: 1, reg: regInfo{inputs: []regMask{buildReg("SP") | buildReg("SB")}, outputs: []regMask{gp}}, aux: "SymOff", asm: "MOVW", rematerializeable: true}, // arg0 + auxInt + aux.(*gc.Sym), arg0=SP/SB
{name: "MOVBload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVB", typ: "Int8"}, // load from arg0 + auxInt + aux. arg1=mem. {name: "MOVBload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVB", typ: "Int8", faultOnNilArg0: true}, // load from arg0 + auxInt + aux. arg1=mem.
{name: "MOVBUload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVBU", typ: "UInt8"}, // load from arg0 + auxInt + aux. arg1=mem. {name: "MOVBUload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVBU", typ: "UInt8", faultOnNilArg0: true}, // load from arg0 + auxInt + aux. arg1=mem.
{name: "MOVHload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVH", typ: "Int16"}, // load from arg0 + auxInt + aux. arg1=mem. {name: "MOVHload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVH", typ: "Int16", faultOnNilArg0: true}, // load from arg0 + auxInt + aux. arg1=mem.
{name: "MOVHUload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVHU", typ: "UInt16"}, // load from arg0 + auxInt + aux. arg1=mem. {name: "MOVHUload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVHU", typ: "UInt16", faultOnNilArg0: true}, // load from arg0 + auxInt + aux. arg1=mem.
{name: "MOVWload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVW", typ: "UInt32"}, // load from arg0 + auxInt + aux. arg1=mem. {name: "MOVWload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVW", typ: "UInt32", faultOnNilArg0: true}, // load from arg0 + auxInt + aux. arg1=mem.
{name: "MOVFload", argLength: 2, reg: fpload, aux: "SymOff", asm: "MOVF", typ: "Float32"}, // load from arg0 + auxInt + aux. arg1=mem. {name: "MOVFload", argLength: 2, reg: fpload, aux: "SymOff", asm: "MOVF", typ: "Float32", faultOnNilArg0: true}, // load from arg0 + auxInt + aux. arg1=mem.
{name: "MOVDload", argLength: 2, reg: fpload, aux: "SymOff", asm: "MOVD", typ: "Float64"}, // load from arg0 + auxInt + aux. arg1=mem. {name: "MOVDload", argLength: 2, reg: fpload, aux: "SymOff", asm: "MOVD", typ: "Float64", faultOnNilArg0: true}, // load from arg0 + auxInt + aux. arg1=mem.
{name: "MOVBstore", argLength: 3, reg: gpstore, aux: "SymOff", asm: "MOVB", typ: "Mem"}, // store 1 byte of arg1 to arg0 + auxInt + aux. arg2=mem. {name: "MOVBstore", argLength: 3, reg: gpstore, aux: "SymOff", asm: "MOVB", typ: "Mem", faultOnNilArg0: true}, // store 1 byte of arg1 to arg0 + auxInt + aux. arg2=mem.
{name: "MOVHstore", argLength: 3, reg: gpstore, aux: "SymOff", asm: "MOVH", typ: "Mem"}, // store 2 bytes of arg1 to arg0 + auxInt + aux. arg2=mem. {name: "MOVHstore", argLength: 3, reg: gpstore, aux: "SymOff", asm: "MOVH", typ: "Mem", faultOnNilArg0: true}, // store 2 bytes of arg1 to arg0 + auxInt + aux. arg2=mem.
{name: "MOVWstore", argLength: 3, reg: gpstore, aux: "SymOff", asm: "MOVW", typ: "Mem"}, // store 4 bytes of arg1 to arg0 + auxInt + aux. arg2=mem. {name: "MOVWstore", argLength: 3, reg: gpstore, aux: "SymOff", asm: "MOVW", typ: "Mem", faultOnNilArg0: true}, // store 4 bytes of arg1 to arg0 + auxInt + aux. arg2=mem.
{name: "MOVFstore", argLength: 3, reg: fpstore, aux: "SymOff", asm: "MOVF", typ: "Mem"}, // store 4 bytes of arg1 to arg0 + auxInt + aux. arg2=mem. {name: "MOVFstore", argLength: 3, reg: fpstore, aux: "SymOff", asm: "MOVF", typ: "Mem", faultOnNilArg0: true}, // store 4 bytes of arg1 to arg0 + auxInt + aux. arg2=mem.
{name: "MOVDstore", argLength: 3, reg: fpstore, aux: "SymOff", asm: "MOVD", typ: "Mem"}, // store 8 bytes of arg1 to arg0 + auxInt + aux. arg2=mem. {name: "MOVDstore", argLength: 3, reg: fpstore, aux: "SymOff", asm: "MOVD", typ: "Mem", faultOnNilArg0: true}, // store 8 bytes of arg1 to arg0 + auxInt + aux. arg2=mem.
{name: "MOVWloadidx", argLength: 3, reg: gp2load, asm: "MOVW"}, // load from arg0 + arg1. arg2=mem {name: "MOVWloadidx", argLength: 3, reg: gp2load, asm: "MOVW"}, // load from arg0 + arg1. arg2=mem
{name: "MOVWloadshiftLL", argLength: 3, reg: gp2load, asm: "MOVW", aux: "Int32"}, // load from arg0 + arg1<<auxInt. arg2=mem {name: "MOVWloadshiftLL", argLength: 3, reg: gp2load, asm: "MOVW", aux: "Int32"}, // load from arg0 + arg1<<auxInt. arg2=mem
@@ -370,7 +370,7 @@ func init() {
{name: "CALLinter", argLength: 2, reg: regInfo{inputs: []regMask{gp}, clobbers: callerSave}, aux: "Int64", clobberFlags: true, call: true}, // call fn by pointer. arg0=codeptr, arg1=mem, auxint=argsize, returns mem
// pseudo-ops
{name: "LoweredNilCheck", argLength: 2, reg: regInfo{inputs: []regMask{gpg}}}, // panic if arg0 is nil. arg1=mem.
{name: "LoweredNilCheck", argLength: 2, reg: regInfo{inputs: []regMask{gpg}}, nilCheck: true}, // panic if arg0 is nil. arg1=mem.
{name: "Equal", argLength: 1, reg: readflags}, // bool, true flags encode x==y false otherwise.
{name: "NotEqual", argLength: 1, reg: readflags}, // bool, true flags encode x!=y false otherwise.
@@ -397,6 +397,7 @@ func init() {
inputs: []regMask{buildReg("R1"), buildReg("R0")},
clobbers: buildReg("R1"),
},
faultOnNilArg0: true,
},
// duffcopy (must be 4-byte aligned)
@@ -413,6 +414,8 @@ func init() {
inputs: []regMask{buildReg("R2"), buildReg("R1")},
clobbers: buildReg("R0 R1 R2"),
},
faultOnNilArg0: true,
faultOnNilArg1: true,
},
// large or unaligned zeroing
@@ -432,7 +435,8 @@ func init() {
inputs: []regMask{buildReg("R1"), gp, gp},
clobbers: buildReg("R1"),
},
clobberFlags: true,
faultOnNilArg0: true,
},
// large or unaligned move
@@ -453,7 +457,9 @@ func init() {
inputs: []regMask{buildReg("R2"), buildReg("R1"), gp},
clobbers: buildReg("R1 R2"),
},
clobberFlags: true,
faultOnNilArg0: true,
faultOnNilArg1: true,
},
// Scheduler ensures LoweredGetClosurePtr occurs only in entry block,
...
@@ -219,27 +219,27 @@ func init() {
{name: "MOVVaddr", argLength: 1, reg: regInfo{inputs: []regMask{buildReg("SP") | buildReg("SB")}, outputs: []regMask{gp}}, aux: "SymOff", asm: "MOVV", rematerializeable: true}, // arg0 + auxInt + aux.(*gc.Sym), arg0=SP/SB {name: "MOVVaddr", argLength: 1, reg: regInfo{inputs: []regMask{buildReg("SP") | buildReg("SB")}, outputs: []regMask{gp}}, aux: "SymOff", asm: "MOVV", rematerializeable: true}, // arg0 + auxInt + aux.(*gc.Sym), arg0=SP/SB
{name: "MOVBload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVB", typ: "Int8"}, // load from arg0 + auxInt + aux. arg1=mem. {name: "MOVBload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVB", typ: "Int8", faultOnNilArg0: true}, // load from arg0 + auxInt + aux. arg1=mem.
{name: "MOVBUload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVBU", typ: "UInt8"}, // load from arg0 + auxInt + aux. arg1=mem. {name: "MOVBUload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVBU", typ: "UInt8", faultOnNilArg0: true}, // load from arg0 + auxInt + aux. arg1=mem.
{name: "MOVHload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVH", typ: "Int16"}, // load from arg0 + auxInt + aux. arg1=mem. {name: "MOVHload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVH", typ: "Int16", faultOnNilArg0: true}, // load from arg0 + auxInt + aux. arg1=mem.
{name: "MOVHUload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVHU", typ: "UInt16"}, // load from arg0 + auxInt + aux. arg1=mem. {name: "MOVHUload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVHU", typ: "UInt16", faultOnNilArg0: true}, // load from arg0 + auxInt + aux. arg1=mem.
{name: "MOVWload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVW", typ: "Int32"}, // load from arg0 + auxInt + aux. arg1=mem. {name: "MOVWload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVW", typ: "Int32", faultOnNilArg0: true}, // load from arg0 + auxInt + aux. arg1=mem.
{name: "MOVWUload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVWU", typ: "UInt32"}, // load from arg0 + auxInt + aux. arg1=mem. {name: "MOVWUload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVWU", typ: "UInt32", faultOnNilArg0: true}, // load from arg0 + auxInt + aux. arg1=mem.
{name: "MOVVload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVV", typ: "UInt64"}, // load from arg0 + auxInt + aux. arg1=mem. {name: "MOVVload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVV", typ: "UInt64", faultOnNilArg0: true}, // load from arg0 + auxInt + aux. arg1=mem.
{name: "MOVFload", argLength: 2, reg: fpload, aux: "SymOff", asm: "MOVF", typ: "Float32"}, // load from arg0 + auxInt + aux. arg1=mem. {name: "MOVFload", argLength: 2, reg: fpload, aux: "SymOff", asm: "MOVF", typ: "Float32", faultOnNilArg0: true}, // load from arg0 + auxInt + aux. arg1=mem.
{name: "MOVDload", argLength: 2, reg: fpload, aux: "SymOff", asm: "MOVD", typ: "Float64"}, // load from arg0 + auxInt + aux. arg1=mem. {name: "MOVDload", argLength: 2, reg: fpload, aux: "SymOff", asm: "MOVD", typ: "Float64", faultOnNilArg0: true}, // load from arg0 + auxInt + aux. arg1=mem.
{name: "MOVBstore", argLength: 3, reg: gpstore, aux: "SymOff", asm: "MOVB", typ: "Mem"}, // store 1 byte of arg1 to arg0 + auxInt + aux. arg2=mem. {name: "MOVBstore", argLength: 3, reg: gpstore, aux: "SymOff", asm: "MOVB", typ: "Mem", faultOnNilArg0: true}, // store 1 byte of arg1 to arg0 + auxInt + aux. arg2=mem.
{name: "MOVHstore", argLength: 3, reg: gpstore, aux: "SymOff", asm: "MOVH", typ: "Mem"}, // store 2 bytes of arg1 to arg0 + auxInt + aux. arg2=mem. {name: "MOVHstore", argLength: 3, reg: gpstore, aux: "SymOff", asm: "MOVH", typ: "Mem", faultOnNilArg0: true}, // store 2 bytes of arg1 to arg0 + auxInt + aux. arg2=mem.
{name: "MOVWstore", argLength: 3, reg: gpstore, aux: "SymOff", asm: "MOVW", typ: "Mem"}, // store 4 bytes of arg1 to arg0 + auxInt + aux. arg2=mem. {name: "MOVWstore", argLength: 3, reg: gpstore, aux: "SymOff", asm: "MOVW", typ: "Mem", faultOnNilArg0: true}, // store 4 bytes of arg1 to arg0 + auxInt + aux. arg2=mem.
{name: "MOVVstore", argLength: 3, reg: gpstore, aux: "SymOff", asm: "MOVV", typ: "Mem"}, // store 8 bytes of arg1 to arg0 + auxInt + aux. arg2=mem. {name: "MOVVstore", argLength: 3, reg: gpstore, aux: "SymOff", asm: "MOVV", typ: "Mem", faultOnNilArg0: true}, // store 8 bytes of arg1 to arg0 + auxInt + aux. arg2=mem.
{name: "MOVFstore", argLength: 3, reg: fpstore, aux: "SymOff", asm: "MOVF", typ: "Mem"}, // store 4 bytes of arg1 to arg0 + auxInt + aux. arg2=mem. {name: "MOVFstore", argLength: 3, reg: fpstore, aux: "SymOff", asm: "MOVF", typ: "Mem", faultOnNilArg0: true}, // store 4 bytes of arg1 to arg0 + auxInt + aux. arg2=mem.
{name: "MOVDstore", argLength: 3, reg: fpstore, aux: "SymOff", asm: "MOVD", typ: "Mem"}, // store 8 bytes of arg1 to arg0 + auxInt + aux. arg2=mem. {name: "MOVDstore", argLength: 3, reg: fpstore, aux: "SymOff", asm: "MOVD", typ: "Mem", faultOnNilArg0: true}, // store 8 bytes of arg1 to arg0 + auxInt + aux. arg2=mem.
{name: "MOVBstorezero", argLength: 2, reg: gpstore0, aux: "SymOff", asm: "MOVB", typ: "Mem"}, // store 1 byte of zero to arg0 + auxInt + aux. arg1=mem. {name: "MOVBstorezero", argLength: 2, reg: gpstore0, aux: "SymOff", asm: "MOVB", typ: "Mem", faultOnNilArg0: true}, // store 1 byte of zero to arg0 + auxInt + aux. arg1=mem.
{name: "MOVHstorezero", argLength: 2, reg: gpstore0, aux: "SymOff", asm: "MOVH", typ: "Mem"}, // store 2 bytes of zero to arg0 + auxInt + aux. arg1=mem. {name: "MOVHstorezero", argLength: 2, reg: gpstore0, aux: "SymOff", asm: "MOVH", typ: "Mem", faultOnNilArg0: true}, // store 2 bytes of zero to arg0 + auxInt + aux. arg1=mem.
{name: "MOVWstorezero", argLength: 2, reg: gpstore0, aux: "SymOff", asm: "MOVW", typ: "Mem"}, // store 4 bytes of zero to arg0 + auxInt + aux. arg1=mem. {name: "MOVWstorezero", argLength: 2, reg: gpstore0, aux: "SymOff", asm: "MOVW", typ: "Mem", faultOnNilArg0: true}, // store 4 bytes of zero to arg0 + auxInt + aux. arg1=mem.
{name: "MOVVstorezero", argLength: 2, reg: gpstore0, aux: "SymOff", asm: "MOVV", typ: "Mem"}, // store 8 bytes of zero to arg0 + auxInt + aux. ar12=mem. {name: "MOVVstorezero", argLength: 2, reg: gpstore0, aux: "SymOff", asm: "MOVV", typ: "Mem", faultOnNilArg0: true}, // store 8 bytes of zero to arg0 + auxInt + aux. ar12=mem.
// conversions
{name: "MOVBreg", argLength: 1, reg: gp11, asm: "MOVB"}, // move from arg0, sign-extended from byte
@@ -284,6 +284,7 @@ func init() {
inputs: []regMask{gp},
clobbers: buildReg("R1"),
},
faultOnNilArg0: true,
},
// large or unaligned zeroing
@@ -304,7 +305,8 @@ func init() {
inputs: []regMask{buildReg("R1"), gp},
clobbers: buildReg("R1"),
},
clobberFlags: true,
faultOnNilArg0: true,
},
// large or unaligned move
@@ -328,11 +330,13 @@ func init() {
inputs: []regMask{buildReg("R2"), buildReg("R1"), gp},
clobbers: buildReg("R1 R2"),
},
clobberFlags: true,
faultOnNilArg0: true,
faultOnNilArg1: true,
},
// pseudo-ops
{name: "LoweredNilCheck", argLength: 2, reg: regInfo{inputs: []regMask{gpg}}}, // panic if arg0 is nil. arg1=mem. {name: "LoweredNilCheck", argLength: 2, reg: regInfo{inputs: []regMask{gpg}}, nilCheck: true}, // panic if arg0 is nil. arg1=mem.
{name: "FPFlagTrue", argLength: 1, reg: readflags}, // bool, true if FP flag is true {name: "FPFlagTrue", argLength: 1, reg: readflags}, // bool, true if FP flag is true
{name: "FPFlagFalse", argLength: 1, reg: readflags}, // bool, true if FP flag is false {name: "FPFlagFalse", argLength: 1, reg: readflags}, // bool, true if FP flag is false
......
@@ -227,33 +227,33 @@ func init() {
{name: "XORconst", argLength: 1, reg: gp11, asm: "XOR", aux: "Int64"}, // arg0^aux {name: "XORconst", argLength: 1, reg: gp11, asm: "XOR", aux: "Int64"}, // arg0^aux
{name: "ANDconst", argLength: 1, reg: regInfo{inputs: []regMask{gp | sp | sb}, outputs: []regMask{gp}}, asm: "ANDCC", aux: "Int64", clobberFlags: true}, // arg0&aux // and-immediate sets CC on PPC, always. {name: "ANDconst", argLength: 1, reg: regInfo{inputs: []regMask{gp | sp | sb}, outputs: []regMask{gp}}, asm: "ANDCC", aux: "Int64", clobberFlags: true}, // arg0&aux // and-immediate sets CC on PPC, always.
{name: "MOVBreg", argLength: 1, reg: gp11, asm: "MOVB", typ: "Int64"}, // sign extend int8 to int64 {name: "MOVBreg", argLength: 1, reg: gp11, asm: "MOVB", typ: "Int64"}, // sign extend int8 to int64
{name: "MOVBZreg", argLength: 1, reg: gp11, asm: "MOVBZ", typ: "Int64"}, // zero extend uint8 to uint64 {name: "MOVBZreg", argLength: 1, reg: gp11, asm: "MOVBZ", typ: "Int64"}, // zero extend uint8 to uint64
{name: "MOVHreg", argLength: 1, reg: gp11, asm: "MOVH", typ: "Int64"}, // sign extend int16 to int64 {name: "MOVHreg", argLength: 1, reg: gp11, asm: "MOVH", typ: "Int64"}, // sign extend int16 to int64
{name: "MOVHZreg", argLength: 1, reg: gp11, asm: "MOVHZ", typ: "Int64"}, // zero extend uint16 to uint64 {name: "MOVHZreg", argLength: 1, reg: gp11, asm: "MOVHZ", typ: "Int64"}, // zero extend uint16 to uint64
{name: "MOVWreg", argLength: 1, reg: gp11, asm: "MOVW", typ: "Int64"}, // sign extend int32 to int64 {name: "MOVWreg", argLength: 1, reg: gp11, asm: "MOVW", typ: "Int64"}, // sign extend int32 to int64
{name: "MOVWZreg", argLength: 1, reg: gp11, asm: "MOVWZ", typ: "Int64"}, // zero extend uint32 to uint64 {name: "MOVWZreg", argLength: 1, reg: gp11, asm: "MOVWZ", typ: "Int64"}, // zero extend uint32 to uint64
{name: "MOVBload", argLength: 2, reg: gpload, asm: "MOVB", aux: "SymOff", typ: "Int8"}, // sign extend int8 to int64 {name: "MOVBload", argLength: 2, reg: gpload, asm: "MOVB", aux: "SymOff", typ: "Int8", faultOnNilArg0: true}, // sign extend int8 to int64
{name: "MOVBZload", argLength: 2, reg: gpload, asm: "MOVBZ", aux: "SymOff", typ: "UInt8"}, // zero extend uint8 to uint64 {name: "MOVBZload", argLength: 2, reg: gpload, asm: "MOVBZ", aux: "SymOff", typ: "UInt8", faultOnNilArg0: true}, // zero extend uint8 to uint64
{name: "MOVHload", argLength: 2, reg: gpload, asm: "MOVH", aux: "SymOff", typ: "Int16"}, // sign extend int16 to int64 {name: "MOVHload", argLength: 2, reg: gpload, asm: "MOVH", aux: "SymOff", typ: "Int16", faultOnNilArg0: true}, // sign extend int16 to int64
{name: "MOVHZload", argLength: 2, reg: gpload, asm: "MOVHZ", aux: "SymOff", typ: "UInt16"}, // zero extend uint16 to uint64 {name: "MOVHZload", argLength: 2, reg: gpload, asm: "MOVHZ", aux: "SymOff", typ: "UInt16", faultOnNilArg0: true}, // zero extend uint16 to uint64
{name: "MOVWload", argLength: 2, reg: gpload, asm: "MOVW", aux: "SymOff", typ: "Int32"}, // sign extend int32 to int64 {name: "MOVWload", argLength: 2, reg: gpload, asm: "MOVW", aux: "SymOff", typ: "Int32", faultOnNilArg0: true}, // sign extend int32 to int64
{name: "MOVWZload", argLength: 2, reg: gpload, asm: "MOVWZ", aux: "SymOff", typ: "UInt32"}, // zero extend uint32 to uint64 {name: "MOVWZload", argLength: 2, reg: gpload, asm: "MOVWZ", aux: "SymOff", typ: "UInt32", faultOnNilArg0: true}, // zero extend uint32 to uint64
{name: "MOVDload", argLength: 2, reg: gpload, asm: "MOVD", aux: "SymOff", typ: "Int64"}, {name: "MOVDload", argLength: 2, reg: gpload, asm: "MOVD", aux: "SymOff", typ: "Int64", faultOnNilArg0: true},
{name: "FMOVDload", argLength: 2, reg: fpload, asm: "FMOVD", aux: "SymOff", typ: "Float64"}, {name: "FMOVDload", argLength: 2, reg: fpload, asm: "FMOVD", aux: "SymOff", typ: "Float64", faultOnNilArg0: true},
{name: "FMOVSload", argLength: 2, reg: fpload, asm: "FMOVS", aux: "SymOff", typ: "Float32"}, {name: "FMOVSload", argLength: 2, reg: fpload, asm: "FMOVS", aux: "SymOff", typ: "Float32", faultOnNilArg0: true},
{name: "MOVBstore", argLength: 3, reg: gpstore, asm: "MOVB", aux: "SymOff", typ: "Mem"}, {name: "MOVBstore", argLength: 3, reg: gpstore, asm: "MOVB", aux: "SymOff", typ: "Mem", faultOnNilArg0: true},
{name: "MOVHstore", argLength: 3, reg: gpstore, asm: "MOVH", aux: "SymOff", typ: "Mem"}, {name: "MOVHstore", argLength: 3, reg: gpstore, asm: "MOVH", aux: "SymOff", typ: "Mem", faultOnNilArg0: true},
{name: "MOVWstore", argLength: 3, reg: gpstore, asm: "MOVW", aux: "SymOff", typ: "Mem"}, {name: "MOVWstore", argLength: 3, reg: gpstore, asm: "MOVW", aux: "SymOff", typ: "Mem", faultOnNilArg0: true},
{name: "MOVDstore", argLength: 3, reg: gpstore, asm: "MOVD", aux: "SymOff", typ: "Mem"}, {name: "MOVDstore", argLength: 3, reg: gpstore, asm: "MOVD", aux: "SymOff", typ: "Mem", faultOnNilArg0: true},
{name: "FMOVDstore", argLength: 3, reg: fpstore, asm: "FMOVD", aux: "SymOff", typ: "Mem"}, {name: "FMOVDstore", argLength: 3, reg: fpstore, asm: "FMOVD", aux: "SymOff", typ: "Mem", faultOnNilArg0: true},
{name: "FMOVSstore", argLength: 3, reg: fpstore, asm: "FMOVS", aux: "SymOff", typ: "Mem"}, {name: "FMOVSstore", argLength: 3, reg: fpstore, asm: "FMOVS", aux: "SymOff", typ: "Mem", faultOnNilArg0: true},
{name: "MOVBstorezero", argLength: 2, reg: gpstorezero, asm: "MOVB", aux: "SymOff", typ: "Mem"}, // store zero byte to arg0+aux. arg1=mem {name: "MOVBstorezero", argLength: 2, reg: gpstorezero, asm: "MOVB", aux: "SymOff", typ: "Mem", faultOnNilArg0: true}, // store zero byte to arg0+aux. arg1=mem
{name: "MOVHstorezero", argLength: 2, reg: gpstorezero, asm: "MOVH", aux: "SymOff", typ: "Mem"}, // store zero 2 bytes to ... {name: "MOVHstorezero", argLength: 2, reg: gpstorezero, asm: "MOVH", aux: "SymOff", typ: "Mem", faultOnNilArg0: true}, // store zero 2 bytes to ...
{name: "MOVWstorezero", argLength: 2, reg: gpstorezero, asm: "MOVW", aux: "SymOff", typ: "Mem"}, // store zero 4 bytes to ... {name: "MOVWstorezero", argLength: 2, reg: gpstorezero, asm: "MOVW", aux: "SymOff", typ: "Mem", faultOnNilArg0: true}, // store zero 4 bytes to ...
{name: "MOVDstorezero", argLength: 2, reg: gpstorezero, asm: "MOVD", aux: "SymOff", typ: "Mem"}, // store zero 8 bytes to ... {name: "MOVDstorezero", argLength: 2, reg: gpstorezero, asm: "MOVD", aux: "SymOff", typ: "Mem", faultOnNilArg0: true}, // store zero 8 bytes to ...
{name: "MOVDaddr", argLength: 1, reg: regInfo{inputs: []regMask{sp | sb}, outputs: []regMask{gp}}, aux: "SymOff", asm: "MOVD", rematerializeable: true}, // arg0 + auxInt + aux.(*gc.Sym), arg0=SP/SB {name: "MOVDaddr", argLength: 1, reg: regInfo{inputs: []regMask{sp | sb}, outputs: []regMask{gp}}, aux: "SymOff", asm: "MOVD", rematerializeable: true}, // arg0 + auxInt + aux.(*gc.Sym), arg0=SP/SB
@@ -290,7 +290,7 @@ func init() {
{name: "LoweredGetClosurePtr", reg: regInfo{outputs: []regMask{ctxt}}},
//arg0=ptr,arg1=mem, returns void. Faults if ptr is nil.
{name: "LoweredNilCheck", argLength: 2, reg: regInfo{inputs: []regMask{gp | sp | sb}, clobbers: tmp}, clobberFlags: true},
{name: "LoweredNilCheck", argLength: 2, reg: regInfo{inputs: []regMask{gp | sp | sb}, clobbers: tmp}, clobberFlags: true, nilCheck: true},
// Convert pointer to integer, takes a memory operand for ordering.
{name: "MOVDconvert", argLength: 2, reg: gp11, asm: "MOVD"},
@@ -318,8 +318,9 @@ func init() {
inputs: []regMask{buildReg("R3"), gp},
clobbers: buildReg("R3"),
},
clobberFlags: true,
typ: "Mem",
faultOnNilArg0: true,
},
// large or unaligned move
@@ -342,8 +343,10 @@ func init() {
inputs: []regMask{buildReg("R3"), buildReg("R4"), gp},
clobbers: buildReg("R3 R4"),
},
clobberFlags: true,
typ: "Mem",
faultOnNilArg0: true,
faultOnNilArg1: true,
},
// (InvertFlags (CMP a b)) == (CMP b a)
...
@@ -770,7 +770,7 @@
(ConstNil <config.fe.TypeBytePtr()>)
(ConstNil <config.fe.TypeBytePtr()>))
(Check (NilCheck (GetG _) _) next) -> (Plain nil next)
(NilCheck (GetG mem) mem) -> mem
(If (Not cond) yes no) -> (If cond no yes)
(If (ConstBool [c]) yes no) && c == 1 -> (First nil yes no)
...@@ -951,16 +951,20 @@ ...@@ -951,16 +951,20 @@
(Sqrt (Const64F [c])) -> (Const64F [f2i(math.Sqrt(i2f(c)))]) (Sqrt (Const64F [c])) -> (Const64F [f2i(math.Sqrt(i2f(c)))])
// recognize runtime.newobject and don't Zero/Nilcheck it // recognize runtime.newobject and don't Zero/Nilcheck it
(Zero (Load (OffPtr [c] (SP)) mem:(StaticCall {sym} _)) mem2) (Zero (Load (OffPtr [c] (SP)) mem) mem)
&& mem.Op == OpStaticCall
&& isSameSym(mem.Aux, "runtime.newobject")
&& c == config.ctxt.FixedFrameSize() + config.PtrSize // offset of return value && c == config.ctxt.FixedFrameSize() + config.PtrSize // offset of return value
&& mem2 == mem
&& isSameSym(sym, "runtime.newobject")
-> mem -> mem
(Check (NilCheck (Load (OffPtr [c] (SP)) mem:(StaticCall {sym} _)) _) succ) // nil checks just need to rewrite to something useless.
&& c == config.ctxt.FixedFrameSize() + config.PtrSize // offset of return value // they will be deadcode eliminated soon afterwards.
&& isSameSym(sym, "runtime.newobject") //(NilCheck (Load (OffPtr [c] (SP)) mem) mem)
-> (Plain nil succ) // && mem.Op == OpStaticCall
(Check (NilCheck (OffPtr (Load (OffPtr [c] (SP)) mem:(StaticCall {sym} _))) _) succ) // && isSameSym(mem.Aux, "runtime.newobject")
&& c == config.ctxt.FixedFrameSize() + config.PtrSize // offset of return value // && c == config.ctxt.FixedFrameSize() + config.PtrSize // offset of return value
&& isSameSym(sym, "runtime.newobject") // -> (Invalid)
-> (Plain nil succ) //(NilCheck (OffPtr (Load (OffPtr [c] (SP)) mem)) mem)
// && mem.Op == OpStaticCall
// && isSameSym(mem.Aux, "runtime.newobject")
// && c == config.ctxt.FixedFrameSize() + config.PtrSize // offset of return value
// -> (Invalid)
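A hedged, source-level sketch of the allocation pattern these rules are concerned with (type and field names are made up): new(T) compiles to a StaticCall to runtime.newobject, whose result is always non-nil and already zeroed. The Zero rule above folds away a redundant zeroing of that result; the commented-out rules sketch how its nil checks could similarly be rewritten to something useless and then dead-code eliminated.

package main

type T struct{ a, b int }

func main() {
	p := new(T) // StaticCall to runtime.newobject
	*p = T{}    // zeroing freshly allocated memory; the Zero rule removes it
	p.a = 1     // p came from newobject and is never nil, which is what the
	            // commented-out NilCheck rules would exploit
	println(p.a, p.b)
}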
...@@ -303,6 +303,7 @@ var genericOps = []opData{ ...@@ -303,6 +303,7 @@ var genericOps = []opData{
{name: "SP"}, // stack pointer {name: "SP"}, // stack pointer
{name: "SB", typ: "Uintptr"}, // static base pointer (a.k.a. globals pointer) {name: "SB", typ: "Uintptr"}, // static base pointer (a.k.a. globals pointer)
{name: "Func", aux: "Sym"}, // entry address of a function {name: "Func", aux: "Sym"}, // entry address of a function
{name: "Invalid"}, // unused value
// Memory operations // Memory operations
{name: "Load", argLength: 2}, // Load from arg0. arg1=memory {name: "Load", argLength: 2}, // Load from arg0. arg1=memory
...@@ -354,7 +355,7 @@ var genericOps = []opData{ ...@@ -354,7 +355,7 @@ var genericOps = []opData{
{name: "IsNonNil", argLength: 1, typ: "Bool"}, // arg0 != nil {name: "IsNonNil", argLength: 1, typ: "Bool"}, // arg0 != nil
{name: "IsInBounds", argLength: 2, typ: "Bool"}, // 0 <= arg0 < arg1. arg1 is guaranteed >= 0. {name: "IsInBounds", argLength: 2, typ: "Bool"}, // 0 <= arg0 < arg1. arg1 is guaranteed >= 0.
{name: "IsSliceInBounds", argLength: 2, typ: "Bool"}, // 0 <= arg0 <= arg1. arg1 is guaranteed >= 0. {name: "IsSliceInBounds", argLength: 2, typ: "Bool"}, // 0 <= arg0 <= arg1. arg1 is guaranteed >= 0.
{name: "NilCheck", argLength: 2, typ: "Void"}, // arg0=ptr, arg1=mem. Panics if arg0 is nil, returns void. {name: "NilCheck", argLength: 2, typ: "Void"}, // arg0=ptr, arg1=mem. Panics if arg0 is nil. Returns void.
// Pseudo-ops // Pseudo-ops
{name: "GetG", argLength: 1}, // runtime.getg() (read g pointer). arg0=mem {name: "GetG", argLength: 1}, // runtime.getg() (read g pointer). arg0=mem
...@@ -476,7 +477,6 @@ var genericBlocks = []blockData{ ...@@ -476,7 +477,6 @@ var genericBlocks = []blockData{
{name: "Plain"}, // a single successor {name: "Plain"}, // a single successor
{name: "If"}, // 2 successors, if control goto Succs[0] else goto Succs[1] {name: "If"}, // 2 successors, if control goto Succs[0] else goto Succs[1]
{name: "Defer"}, // 2 successors, Succs[0]=defer queued, Succs[1]=defer recovered. control is call op (of memory type) {name: "Defer"}, // 2 successors, Succs[0]=defer queued, Succs[1]=defer recovered. control is call op (of memory type)
{name: "Check"}, // 1 successor, control is nilcheck op (of void type)
{name: "Ret"}, // no successors, control value is memory result {name: "Ret"}, // no successors, control value is memory result
{name: "RetJmp"}, // no successors, jumps to b.Aux.(*gc.Sym) {name: "RetJmp"}, // no successors, jumps to b.Aux.(*gc.Sym)
{name: "Exit"}, // no successors, control value generates a panic {name: "Exit"}, // no successors, control value generates a panic
......
...@@ -47,6 +47,9 @@ type opData struct { ...@@ -47,6 +47,9 @@ type opData struct {
resultNotInArgs bool // outputs must not be allocated to the same registers as inputs resultNotInArgs bool // outputs must not be allocated to the same registers as inputs
clobberFlags bool // this op clobbers flags register clobberFlags bool // this op clobbers flags register
call bool // is a function call call bool // is a function call
nilCheck bool // this op is a nil check on arg0
faultOnNilArg0 bool // this op will fault if arg0 is nil (and aux encodes a small offset)
faultOnNilArg1 bool // this op will fault if arg1 is nil (and aux encodes a small offset)
} }
type blockData struct { type blockData struct {
...@@ -126,10 +129,13 @@ func genOp() { ...@@ -126,10 +129,13 @@ func genOp() {
// generate Op* declarations // generate Op* declarations
fmt.Fprintln(w, "const (") fmt.Fprintln(w, "const (")
fmt.Fprintln(w, "OpInvalid Op = iota") fmt.Fprintln(w, "OpInvalid Op = iota") // make sure OpInvalid is 0.
for _, a := range archs { for _, a := range archs {
fmt.Fprintln(w) fmt.Fprintln(w)
for _, v := range a.ops { for _, v := range a.ops {
if v.name == "Invalid" {
continue
}
fmt.Fprintf(w, "Op%s%s\n", a.Name(), v.name) fmt.Fprintf(w, "Op%s%s\n", a.Name(), v.name)
} }
} }
...@@ -143,6 +149,9 @@ func genOp() { ...@@ -143,6 +149,9 @@ func genOp() {
pkg := path.Base(a.pkg) pkg := path.Base(a.pkg)
for _, v := range a.ops { for _, v := range a.ops {
if v.name == "Invalid" {
continue
}
fmt.Fprintln(w, "{") fmt.Fprintln(w, "{")
fmt.Fprintf(w, "name:\"%s\",\n", v.name) fmt.Fprintf(w, "name:\"%s\",\n", v.name)
...@@ -179,6 +188,21 @@ func genOp() { ...@@ -179,6 +188,21 @@ func genOp() {
if v.call { if v.call {
fmt.Fprintln(w, "call: true,") fmt.Fprintln(w, "call: true,")
} }
if v.nilCheck {
fmt.Fprintln(w, "nilCheck: true,")
}
if v.faultOnNilArg0 {
fmt.Fprintln(w, "faultOnNilArg0: true,")
if v.aux != "SymOff" && v.aux != "SymValAndOff" && v.aux != "Int64" && v.aux != "" {
log.Fatalf("faultOnNilArg0 with aux %s not allowed", v.aux)
}
}
if v.faultOnNilArg1 {
fmt.Fprintln(w, "faultOnNilArg1: true,")
if v.aux != "SymOff" && v.aux != "SymValAndOff" && v.aux != "Int64" && v.aux != "" {
log.Fatalf("faultOnNilArg1 with aux %s not allowed", v.aux)
}
}
if a.name == "generic" { if a.name == "generic" {
fmt.Fprintln(w, "generic:true,") fmt.Fprintln(w, "generic:true,")
fmt.Fprintln(w, "},") // close op fmt.Fprintln(w, "},") // close op
......
...@@ -5,6 +5,7 @@ ...@@ -5,6 +5,7 @@
package ssa package ssa
// nilcheckelim eliminates unnecessary nil checks. // nilcheckelim eliminates unnecessary nil checks.
// Runs on machine-independent code.
func nilcheckelim(f *Func) { func nilcheckelim(f *Func) {
// A nil check is redundant if the same nil check was successful in a // A nil check is redundant if the same nil check was successful in a
// dominating block. The efficacy of this pass depends heavily on the // dominating block. The efficacy of this pass depends heavily on the
...@@ -26,14 +27,13 @@ func nilcheckelim(f *Func) { ...@@ -26,14 +27,13 @@ func nilcheckelim(f *Func) {
type walkState int type walkState int
const ( const (
Work walkState = iota // clear nil check if we should and traverse to dominees regardless Work walkState = iota // process nil checks and traverse to dominees
RecPtr // record the pointer as being nil checked ClearPtr // forget the fact that ptr is known non-nil
ClearPtr
) )
type bp struct { type bp struct {
block *Block // block, or nil in RecPtr/ClearPtr state block *Block // block, or nil in ClearPtr state
ptr *Value // if non-nil, ptr that is to be set/cleared in RecPtr/ClearPtr state ptr *Value // if non-nil, ptr that is to be cleared in ClearPtr state
op walkState op walkState
} }
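For intuition about the dominator-based elimination described above, a hedged source-level example (names invented for illustration):

package main

type point struct{ x, y int }

// sum implies a nil check of p at each field access. The first check
// records p as non-nil for the dominated code that follows, so the second
// NilCheck is reset to OpUnknown and dropped from the block's value list.
func sum(p *point) int {
	a := p.x // implicit NilCheck of p
	b := p.y // redundant: dominated by the check above
	return a + b
}

func main() { println(sum(&point{1, 2})) }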
...@@ -76,54 +76,62 @@ func nilcheckelim(f *Func) { ...@@ -76,54 +76,62 @@ func nilcheckelim(f *Func) {
switch node.op { switch node.op {
case Work: case Work:
checked := checkedptr(node.block) // ptr being checked for nil/non-nil b := node.block
nonnil := nonnilptr(node.block) // ptr that is non-nil due to this blocks pred
// First, see if we're dominated by an explicit nil check.
if checked != nil { if len(b.Preds) == 1 {
// already have a nilcheck in the dominator path, or this block is a success p := b.Preds[0].b
// block for the same value it is checking if p.Kind == BlockIf && p.Control.Op == OpIsNonNil && p.Succs[0].b == b {
if nonNilValues[checked.ID] || checked == nonnil { ptr := p.Control.Args[0]
// Eliminate the nil check. if !nonNilValues[ptr.ID] {
// The deadcode pass will remove vestigial values, nonNilValues[ptr.ID] = true
// and the fuse pass will join this block with its successor. work = append(work, bp{op: ClearPtr, ptr: ptr})
// Logging in the style of the former compiler -- and omit line 1,
// which is usually in generated code.
if f.Config.Debug_checknil() && node.block.Control.Line > 1 {
f.Config.Warnl(node.block.Control.Line, "removed nil check")
} }
}
}
switch node.block.Kind { // Next, process values in the block.
case BlockIf: i := 0
node.block.Kind = BlockFirst for _, v := range b.Values {
node.block.SetControl(nil) b.Values[i] = v
case BlockCheck: i++
node.block.Kind = BlockPlain switch v.Op {
node.block.SetControl(nil) case OpIsNonNil:
default: ptr := v.Args[0]
f.Fatalf("bad block kind in nilcheck %s", node.block.Kind) if nonNilValues[ptr.ID] {
// This is a redundant explicit nil check.
v.reset(OpConstBool)
v.AuxInt = 1 // true
} }
case OpNilCheck:
ptr := v.Args[0]
if nonNilValues[ptr.ID] {
// This is a redundant implicit nil check.
// Logging in the style of the former compiler -- and omit line 1,
// which is usually in generated code.
if f.Config.Debug_checknil() && v.Line > 1 {
f.Config.Warnl(v.Line, "removed nil check")
}
v.reset(OpUnknown)
i--
continue
}
// Record the fact that we know ptr is non nil, and remember to
// undo that information when this dominator subtree is done.
nonNilValues[ptr.ID] = true
work = append(work, bp{op: ClearPtr, ptr: ptr})
} }
} }
for j := i; j < len(b.Values); j++ {
if nonnil != nil && !nonNilValues[nonnil.ID] { b.Values[j] = nil
// this is a new nilcheck so add a ClearPtr node to clear the
// ptr from the map of nil checks once we traverse
// back up the tree
work = append(work, bp{op: ClearPtr, ptr: nonnil})
} }
b.Values = b.Values[:i]
// add all dominated blocks to the work list // Add all dominated blocks to the work list.
for _, w := range domTree[node.block.ID] { for _, w := range domTree[node.block.ID] {
work = append(work, bp{block: w}) work = append(work, bp{op: Work, block: w})
} }
if nonnil != nil && !nonNilValues[nonnil.ID] {
work = append(work, bp{op: RecPtr, ptr: nonnil})
}
case RecPtr:
nonNilValues[node.ptr.ID] = true
continue
case ClearPtr: case ClearPtr:
nonNilValues[node.ptr.ID] = false nonNilValues[node.ptr.ID] = false
continue continue
...@@ -131,31 +139,86 @@ func nilcheckelim(f *Func) { ...@@ -131,31 +139,86 @@ func nilcheckelim(f *Func) {
} }
} }
// checkedptr returns the Value, if any, // All platforms are guaranteed to fault if we load/store to anything smaller than this address.
// that is used in a nil check in b's Control op. const minZeroPage = 4096
func checkedptr(b *Block) *Value {
if b.Kind == BlockCheck {
return b.Control.Args[0]
}
if b.Kind == BlockIf && b.Control.Op == OpIsNonNil {
return b.Control.Args[0]
}
return nil
}
// nonnilptr returns the Value, if any, // nilcheckelim2 eliminates unnecessary nil checks.
// that is non-nil due to b being the successor block // Runs after lowering and scheduling.
// of an OpIsNonNil or OpNilCheck block for the value and having a single func nilcheckelim2(f *Func) {
// predecessor. unnecessary := f.newSparseSet(f.NumValues())
func nonnilptr(b *Block) *Value { defer f.retSparseSet(unnecessary)
if len(b.Preds) == 1 { for _, b := range f.Blocks {
bp := b.Preds[0].b // Walk the block backwards. Find instructions that will fault if their
if bp.Kind == BlockCheck { // input pointer is nil. Remove nil checks on those pointers, as the
return bp.Control.Args[0] // faulting instruction effectively does the nil check for free.
unnecessary.clear()
for i := len(b.Values) - 1; i >= 0; i-- {
v := b.Values[i]
if opcodeTable[v.Op].nilCheck && unnecessary.contains(v.Args[0].ID) {
if f.Config.Debug_checknil() && int(v.Line) > 1 {
f.Config.Warnl(v.Line, "removed nil check")
}
v.reset(OpUnknown)
continue
}
if v.Type.IsMemory() || v.Type.IsTuple() && v.Type.FieldType(1).IsMemory() {
if v.Op == OpVarDef || v.Op == OpVarKill || v.Op == OpVarLive {
// These ops don't really change memory.
continue
}
// This op changes memory. Any faulting instruction after v that
// we've recorded in the unnecessary map is now obsolete.
unnecessary.clear()
}
// Find any pointers that this op is guaranteed to fault on if nil.
var ptrstore [2]*Value
ptrs := ptrstore[:0]
if opcodeTable[v.Op].faultOnNilArg0 {
ptrs = append(ptrs, v.Args[0])
}
if opcodeTable[v.Op].faultOnNilArg1 {
ptrs = append(ptrs, v.Args[1])
}
for _, ptr := range ptrs {
// Check to make sure the offset is small.
switch opcodeTable[v.Op].auxType {
case auxSymOff:
if v.Aux != nil || v.AuxInt < 0 || v.AuxInt >= minZeroPage {
continue
}
case auxSymValAndOff:
off := ValAndOff(v.AuxInt).Off()
if v.Aux != nil || off < 0 || off >= minZeroPage {
continue
}
case auxInt64:
// ARM uses this auxType for duffcopy/duffzero/alignment info.
// It does not affect the effective address.
case auxNone:
// offset is zero.
default:
v.Fatalf("can't handle aux %s (type %d) yet\n", v.auxString(), int(opcodeTable[v.Op].auxType))
}
// This instruction is guaranteed to fault if ptr is nil.
// Any previous nil check op is unnecessary.
unnecessary.add(ptr.ID)
}
}
// Remove values we've clobbered with OpUnknown.
i := 0
for _, v := range b.Values {
if v.Op != OpUnknown {
b.Values[i] = v
i++
}
} }
if bp.Kind == BlockIf && bp.Control.Op == OpIsNonNil && bp.Succs[0].b == b { for j := i; j < len(b.Values); j++ {
return bp.Control.Args[0] b.Values[j] = nil
} }
b.Values = b.Values[:i]
// TODO: if b.Kind == BlockPlain, start the analysis in the subsequent block to find
// more unnecessary nil checks. Would fix test/nilptr3_ssa.go:157.
} }
return nil
} }
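To make the minZeroPage bound above concrete, a hedged sketch (the struct layouts are invented): a load or store whose constant offset is below 4096 still lands in the unmapped zero page when its base pointer is nil, so it faults just as the explicit check would; a larger offset gives no such guarantee, so the check must stay.

package main

type small struct {
	f int64 // offset 0, well below minZeroPage
}

type big struct {
	pad [8192]byte
	f   int64 // offset 8192, beyond minZeroPage
}

// loadSmall's field load faults at a tiny address if p is nil, so
// nilcheckelim2 can let it stand in for the nil check.
func loadSmall(p *small) int64 { return p.f }

// loadBig's field load would touch address 8192 if p were nil; that page
// is not guaranteed to be unmapped, so the explicit check is kept.
func loadBig(p *big) int64 { return p.f }

func main() {
	println(loadSmall(&small{f: 1}), loadBig(&big{f: 2}))
}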
...@@ -30,6 +30,9 @@ type opInfo struct { ...@@ -30,6 +30,9 @@ type opInfo struct {
resultNotInArgs bool // outputs must not be allocated to the same registers as inputs resultNotInArgs bool // outputs must not be allocated to the same registers as inputs
clobberFlags bool // this op clobbers flags register clobberFlags bool // this op clobbers flags register
call bool // is a function call call bool // is a function call
nilCheck bool // this op is a nil check on arg0
faultOnNilArg0 bool // this op will fault if arg0 is nil (and aux encodes a small offset)
faultOnNilArg1 bool // this op will fault if arg1 is nil (and aux encodes a small offset)
} }
type inputInfo struct { type inputInfo struct {
......
...@@ -232,6 +232,8 @@ func rewriteValuegeneric(v *Value, config *Config) bool { ...@@ -232,6 +232,8 @@ func rewriteValuegeneric(v *Value, config *Config) bool {
return rewriteValuegeneric_OpNeqPtr(v, config) return rewriteValuegeneric_OpNeqPtr(v, config)
case OpNeqSlice: case OpNeqSlice:
return rewriteValuegeneric_OpNeqSlice(v, config) return rewriteValuegeneric_OpNeqSlice(v, config)
case OpNilCheck:
return rewriteValuegeneric_OpNilCheck(v, config)
case OpNot: case OpNot:
return rewriteValuegeneric_OpNot(v, config) return rewriteValuegeneric_OpNot(v, config)
case OpOffPtr: case OpOffPtr:
...@@ -6358,6 +6360,28 @@ func rewriteValuegeneric_OpNeqSlice(v *Value, config *Config) bool { ...@@ -6358,6 +6360,28 @@ func rewriteValuegeneric_OpNeqSlice(v *Value, config *Config) bool {
return true return true
} }
} }
func rewriteValuegeneric_OpNilCheck(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (NilCheck (GetG mem) mem)
// cond:
// result: mem
for {
v_0 := v.Args[0]
if v_0.Op != OpGetG {
break
}
mem := v_0.Args[0]
if mem != v.Args[1] {
break
}
v.reset(OpCopy)
v.Type = mem.Type
v.AddArg(mem)
return true
}
return false
}
func rewriteValuegeneric_OpNot(v *Value, config *Config) bool { func rewriteValuegeneric_OpNot(v *Value, config *Config) bool {
b := v.Block b := v.Block
_ = b _ = b
...@@ -11611,8 +11635,8 @@ func rewriteValuegeneric_OpXor8(v *Value, config *Config) bool { ...@@ -11611,8 +11635,8 @@ func rewriteValuegeneric_OpXor8(v *Value, config *Config) bool {
func rewriteValuegeneric_OpZero(v *Value, config *Config) bool { func rewriteValuegeneric_OpZero(v *Value, config *Config) bool {
b := v.Block b := v.Block
_ = b _ = b
// match: (Zero (Load (OffPtr [c] (SP)) mem:(StaticCall {sym} _)) mem2) // match: (Zero (Load (OffPtr [c] (SP)) mem) mem)
// cond: c == config.ctxt.FixedFrameSize() + config.PtrSize && mem2 == mem && isSameSym(sym, "runtime.newobject") // cond: mem.Op == OpStaticCall && isSameSym(mem.Aux, "runtime.newobject") && c == config.ctxt.FixedFrameSize() + config.PtrSize
// result: mem // result: mem
for { for {
v_0 := v.Args[0] v_0 := v.Args[0]
...@@ -11629,12 +11653,10 @@ func rewriteValuegeneric_OpZero(v *Value, config *Config) bool { ...@@ -11629,12 +11653,10 @@ func rewriteValuegeneric_OpZero(v *Value, config *Config) bool {
break break
} }
mem := v_0.Args[1] mem := v_0.Args[1]
if mem.Op != OpStaticCall { if mem != v.Args[1] {
break break
} }
sym := mem.Aux if !(mem.Op == OpStaticCall && isSameSym(mem.Aux, "runtime.newobject") && c == config.ctxt.FixedFrameSize()+config.PtrSize) {
mem2 := v.Args[1]
if !(c == config.ctxt.FixedFrameSize()+config.PtrSize && mem2 == mem && isSameSym(sym, "runtime.newobject")) {
break break
} }
v.reset(OpCopy) v.reset(OpCopy)
...@@ -11646,99 +11668,6 @@ func rewriteValuegeneric_OpZero(v *Value, config *Config) bool { ...@@ -11646,99 +11668,6 @@ func rewriteValuegeneric_OpZero(v *Value, config *Config) bool {
} }
func rewriteBlockgeneric(b *Block, config *Config) bool { func rewriteBlockgeneric(b *Block, config *Config) bool {
switch b.Kind { switch b.Kind {
case BlockCheck:
// match: (Check (NilCheck (GetG _) _) next)
// cond:
// result: (Plain nil next)
for {
v := b.Control
if v.Op != OpNilCheck {
break
}
v_0 := v.Args[0]
if v_0.Op != OpGetG {
break
}
next := b.Succs[0]
b.Kind = BlockPlain
b.SetControl(nil)
_ = next
return true
}
// match: (Check (NilCheck (Load (OffPtr [c] (SP)) mem:(StaticCall {sym} _)) _) succ)
// cond: c == config.ctxt.FixedFrameSize() + config.PtrSize && isSameSym(sym, "runtime.newobject")
// result: (Plain nil succ)
for {
v := b.Control
if v.Op != OpNilCheck {
break
}
v_0 := v.Args[0]
if v_0.Op != OpLoad {
break
}
v_0_0 := v_0.Args[0]
if v_0_0.Op != OpOffPtr {
break
}
c := v_0_0.AuxInt
v_0_0_0 := v_0_0.Args[0]
if v_0_0_0.Op != OpSP {
break
}
mem := v_0.Args[1]
if mem.Op != OpStaticCall {
break
}
sym := mem.Aux
succ := b.Succs[0]
if !(c == config.ctxt.FixedFrameSize()+config.PtrSize && isSameSym(sym, "runtime.newobject")) {
break
}
b.Kind = BlockPlain
b.SetControl(nil)
_ = succ
return true
}
// match: (Check (NilCheck (OffPtr (Load (OffPtr [c] (SP)) mem:(StaticCall {sym} _))) _) succ)
// cond: c == config.ctxt.FixedFrameSize() + config.PtrSize && isSameSym(sym, "runtime.newobject")
// result: (Plain nil succ)
for {
v := b.Control
if v.Op != OpNilCheck {
break
}
v_0 := v.Args[0]
if v_0.Op != OpOffPtr {
break
}
v_0_0 := v_0.Args[0]
if v_0_0.Op != OpLoad {
break
}
v_0_0_0 := v_0_0.Args[0]
if v_0_0_0.Op != OpOffPtr {
break
}
c := v_0_0_0.AuxInt
v_0_0_0_0 := v_0_0_0.Args[0]
if v_0_0_0_0.Op != OpSP {
break
}
mem := v_0_0.Args[1]
if mem.Op != OpStaticCall {
break
}
sym := mem.Aux
succ := b.Succs[0]
if !(c == config.ctxt.FixedFrameSize()+config.PtrSize && isSameSym(sym, "runtime.newobject")) {
break
}
b.Kind = BlockPlain
b.SetControl(nil)
_ = succ
return true
}
case BlockIf: case BlockIf:
// match: (If (Not cond) yes no) // match: (If (Not cond) yes no)
// cond: // cond:
......
...@@ -8,6 +8,7 @@ import "container/heap" ...@@ -8,6 +8,7 @@ import "container/heap"
const ( const (
ScorePhi = iota // towards top of block ScorePhi = iota // towards top of block
ScoreNilCheck
ScoreReadTuple ScoreReadTuple
ScoreVarDef ScoreVarDef
ScoreMemory ScoreMemory
...@@ -96,6 +97,9 @@ func schedule(f *Func) { ...@@ -96,6 +97,9 @@ func schedule(f *Func) {
f.Fatalf("LoweredGetClosurePtr appeared outside of entry block, b=%s", b.String()) f.Fatalf("LoweredGetClosurePtr appeared outside of entry block, b=%s", b.String())
} }
score[v.ID] = ScorePhi score[v.ID] = ScorePhi
case v.Op == OpAMD64LoweredNilCheck || v.Op == OpPPC64LoweredNilCheck || v.Op == OpARMLoweredNilCheck || v.Op == OpARM64LoweredNilCheck || v.Op == Op386LoweredNilCheck || v.Op == OpMIPS64LoweredNilCheck:
// Nil checks must come before loads from the same address.
score[v.ID] = ScoreNilCheck
case v.Op == OpPhi: case v.Op == OpPhi:
// We want all the phis first. // We want all the phis first.
score[v.ID] = ScorePhi score[v.ID] = ScorePhi
......
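A brief note on the ScoreNilCheck addition above: NilCheck now returns void, so no load or store data-depends on it, and the scheduler needs an explicit bias to keep the check ahead of memory operations on the same pointer. A hedged, schematic sketch (made-up value numbers, not real SSA dump syntax):

// v1 = LoweredNilCheck p mem   // void result; nothing consumes it
// v2 = MOVQload [8] p mem      // also faults if p is nil
//
// Scoring v1 with ScoreNilCheck keeps the check ahead of the load, which
// preserves the check-before-use order and lets nilcheckelim2, walking the
// block backwards, see the faulting load and drop the check as unnecessary.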
...@@ -820,54 +820,6 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -820,54 +820,6 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
case ssa.OpKeepAlive: case ssa.OpKeepAlive:
gc.KeepAlive(v) gc.KeepAlive(v)
case ssa.Op386LoweredNilCheck: case ssa.Op386LoweredNilCheck:
// Optimization - if the subsequent block has a load or store
// at the same address, we don't need to issue this instruction.
mem := v.Args[1]
for _, w := range v.Block.Succs[0].Block().Values {
if w.Op == ssa.OpPhi {
if w.Type.IsMemory() {
mem = w
}
continue
}
if len(w.Args) == 0 || !w.Args[len(w.Args)-1].Type.IsMemory() {
// w doesn't use a store - can't be a memory op.
continue
}
if w.Args[len(w.Args)-1] != mem {
v.Fatalf("wrong store after nilcheck v=%s w=%s", v, w)
}
switch w.Op {
case ssa.Op386MOVLload, ssa.Op386MOVWload, ssa.Op386MOVBload,
ssa.Op386MOVLstore, ssa.Op386MOVWstore, ssa.Op386MOVBstore,
ssa.Op386MOVBLSXload, ssa.Op386MOVWLSXload,
ssa.Op386MOVSSload, ssa.Op386MOVSDload,
ssa.Op386MOVSSstore, ssa.Op386MOVSDstore:
if w.Args[0] == v.Args[0] && w.Aux == nil && w.AuxInt >= 0 && w.AuxInt < minZeroPage {
if gc.Debug_checknil != 0 && int(v.Line) > 1 {
gc.Warnl(v.Line, "removed nil check")
}
return
}
case ssa.Op386MOVLstoreconst, ssa.Op386MOVWstoreconst, ssa.Op386MOVBstoreconst:
off := ssa.ValAndOff(v.AuxInt).Off()
if w.Args[0] == v.Args[0] && w.Aux == nil && off >= 0 && off < minZeroPage {
if gc.Debug_checknil != 0 && int(v.Line) > 1 {
gc.Warnl(v.Line, "removed nil check")
}
return
}
}
if w.Type.IsMemory() {
if w.Op == ssa.OpVarDef || w.Op == ssa.OpVarKill || w.Op == ssa.OpVarLive {
// these ops are OK
mem = w
continue
}
// We can't delay the nil check past the next store.
break
}
}
// Issue a load which will fault if the input is nil. // Issue a load which will fault if the input is nil.
// TODO: We currently use the 2-byte instruction TESTB AX, (reg). // TODO: We currently use the 2-byte instruction TESTB AX, (reg).
// Should we use the 3-byte TESTB $0, (reg) instead? It is larger // Should we use the 3-byte TESTB $0, (reg) instead? It is larger
...@@ -925,7 +877,7 @@ func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) { ...@@ -925,7 +877,7 @@ func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
} }
switch b.Kind { switch b.Kind {
case ssa.BlockPlain, ssa.BlockCheck: case ssa.BlockPlain:
if b.Succs[0].Block() != next { if b.Succs[0].Block() != next {
p := gc.Prog(obj.AJMP) p := gc.Prog(obj.AJMP)
p.To.Type = obj.TYPE_BRANCH p.To.Type = obj.TYPE_BRANCH
......
...@@ -154,7 +154,7 @@ func f4(x *[10]int) { ...@@ -154,7 +154,7 @@ func f4(x *[10]int) {
// and the offset is small enough that if x is nil, the address will still be // and the offset is small enough that if x is nil, the address will still be
// in the first unmapped page of memory. // in the first unmapped page of memory.
_ = x[9] // ERROR "removed nil check" _ = x[9] // ERROR "generated nil check" // bug: would like to remove this check (but nilcheck and load are in different blocks)
for { for {
if x[9] != 0 { // ERROR "removed nil check" if x[9] != 0 { // ERROR "removed nil check"
......