Commit f5531782 authored by Cherry Zhang

[dev.ssa] cmd/compile: ensure alignment for Zero and Move in SSA for ARM

Encode the size and the alignment into AuxInt of Zero and Move ops.
On AMD64, we simply don't look at the alignment. On ARM and PPC64, we
only generate aligned stores.

Updates #15365.

Change-Id: Ifdcc205c364f67c4516b9adebfe7d50d223b6863
Reviewed-on: https://go-review.googlesource.com/24511
Reviewed-by: David Chase <drchase@google.com>
Reviewed-by: Keith Randall <khr@golang.org>
Run-TryBot: Cherry Zhang <cherryyz@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
parent 95427d25
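
The heart of the change is the new AuxInt encoding for Zero and Move: the alignment is packed into the top 8 bits and the size into the low 56 bits (see the ssa.SizeAndAlign helpers added to cmd/compile/internal/ssa/op.go in the diff below). A minimal standalone sketch of that packing, using hypothetical local names rather than the compiler's internal package:

package main

import "fmt"

// sizeAndAlign mirrors the encoding this CL introduces:
// high 8 bits = alignment, low 56 bits = size.
type sizeAndAlign int64

func makeSizeAndAlign(size, align int64) sizeAndAlign {
	return sizeAndAlign(size | align<<56)
}

func (x sizeAndAlign) Size() int64  { return int64(x) & (1<<56 - 1) }
func (x sizeAndAlign) Align() int64 { return int64(uint64(x) >> 56) }

func main() {
	// e.g. a 24-byte value with 8-byte alignment
	s := makeSizeAndAlign(24, 8)
	fmt.Println(s.Size(), s.Align()) // 24 8
}
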
@@ -547,7 +547,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
} }
return return
} }
case ssa.OpARMDUFFZERO, ssa.OpARMLoweredZero: case ssa.OpARMDUFFZERO, ssa.OpARMLoweredZero, ssa.OpARMLoweredZeroU:
// arg0 is ptr // arg0 is ptr
if w.Args[0] == v.Args[0] { if w.Args[0] == v.Args[0] {
if gc.Debug_checknil != 0 && int(v.Line) > 1 { if gc.Debug_checknil != 0 && int(v.Line) > 1 {
@@ -555,7 +555,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
} }
return return
} }
case ssa.OpARMDUFFCOPY, ssa.OpARMLoweredMove: case ssa.OpARMDUFFCOPY, ssa.OpARMLoweredMove, ssa.OpARMLoweredMoveU:
// arg0 is dst ptr, arg1 is src ptr // arg0 is dst ptr, arg1 is src ptr
if w.Args[0] == v.Args[0] || w.Args[1] == v.Args[0] { if w.Args[0] == v.Args[0] || w.Args[1] == v.Args[0] {
if gc.Debug_checknil != 0 && int(v.Line) > 1 { if gc.Debug_checknil != 0 && int(v.Line) > 1 {
@@ -585,19 +585,25 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
if gc.Debug_checknil != 0 && v.Line > 1 { // v.Line==1 in generated wrappers if gc.Debug_checknil != 0 && v.Line > 1 { // v.Line==1 in generated wrappers
gc.Warnl(v.Line, "generated nil check") gc.Warnl(v.Line, "generated nil check")
} }
case ssa.OpARMLoweredZero: case ssa.OpARMLoweredZero, ssa.OpARMLoweredZeroU:
// MOVW.P Rarg2, 4(R1) // MOVW.P Rarg2, 4(R1)
// CMP Rarg1, R1 // CMP Rarg1, R1
// BLT -2(PC) // BLT -2(PC)
// arg1 is the end of memory to zero // arg1 is the end of memory to zero
// arg2 is known to be zero // arg2 is known to be zero
p := gc.Prog(arm.AMOVW) var sz int64 = 4
mov := arm.AMOVW
if v.Op == ssa.OpARMLoweredZeroU { // unaligned
sz = 1
mov = arm.AMOVB
}
p := gc.Prog(mov)
p.Scond = arm.C_PBIT p.Scond = arm.C_PBIT
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = gc.SSARegNum(v.Args[2]) p.From.Reg = gc.SSARegNum(v.Args[2])
p.To.Type = obj.TYPE_MEM p.To.Type = obj.TYPE_MEM
p.To.Reg = arm.REG_R1 p.To.Reg = arm.REG_R1
p.To.Offset = 4 p.To.Offset = sz
p2 := gc.Prog(arm.ACMP) p2 := gc.Prog(arm.ACMP)
p2.From.Type = obj.TYPE_REG p2.From.Type = obj.TYPE_REG
p2.From.Reg = gc.SSARegNum(v.Args[1]) p2.From.Reg = gc.SSARegNum(v.Args[1])
@@ -605,26 +611,32 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p3 := gc.Prog(arm.ABLT) p3 := gc.Prog(arm.ABLT)
p3.To.Type = obj.TYPE_BRANCH p3.To.Type = obj.TYPE_BRANCH
gc.Patch(p3, p) gc.Patch(p3, p)
case ssa.OpARMLoweredMove: case ssa.OpARMLoweredMove, ssa.OpARMLoweredMoveU:
// MOVW.P 4(R1), Rtmp // MOVW.P 4(R1), Rtmp
// MOVW.P Rtmp, 4(R2) // MOVW.P Rtmp, 4(R2)
// CMP Rarg2, R1 // CMP Rarg2, R1
// BLT -3(PC) // BLT -3(PC)
// arg2 is the end of src // arg2 is the end of src
p := gc.Prog(arm.AMOVW) var sz int64 = 4
mov := arm.AMOVW
if v.Op == ssa.OpARMLoweredMoveU { // unaligned
sz = 1
mov = arm.AMOVB
}
p := gc.Prog(mov)
p.Scond = arm.C_PBIT p.Scond = arm.C_PBIT
p.From.Type = obj.TYPE_MEM p.From.Type = obj.TYPE_MEM
p.From.Reg = arm.REG_R1 p.From.Reg = arm.REG_R1
p.From.Offset = 4 p.From.Offset = sz
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
p.To.Reg = arm.REGTMP p.To.Reg = arm.REGTMP
p2 := gc.Prog(arm.AMOVW) p2 := gc.Prog(mov)
p2.Scond = arm.C_PBIT p2.Scond = arm.C_PBIT
p2.From.Type = obj.TYPE_REG p2.From.Type = obj.TYPE_REG
p2.From.Reg = arm.REGTMP p2.From.Reg = arm.REGTMP
p2.To.Type = obj.TYPE_MEM p2.To.Type = obj.TYPE_MEM
p2.To.Reg = arm.REG_R2 p2.To.Reg = arm.REG_R2
p2.To.Offset = 4 p2.To.Offset = sz
p3 := gc.Prog(arm.ACMP) p3 := gc.Prog(arm.ACMP)
p3.From.Type = obj.TYPE_REG p3.From.Type = obj.TYPE_REG
p3.From.Reg = gc.SSARegNum(v.Args[2]) p3.From.Reg = gc.SSARegNum(v.Args[2])
......
@@ -2254,7 +2254,7 @@ func (s *state) append(n *Node, inplace bool) *ssa.Value {
if haspointers(et) { if haspointers(et) {
s.insertWBmove(et, addr, arg.v, n.Lineno, arg.isVolatile) s.insertWBmove(et, addr, arg.v, n.Lineno, arg.isVolatile)
} else { } else {
s.vars[&memVar] = s.newValue3I(ssa.OpMove, ssa.TypeMem, et.Size(), addr, arg.v, s.mem()) s.vars[&memVar] = s.newValue3I(ssa.OpMove, ssa.TypeMem, SizeAlignAuxInt(et), addr, arg.v, s.mem())
} }
} }
} }
@@ -2387,14 +2387,14 @@ func (s *state) assign(left *Node, right *ssa.Value, wb, deref bool, line int32,
if deref { if deref {
// Treat as a mem->mem move. // Treat as a mem->mem move.
if right == nil { if right == nil {
s.vars[&memVar] = s.newValue2I(ssa.OpZero, ssa.TypeMem, t.Size(), addr, s.mem()) s.vars[&memVar] = s.newValue2I(ssa.OpZero, ssa.TypeMem, SizeAlignAuxInt(t), addr, s.mem())
return return
} }
if wb { if wb {
s.insertWBmove(t, addr, right, line, rightIsVolatile) s.insertWBmove(t, addr, right, line, rightIsVolatile)
return return
} }
s.vars[&memVar] = s.newValue3I(ssa.OpMove, ssa.TypeMem, t.Size(), addr, right, s.mem()) s.vars[&memVar] = s.newValue3I(ssa.OpMove, ssa.TypeMem, SizeAlignAuxInt(t), addr, right, s.mem())
return return
} }
// Treat as a store. // Treat as a store.
@@ -3080,7 +3080,7 @@ func (s *state) insertWBmove(t *Type, left, right *ssa.Value, line int32, rightI
tmp := temp(t) tmp := temp(t)
s.vars[&memVar] = s.newValue1A(ssa.OpVarDef, ssa.TypeMem, tmp, s.mem()) s.vars[&memVar] = s.newValue1A(ssa.OpVarDef, ssa.TypeMem, tmp, s.mem())
tmpaddr, _ := s.addr(tmp, true) tmpaddr, _ := s.addr(tmp, true)
s.vars[&memVar] = s.newValue3I(ssa.OpMove, ssa.TypeMem, t.Size(), tmpaddr, right, s.mem()) s.vars[&memVar] = s.newValue3I(ssa.OpMove, ssa.TypeMem, SizeAlignAuxInt(t), tmpaddr, right, s.mem())
// Issue typedmemmove call. // Issue typedmemmove call.
taddr := s.newValue1A(ssa.OpAddr, Types[TUINTPTR], &ssa.ExternSymbol{Typ: Types[TUINTPTR], Sym: typenamesym(t)}, s.sb) taddr := s.newValue1A(ssa.OpAddr, Types[TUINTPTR], &ssa.ExternSymbol{Typ: Types[TUINTPTR], Sym: typenamesym(t)}, s.sb)
s.rtcall(typedmemmove, true, nil, taddr, left, tmpaddr) s.rtcall(typedmemmove, true, nil, taddr, left, tmpaddr)
@@ -3090,7 +3090,7 @@ func (s *state) insertWBmove(t *Type, left, right *ssa.Value, line int32, rightI
s.endBlock().AddEdgeTo(bEnd) s.endBlock().AddEdgeTo(bEnd)
s.startBlock(bElse) s.startBlock(bElse)
s.vars[&memVar] = s.newValue3I(ssa.OpMove, ssa.TypeMem, t.Size(), left, right, s.mem()) s.vars[&memVar] = s.newValue3I(ssa.OpMove, ssa.TypeMem, SizeAlignAuxInt(t), left, right, s.mem())
s.endBlock().AddEdgeTo(bEnd) s.endBlock().AddEdgeTo(bEnd)
s.startBlock(bEnd) s.startBlock(bEnd)
@@ -4190,6 +4190,11 @@ func AddAux2(a *obj.Addr, v *ssa.Value, offset int64) {
} }
} }
// SizeAlignAuxInt returns an AuxInt encoding the size and alignment of type t.
func SizeAlignAuxInt(t *Type) int64 {
return ssa.MakeSizeAndAlign(t.Size(), t.Alignment()).Int64()
}
// extendIndex extends v to a full int width. // extendIndex extends v to a full int width.
// panic using the given function if v does not fit in an int (only on 32-bit archs). // panic using the given function if v does not fit in an int (only on 32-bit archs).
func (s *state) extendIndex(v *ssa.Value, panicfn *Node) *ssa.Value { func (s *state) extendIndex(v *ssa.Value, panicfn *Node) *ssa.Value {
......
@@ -89,7 +89,7 @@ func dse(f *Func) {
} else { } else {
// zero addr mem // zero addr mem
sz := v.Args[0].Type.ElemType().Size() sz := v.Args[0].Type.ElemType().Size()
if v.AuxInt != sz { if SizeAndAlign(v.AuxInt).Size() != sz {
f.Fatalf("mismatched zero/store sizes: %d and %d [%s]", f.Fatalf("mismatched zero/store sizes: %d and %d [%s]",
v.AuxInt, sz, v.LongString()) v.AuxInt, sz, v.LongString())
} }
......
@@ -302,39 +302,47 @@
(Store [1] ptr val mem) -> (MOVBstore ptr val mem) (Store [1] ptr val mem) -> (MOVBstore ptr val mem)
// Lowering moves // Lowering moves
(Move [0] _ _ mem) -> mem (Move [s] _ _ mem) && SizeAndAlign(s).Size() == 0 -> mem
(Move [1] dst src mem) -> (MOVBstore dst (MOVBload src mem) mem) (Move [s] dst src mem) && SizeAndAlign(s).Size() == 1 -> (MOVBstore dst (MOVBload src mem) mem)
(Move [2] dst src mem) -> (MOVWstore dst (MOVWload src mem) mem) (Move [s] dst src mem) && SizeAndAlign(s).Size() == 2 -> (MOVWstore dst (MOVWload src mem) mem)
(Move [4] dst src mem) -> (MOVLstore dst (MOVLload src mem) mem) (Move [s] dst src mem) && SizeAndAlign(s).Size() == 4 -> (MOVLstore dst (MOVLload src mem) mem)
(Move [8] dst src mem) -> (MOVQstore dst (MOVQload src mem) mem) (Move [s] dst src mem) && SizeAndAlign(s).Size() == 8 -> (MOVQstore dst (MOVQload src mem) mem)
(Move [16] dst src mem) -> (MOVOstore dst (MOVOload src mem) mem) (Move [s] dst src mem) && SizeAndAlign(s).Size() == 16 -> (MOVOstore dst (MOVOload src mem) mem)
(Move [3] dst src mem) -> (Move [s] dst src mem) && SizeAndAlign(s).Size() == 3 ->
(MOVBstore [2] dst (MOVBload [2] src mem) (MOVBstore [2] dst (MOVBload [2] src mem)
(MOVWstore dst (MOVWload src mem) mem)) (MOVWstore dst (MOVWload src mem) mem))
(Move [5] dst src mem) -> (Move [s] dst src mem) && SizeAndAlign(s).Size() == 5 ->
(MOVBstore [4] dst (MOVBload [4] src mem) (MOVBstore [4] dst (MOVBload [4] src mem)
(MOVLstore dst (MOVLload src mem) mem)) (MOVLstore dst (MOVLload src mem) mem))
(Move [6] dst src mem) -> (Move [s] dst src mem) && SizeAndAlign(s).Size() == 6 ->
(MOVWstore [4] dst (MOVWload [4] src mem) (MOVWstore [4] dst (MOVWload [4] src mem)
(MOVLstore dst (MOVLload src mem) mem)) (MOVLstore dst (MOVLload src mem) mem))
(Move [7] dst src mem) -> (Move [s] dst src mem) && SizeAndAlign(s).Size() == 7 ->
(MOVLstore [3] dst (MOVLload [3] src mem) (MOVLstore [3] dst (MOVLload [3] src mem)
(MOVLstore dst (MOVLload src mem) mem)) (MOVLstore dst (MOVLload src mem) mem))
(Move [size] dst src mem) && size > 8 && size < 16 -> (Move [s] dst src mem) && SizeAndAlign(s).Size() > 8 && SizeAndAlign(s).Size() < 16 ->
(MOVQstore [size-8] dst (MOVQload [size-8] src mem) (MOVQstore [SizeAndAlign(s).Size()-8] dst (MOVQload [SizeAndAlign(s).Size()-8] src mem)
(MOVQstore dst (MOVQload src mem) mem)) (MOVQstore dst (MOVQload src mem) mem))
// Adjust moves to be a multiple of 16 bytes. // Adjust moves to be a multiple of 16 bytes.
(Move [size] dst src mem) && size > 16 && size%16 != 0 && size%16 <= 8 -> (Move [s] dst src mem)
(Move [size-size%16] (ADDQconst <dst.Type> dst [size%16]) (ADDQconst <src.Type> src [size%16]) && SizeAndAlign(s).Size() > 16 && SizeAndAlign(s).Size()%16 != 0 && SizeAndAlign(s).Size()%16 <= 8 ->
(Move [SizeAndAlign(s).Size()-SizeAndAlign(s).Size()%16]
(ADDQconst <dst.Type> dst [SizeAndAlign(s).Size()%16])
(ADDQconst <src.Type> src [SizeAndAlign(s).Size()%16])
(MOVQstore dst (MOVQload src mem) mem)) (MOVQstore dst (MOVQload src mem) mem))
(Move [size] dst src mem) && size > 16 && size%16 != 0 && size%16 > 8 -> (Move [s] dst src mem)
(Move [size-size%16] (ADDQconst <dst.Type> dst [size%16]) (ADDQconst <src.Type> src [size%16]) && SizeAndAlign(s).Size() > 16 && SizeAndAlign(s).Size()%16 != 0 && SizeAndAlign(s).Size()%16 > 8 ->
(Move [SizeAndAlign(s).Size()-SizeAndAlign(s).Size()%16]
(ADDQconst <dst.Type> dst [SizeAndAlign(s).Size()%16])
(ADDQconst <src.Type> src [SizeAndAlign(s).Size()%16])
(MOVOstore dst (MOVOload src mem) mem)) (MOVOstore dst (MOVOload src mem) mem))
// Medium copying uses a duff device. // Medium copying uses a duff device.
(Move [size] dst src mem) && size >= 32 && size <= 16*64 && size%16 == 0 && !config.noDuffDevice -> (Move [s] dst src mem)
(DUFFCOPY [14*(64-size/16)] dst src mem) && SizeAndAlign(s).Size() >= 32 && SizeAndAlign(s).Size() <= 16*64 && SizeAndAlign(s).Size()%16 == 0
&& !config.noDuffDevice ->
(DUFFCOPY [14*(64-SizeAndAlign(s).Size()/16)] dst src mem)
// 14 and 64 are magic constants. 14 is the number of bytes to encode: // 14 and 64 are magic constants. 14 is the number of bytes to encode:
// MOVUPS (SI), X0 // MOVUPS (SI), X0
// ADDQ $16, SI // ADDQ $16, SI
@@ -343,57 +351,64 @@
// and 64 is the number of such blocks. See src/runtime/duff_amd64.s:duffcopy. // and 64 is the number of such blocks. See src/runtime/duff_amd64.s:duffcopy.
// Large copying uses REP MOVSQ. // Large copying uses REP MOVSQ.
(Move [size] dst src mem) && (size > 16*64 || config.noDuffDevice) && size%8 == 0 -> (Move [s] dst src mem) && (SizeAndAlign(s).Size() > 16*64 || config.noDuffDevice) && SizeAndAlign(s).Size()%8 == 0 ->
(REPMOVSQ dst src (MOVQconst [size/8]) mem) (REPMOVSQ dst src (MOVQconst [SizeAndAlign(s).Size()/8]) mem)
// Lowering Zero instructions // Lowering Zero instructions
(Zero [0] _ mem) -> mem (Zero [s] _ mem) && SizeAndAlign(s).Size() == 0 -> mem
(Zero [1] destptr mem) -> (MOVBstoreconst [0] destptr mem) (Zero [s] destptr mem) && SizeAndAlign(s).Size() == 1 -> (MOVBstoreconst [0] destptr mem)
(Zero [2] destptr mem) -> (MOVWstoreconst [0] destptr mem) (Zero [s] destptr mem) && SizeAndAlign(s).Size() == 2 -> (MOVWstoreconst [0] destptr mem)
(Zero [4] destptr mem) -> (MOVLstoreconst [0] destptr mem) (Zero [s] destptr mem) && SizeAndAlign(s).Size() == 4 -> (MOVLstoreconst [0] destptr mem)
(Zero [8] destptr mem) -> (MOVQstoreconst [0] destptr mem) (Zero [s] destptr mem) && SizeAndAlign(s).Size() == 8 -> (MOVQstoreconst [0] destptr mem)
(Zero [3] destptr mem) -> (Zero [s] destptr mem) && SizeAndAlign(s).Size() == 3 ->
(MOVBstoreconst [makeValAndOff(0,2)] destptr (MOVBstoreconst [makeValAndOff(0,2)] destptr
(MOVWstoreconst [0] destptr mem)) (MOVWstoreconst [0] destptr mem))
(Zero [5] destptr mem) -> (Zero [s] destptr mem) && SizeAndAlign(s).Size() == 5 ->
(MOVBstoreconst [makeValAndOff(0,4)] destptr (MOVBstoreconst [makeValAndOff(0,4)] destptr
(MOVLstoreconst [0] destptr mem)) (MOVLstoreconst [0] destptr mem))
(Zero [6] destptr mem) -> (Zero [s] destptr mem) && SizeAndAlign(s).Size() == 6 ->
(MOVWstoreconst [makeValAndOff(0,4)] destptr (MOVWstoreconst [makeValAndOff(0,4)] destptr
(MOVLstoreconst [0] destptr mem)) (MOVLstoreconst [0] destptr mem))
(Zero [7] destptr mem) -> (Zero [s] destptr mem) && SizeAndAlign(s).Size() == 7 ->
(MOVLstoreconst [makeValAndOff(0,3)] destptr (MOVLstoreconst [makeValAndOff(0,3)] destptr
(MOVLstoreconst [0] destptr mem)) (MOVLstoreconst [0] destptr mem))
// Strip off any fractional word zeroing. // Strip off any fractional word zeroing.
(Zero [size] destptr mem) && size%8 != 0 && size > 8 -> (Zero [s] destptr mem) && SizeAndAlign(s).Size()%8 != 0 && SizeAndAlign(s).Size() > 8 ->
(Zero [size-size%8] (ADDQconst destptr [size%8]) (Zero [SizeAndAlign(s).Size()-SizeAndAlign(s).Size()%8] (ADDQconst destptr [SizeAndAlign(s).Size()%8])
(MOVQstoreconst [0] destptr mem)) (MOVQstoreconst [0] destptr mem))
// Zero small numbers of words directly. // Zero small numbers of words directly.
(Zero [16] destptr mem) -> (Zero [s] destptr mem) && SizeAndAlign(s).Size() == 16 ->
(MOVQstoreconst [makeValAndOff(0,8)] destptr (MOVQstoreconst [makeValAndOff(0,8)] destptr
(MOVQstoreconst [0] destptr mem)) (MOVQstoreconst [0] destptr mem))
(Zero [24] destptr mem) -> (Zero [s] destptr mem) && SizeAndAlign(s).Size() == 24 ->
(MOVQstoreconst [makeValAndOff(0,16)] destptr (MOVQstoreconst [makeValAndOff(0,16)] destptr
(MOVQstoreconst [makeValAndOff(0,8)] destptr (MOVQstoreconst [makeValAndOff(0,8)] destptr
(MOVQstoreconst [0] destptr mem))) (MOVQstoreconst [0] destptr mem)))
(Zero [32] destptr mem) -> (Zero [s] destptr mem) && SizeAndAlign(s).Size() == 32 ->
(MOVQstoreconst [makeValAndOff(0,24)] destptr (MOVQstoreconst [makeValAndOff(0,24)] destptr
(MOVQstoreconst [makeValAndOff(0,16)] destptr (MOVQstoreconst [makeValAndOff(0,16)] destptr
(MOVQstoreconst [makeValAndOff(0,8)] destptr (MOVQstoreconst [makeValAndOff(0,8)] destptr
(MOVQstoreconst [0] destptr mem)))) (MOVQstoreconst [0] destptr mem))))
// Medium zeroing uses a duff device. // Medium zeroing uses a duff device.
(Zero [size] destptr mem) && size <= 1024 && size%8 == 0 && size%16 != 0 && !config.noDuffDevice -> (Zero [s] destptr mem)
(Zero [size-8] (ADDQconst [8] destptr) (MOVQstore destptr (MOVQconst [0]) mem)) && SizeAndAlign(s).Size() <= 1024 && SizeAndAlign(s).Size()%8 == 0 && SizeAndAlign(s).Size()%16 != 0
(Zero [size] destptr mem) && size <= 1024 && size%16 == 0 && !config.noDuffDevice -> && !config.noDuffDevice ->
(DUFFZERO [duffStart(size)] (ADDQconst [duffAdj(size)] destptr) (MOVOconst [0]) mem) (Zero [SizeAndAlign(s).Size()-8] (ADDQconst [8] destptr) (MOVQstore destptr (MOVQconst [0]) mem))
(Zero [s] destptr mem)
&& SizeAndAlign(s).Size() <= 1024 && SizeAndAlign(s).Size()%16 == 0 && !config.noDuffDevice ->
(DUFFZERO [duffStart(SizeAndAlign(s).Size())]
(ADDQconst [duffAdj(SizeAndAlign(s).Size())] destptr) (MOVOconst [0])
mem)
// Large zeroing uses REP STOSQ. // Large zeroing uses REP STOSQ.
(Zero [size] destptr mem) && (size > 1024 || (config.noDuffDevice && size > 32)) && size%8 == 0 -> (Zero [s] destptr mem)
(REPSTOSQ destptr (MOVQconst [size/8]) (MOVQconst [0]) mem) && (SizeAndAlign(s).Size() > 1024 || (config.noDuffDevice && SizeAndAlign(s).Size() > 32))
&& SizeAndAlign(s).Size()%8 == 0 ->
(REPSTOSQ destptr (MOVQconst [SizeAndAlign(s).Size()/8]) (MOVQconst [0]) mem)
// Lowering constants // Lowering constants
(Const8 [val]) -> (MOVLconst [val]) (Const8 [val]) -> (MOVLconst [val])
......
@@ -269,54 +269,86 @@
(Store [8] ptr val mem) && is64BitFloat(val.Type) -> (MOVDstore ptr val mem) (Store [8] ptr val mem) && is64BitFloat(val.Type) -> (MOVDstore ptr val mem)
// zero instructions // zero instructions
//TODO: check alignment? (Zero [s] _ mem) && SizeAndAlign(s).Size() == 0 -> mem
(Zero [0] _ mem) -> mem (Zero [s] ptr mem) && SizeAndAlign(s).Size() == 1 -> (MOVBstore ptr (MOVWconst [0]) mem)
(Zero [1] ptr mem) -> (MOVBstore ptr (MOVWconst [0]) mem) (Zero [s] ptr mem) && SizeAndAlign(s).Size() == 2 && SizeAndAlign(s).Align()%2 == 0 ->
(Zero [2] ptr mem) -> (MOVHstore ptr (MOVWconst [0]) mem) (MOVHstore ptr (MOVWconst [0]) mem)
(Zero [4] ptr mem) -> (MOVWstore ptr (MOVWconst [0]) mem) (Zero [s] ptr mem) && SizeAndAlign(s).Size() == 2 ->
(MOVBstore [1] ptr (MOVWconst [0])
(Zero [3] ptr mem) -> (MOVBstore [0] ptr (MOVWconst [0]) mem))
(MOVBstore [2] ptr (MOVWconst [0]) (Zero [s] ptr mem) && SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%4 == 0 ->
(MOVWstore ptr (MOVWconst [0]) mem)
(Zero [s] ptr mem) && SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%2 == 0 ->
(MOVHstore [2] ptr (MOVWconst [0])
(MOVHstore [0] ptr (MOVWconst [0]) mem)) (MOVHstore [0] ptr (MOVWconst [0]) mem))
(Zero [s] ptr mem) && SizeAndAlign(s).Size() == 4 ->
(MOVBstore [3] ptr (MOVWconst [0])
(MOVBstore [2] ptr (MOVWconst [0])
(MOVBstore [1] ptr (MOVWconst [0])
(MOVBstore [0] ptr (MOVWconst [0]) mem))))
// Strip off fractional word zeroing. (Zero [s] ptr mem) && SizeAndAlign(s).Size() == 3 ->
(Zero [size] ptr mem) && size%4 != 0 && size > 4 -> (MOVBstore [2] ptr (MOVWconst [0])
(Zero [size%4] (ADDconst <ptr.Type> ptr [size-size%4]) (MOVBstore [1] ptr (MOVWconst [0])
(Zero <TypeMem> [size-size%4] ptr mem)) (MOVBstore [0] ptr (MOVWconst [0]) mem)))
// Medium zeroing uses a duff device // Medium zeroing uses a duff device
// 4 and 128 are magic constants, see runtime/mkduff.go // 4 and 128 are magic constants, see runtime/mkduff.go
(Zero [size] ptr mem) && size%4 == 0 && size > 4 && size <= 512 -> (Zero [s] ptr mem)
(DUFFZERO [4 * (128 - int64(size/4))] ptr (MOVWconst [0]) mem) && SizeAndAlign(s).Size()%4 == 0 && SizeAndAlign(s).Size() > 4 && SizeAndAlign(s).Size() <= 512
&& SizeAndAlign(s).Align()%4 == 0 ->
(DUFFZERO [4 * (128 - int64(SizeAndAlign(s).Size()/4))] ptr (MOVWconst [0]) mem)
// Large zeroing uses a loop // Large zeroing uses a loop
(Zero [size] ptr mem) && size%4 == 0 && size > 512 -> (Zero [s] ptr mem)
(LoweredZero ptr (ADDconst <ptr.Type> ptr [size]) (MOVWconst [0]) mem) && SizeAndAlign(s).Size()%4 == 0 && SizeAndAlign(s).Size() > 512
&& SizeAndAlign(s).Align()%4 == 0 ->
(LoweredZero ptr (ADDconst <ptr.Type> ptr [SizeAndAlign(s).Size()]) (MOVWconst [0]) mem)
// moves // Unaligned zeroing uses a loop
//TODO: check alignment? (Zero [s] ptr mem) && SizeAndAlign(s).Size() > 4 && SizeAndAlign(s).Align()%4 != 0 ->
(Move [0] _ _ mem) -> mem (LoweredZeroU ptr (ADDconst <ptr.Type> ptr [SizeAndAlign(s).Size()]) (MOVWconst [0]) mem)
(Move [1] dst src mem) -> (MOVBstore dst (MOVBUload src mem) mem)
(Move [2] dst src mem) -> (MOVHstore dst (MOVHUload src mem) mem)
(Move [4] dst src mem) -> (MOVWstore dst (MOVWload src mem) mem)
(Move [3] dst src mem) -> // moves
(MOVBstore [2] dst (MOVBUload [2] src mem) (Move [s] _ _ mem) && SizeAndAlign(s).Size() == 0 -> mem
(Move [s] dst src mem) && SizeAndAlign(s).Size() == 1 -> (MOVBstore dst (MOVBUload src mem) mem)
(Move [s] dst src mem) && SizeAndAlign(s).Size() == 2 && SizeAndAlign(s).Align()%2 == 0 ->
(MOVHstore dst (MOVHUload src mem) mem)
(Move [s] dst src mem) && SizeAndAlign(s).Size() == 2 ->
(MOVBstore [1] dst (MOVBUload [1] src mem)
(MOVBstore dst (MOVBUload src mem) mem))
(Move [s] dst src mem) && SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%4 == 0 ->
(MOVWstore dst (MOVWload src mem) mem)
(Move [s] dst src mem) && SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%2 == 0 ->
(MOVHstore [2] dst (MOVHUload [2] src mem)
(MOVHstore dst (MOVHUload src mem) mem)) (MOVHstore dst (MOVHUload src mem) mem))
(Move [s] dst src mem) && SizeAndAlign(s).Size() == 4 ->
(MOVBstore [3] dst (MOVBUload [3] src mem)
(MOVBstore [2] dst (MOVBUload [2] src mem)
(MOVBstore [1] dst (MOVBUload [1] src mem)
(MOVBstore dst (MOVBUload src mem) mem))))
// Strip off fractional word move (Move [s] dst src mem) && SizeAndAlign(s).Size() == 3 ->
(Move [size] dst src mem) && size%4!=0 && size > 4 -> (MOVBstore [2] dst (MOVBUload [2] src mem)
(Move [size%4] (ADDconst <dst.Type> dst [size-size%4]) (ADDconst <src.Type> src [size-size%4]) (MOVBstore [1] dst (MOVBUload [1] src mem)
(Move <TypeMem> [size-size%4] dst src mem)) (MOVBstore dst (MOVBUload src mem) mem)))
// Medium move uses a duff device // Medium move uses a duff device
// 8 and 128 are magic constants, see runtime/mkduff.go // 8 and 128 are magic constants, see runtime/mkduff.go
(Move [size] dst src mem) && size%4 == 0 && size > 4 && size <= 512 -> (Move [s] dst src mem)
(DUFFCOPY [8 * (128 - int64(size/4))] dst src mem) && SizeAndAlign(s).Size()%4 == 0 && SizeAndAlign(s).Size() > 4 && SizeAndAlign(s).Size() <= 512
&& SizeAndAlign(s).Align()%4 == 0 ->
(DUFFCOPY [8 * (128 - int64(SizeAndAlign(s).Size()/4))] dst src mem)
// Large move uses a loop // Large move uses a loop
(Move [size] dst src mem) && size%4 == 0 && size > 512 -> (Move [s] dst src mem)
(LoweredMove dst src (ADDconst <src.Type> src [size]) mem) && SizeAndAlign(s).Size()%4 == 0 && SizeAndAlign(s).Size() > 512
&& SizeAndAlign(s).Align()%4 == 0 ->
(LoweredMove dst src (ADDconst <src.Type> src [SizeAndAlign(s).Size()]) mem)
// Unaligned move uses a loop
(Move [s] dst src mem) && SizeAndAlign(s).Size() > 4 && SizeAndAlign(s).Align()%4 != 0 ->
(LoweredMoveU dst src (ADDconst <src.Type> src [SizeAndAlign(s).Size()]) mem)
// calls // calls
(StaticCall [argwid] {target} mem) -> (CALLstatic [argwid] {target} mem) (StaticCall [argwid] {target} mem) -> (CALLstatic [argwid] {target} mem)
......
@@ -249,7 +249,7 @@ func init() {
{name: "LoweredZeromask", argLength: 1, reg: gp11}, // 0 if arg0 == 1, 0xffffffff if arg0 != 0 {name: "LoweredZeromask", argLength: 1, reg: gp11}, // 0 if arg0 == 1, 0xffffffff if arg0 != 0
// duffzero // duffzero (must be 4-byte aligned)
// arg0 = address of memory to zero (in R1, changed as side effect) // arg0 = address of memory to zero (in R1, changed as side effect)
// arg1 = value to store (always zero) // arg1 = value to store (always zero)
// arg2 = mem // arg2 = mem
@@ -265,7 +265,7 @@ func init() {
}, },
}, },
// duffcopy // duffcopy (must be 4-byte aligned)
// arg0 = address of dst memory (in R2, changed as side effect) // arg0 = address of dst memory (in R2, changed as side effect)
// arg1 = address of src memory (in R1, changed as side effect) // arg1 = address of src memory (in R1, changed as side effect)
// arg2 = mem // arg2 = mem
@@ -281,7 +281,7 @@ func init() {
}, },
}, },
// large zeroing // large zeroing (must be 4-byte aligned)
// arg0 = address of memory to zero (in R1, changed as side effect) // arg0 = address of memory to zero (in R1, changed as side effect)
// arg1 = address of the end of the memory to zero // arg1 = address of the end of the memory to zero
// arg2 = value to store (always zero) // arg2 = value to store (always zero)
@@ -299,7 +299,7 @@ func init() {
}, },
}, },
// large move // large move (must be 4-byte aligned)
// arg0 = address of dst memory (in R2, changed as side effect) // arg0 = address of dst memory (in R2, changed as side effect)
// arg1 = address of src memory (in R1, changed as side effect) // arg1 = address of src memory (in R1, changed as side effect)
// arg2 = address of the end of src memory // arg2 = address of the end of src memory
@@ -318,6 +318,43 @@ func init() {
}, },
}, },
// unaligned zeroing
// arg0 = address of memory to zero (in R1, changed as side effect)
// arg1 = address of the end of the memory to zero
// arg2 = value to store (always zero)
// arg3 = mem
// returns mem
// MOVB.P Rarg2, 1(R1)
// CMP R1, Rarg1
// BLT -2(PC)
{
name: "LoweredZeroU",
argLength: 4,
reg: regInfo{
inputs: []regMask{buildReg("R1"), gp, gp},
clobbers: buildReg("R1 FLAGS"),
},
},
// unaligned move
// arg0 = address of dst memory (in R2, changed as side effect)
// arg1 = address of src memory (in R1, changed as side effect)
// arg2 = address of the end of src memory
// arg3 = mem
// returns mem
// MOVB.P 1(R1), Rtmp
// MOVB.P Rtmp, 1(R2)
// CMP R1, Rarg2
// BLT -3(PC)
{
name: "LoweredMoveU",
argLength: 4,
reg: regInfo{
inputs: []regMask{buildReg("R2"), buildReg("R1"), gp},
clobbers: buildReg("R1 R2 FLAGS"),
},
},
// Scheduler ensures LoweredGetClosurePtr occurs only in entry block, // Scheduler ensures LoweredGetClosurePtr occurs only in entry block,
// and sorts it to the very beginning of the block to prevent other // and sorts it to the very beginning of the block to prevent other
// use of R7 (arm.REGCTXT, the closure pointer) // use of R7 (arm.REGCTXT, the closure pointer)
......
@@ -158,31 +158,48 @@
(Store [2] ptr val mem) -> (MOVHstore ptr val mem) (Store [2] ptr val mem) -> (MOVHstore ptr val mem)
(Store [1] ptr val mem) -> (MOVBstore ptr val mem) (Store [1] ptr val mem) -> (MOVBstore ptr val mem)
(Zero [0] _ mem) -> mem (Zero [s] _ mem) && SizeAndAlign(s).Size() == 0 -> mem
(Zero [1] destptr mem) -> (MOVBstoreconst [0] destptr mem) (Zero [s] destptr mem) && SizeAndAlign(s).Size() == 1 -> (MOVBstoreconst [0] destptr mem)
(Zero [2] destptr mem) -> (MOVHstoreconst [0] destptr mem) (Zero [s] destptr mem) && SizeAndAlign(s).Size() == 2 && SizeAndAlign(s).Align()%2 == 0 ->
(Zero [4] destptr mem) -> (MOVWstoreconst [0] destptr mem) (MOVHstoreconst [0] destptr mem)
(Zero [8] destptr mem) -> (MOVDstoreconst [0] destptr mem) (Zero [s] destptr mem) && SizeAndAlign(s).Size() == 2 ->
(MOVBstoreconst [makeValAndOff(0,1)] destptr
(Zero [3] destptr mem) -> (MOVBstoreconst [0] destptr mem))
(MOVBstoreconst [makeValAndOff(0,2)] destptr (Zero [s] destptr mem) && SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%4 == 0 ->
(MOVHstoreconst [0] destptr mem)) (MOVWstoreconst [0] destptr mem)
(Zero [5] destptr mem) -> (Zero [s] destptr mem) && SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%2 == 0 ->
(MOVBstoreconst [makeValAndOff(0,4)] destptr (MOVHstoreconst [makeValAndOff(0,2)] destptr
(MOVWstoreconst [0] destptr mem)) (MOVHstoreconst [0] destptr mem))
(Zero [6] destptr mem) -> (Zero [s] destptr mem) && SizeAndAlign(s).Size() == 4 ->
(MOVHstoreconst [makeValAndOff(0,4)] destptr (MOVBstoreconst [makeValAndOff(0,3)] destptr
(MOVWstoreconst [0] destptr mem)) (MOVBstoreconst [makeValAndOff(0,2)] destptr
(MOVBstoreconst [makeValAndOff(0,1)] destptr
(MOVBstoreconst [0] destptr mem))))
(Zero [s] destptr mem) && SizeAndAlign(s).Size() == 8 && SizeAndAlign(s).Align()%8 == 0 ->
(MOVDstoreconst [0] destptr mem)
(Zero [s] destptr mem) && SizeAndAlign(s).Size() == 8 && SizeAndAlign(s).Align()%4 == 0 ->
(MOVWstoreconst [makeValAndOff(0,4)] destptr
(MOVWstoreconst [0] destptr mem))
(Zero [s] destptr mem) && SizeAndAlign(s).Size() == 8 && SizeAndAlign(s).Align()%2 == 0 ->
(MOVHstoreconst [makeValAndOff(0,6)] destptr
(MOVHstoreconst [makeValAndOff(0,4)] destptr
(MOVHstoreconst [makeValAndOff(0,2)] destptr
(MOVHstoreconst [0] destptr mem))))
(Zero [s] destptr mem) && SizeAndAlign(s).Size() == 3 ->
(MOVBstoreconst [makeValAndOff(0,2)] destptr
(MOVBstoreconst [makeValAndOff(0,1)] destptr
(MOVBstoreconst [0] destptr mem)))
// Zero small numbers of words directly. // Zero small numbers of words directly.
(Zero [16] destptr mem) -> (Zero [s] destptr mem) && SizeAndAlign(s).Size() == 16 && SizeAndAlign(s).Align()%8 == 0 ->
(MOVDstoreconst [makeValAndOff(0,8)] destptr (MOVDstoreconst [makeValAndOff(0,8)] destptr
(MOVDstoreconst [0] destptr mem)) (MOVDstoreconst [0] destptr mem))
(Zero [24] destptr mem) -> (Zero [s] destptr mem) && SizeAndAlign(s).Size() == 24 && SizeAndAlign(s).Align()%8 == 0 ->
(MOVDstoreconst [makeValAndOff(0,16)] destptr (MOVDstoreconst [makeValAndOff(0,16)] destptr
(MOVDstoreconst [makeValAndOff(0,8)] destptr (MOVDstoreconst [makeValAndOff(0,8)] destptr
(MOVDstoreconst [0] destptr mem))) (MOVDstoreconst [0] destptr mem)))
(Zero [32] destptr mem) -> (Zero [s] destptr mem) && SizeAndAlign(s).Size() == 32 && SizeAndAlign(s).Align()%8 == 0 ->
(MOVDstoreconst [makeValAndOff(0,24)] destptr (MOVDstoreconst [makeValAndOff(0,24)] destptr
(MOVDstoreconst [makeValAndOff(0,16)] destptr (MOVDstoreconst [makeValAndOff(0,16)] destptr
(MOVDstoreconst [makeValAndOff(0,8)] destptr (MOVDstoreconst [makeValAndOff(0,8)] destptr
......
@@ -625,8 +625,10 @@
(Store [t.FieldType(0).Size()] dst f0 mem)))) (Store [t.FieldType(0).Size()] dst f0 mem))))
// un-SSAable values use mem->mem copies // un-SSAable values use mem->mem copies
(Store [size] dst (Load <t> src mem) mem) && !config.fe.CanSSA(t) -> (Move [size] dst src mem) (Store [size] dst (Load <t> src mem) mem) && !config.fe.CanSSA(t) ->
(Store [size] dst (Load <t> src mem) (VarDef {x} mem)) && !config.fe.CanSSA(t) -> (Move [size] dst src (VarDef {x} mem)) (Move [MakeSizeAndAlign(size, t.Alignment()).Int64()] dst src mem)
(Store [size] dst (Load <t> src mem) (VarDef {x} mem)) && !config.fe.CanSSA(t) ->
(Move [MakeSizeAndAlign(size, t.Alignment()).Int64()] dst src (VarDef {x} mem))
// string ops // string ops
// Decomposing StringMake and lowering of StringPtr and StringLen // Decomposing StringMake and lowering of StringPtr and StringLen
......
@@ -125,6 +125,34 @@ func (x ValAndOff) add(off int64) int64 {
return makeValAndOff(x.Val(), x.Off()+off) return makeValAndOff(x.Val(), x.Off()+off)
} }
// SizeAndAlign holds both the size and the alignment of a type,
// used in Zero and Move ops.
// The high 8 bits hold the alignment.
// The low 56 bits hold the size.
type SizeAndAlign int64
func (x SizeAndAlign) Size() int64 {
return int64(x) & (1<<56 - 1)
}
func (x SizeAndAlign) Align() int64 {
return int64(uint64(x) >> 56)
}
func (x SizeAndAlign) Int64() int64 {
return int64(x)
}
func (x SizeAndAlign) String() string {
return fmt.Sprintf("size=%d,align=%d", x.Size(), x.Align())
}
func MakeSizeAndAlign(size, align int64) SizeAndAlign {
if size&^(1<<56-1) != 0 {
panic("size too big in SizeAndAlign")
}
if align >= 1<<8 {
panic("alignment too big in SizeAndAlign")
}
return SizeAndAlign(size | align<<56)
}
func (op Op) isTupleGenerator() bool { func (op Op) isTupleGenerator() bool {
switch op { switch op {
case OpAdd32carry, OpSub32carry, OpMul32uhilo, case OpAdd32carry, OpSub32carry, OpMul32uhilo,
......
@@ -454,6 +454,8 @@ const (
OpARMDUFFCOPY OpARMDUFFCOPY
OpARMLoweredZero OpARMLoweredZero
OpARMLoweredMove OpARMLoweredMove
OpARMLoweredZeroU
OpARMLoweredMoveU
OpARMLoweredGetClosurePtr OpARMLoweredGetClosurePtr
OpARMMOVWconvert OpARMMOVWconvert
@@ -5444,6 +5446,30 @@ var opcodeTable = [...]opInfo{
clobbers: 4294967302, // R1 R2 FLAGS clobbers: 4294967302, // R1 R2 FLAGS
}, },
}, },
{
name: "LoweredZeroU",
argLen: 4,
reg: regInfo{
inputs: []inputInfo{
{0, 2}, // R1
{1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
{2, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
clobbers: 4294967298, // R1 FLAGS
},
},
{
name: "LoweredMoveU",
argLen: 4,
reg: regInfo{
inputs: []inputInfo{
{0, 4}, // R2
{1, 2}, // R1
{2, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
clobbers: 4294967302, // R1 R2 FLAGS
},
},
{ {
name: "LoweredGetClosurePtr", name: "LoweredGetClosurePtr",
argLen: 0, argLen: 0,
......
@@ -2294,89 +2294,99 @@ func rewriteValuePPC64_OpXor8(v *Value, config *Config) bool {
func rewriteValuePPC64_OpZero(v *Value, config *Config) bool { func rewriteValuePPC64_OpZero(v *Value, config *Config) bool {
b := v.Block b := v.Block
_ = b _ = b
// match: (Zero [0] _ mem) // match: (Zero [s] _ mem)
// cond: // cond: SizeAndAlign(s).Size() == 0
// result: mem // result: mem
for { for {
if v.AuxInt != 0 { s := v.AuxInt
mem := v.Args[1]
if !(SizeAndAlign(s).Size() == 0) {
break break
} }
mem := v.Args[1]
v.reset(OpCopy) v.reset(OpCopy)
v.Type = mem.Type v.Type = mem.Type
v.AddArg(mem) v.AddArg(mem)
return true return true
} }
// match: (Zero [1] destptr mem) // match: (Zero [s] destptr mem)
// cond: // cond: SizeAndAlign(s).Size() == 1
// result: (MOVBstoreconst [0] destptr mem) // result: (MOVBstoreconst [0] destptr mem)
for { for {
if v.AuxInt != 1 { s := v.AuxInt
break
}
destptr := v.Args[0] destptr := v.Args[0]
mem := v.Args[1] mem := v.Args[1]
if !(SizeAndAlign(s).Size() == 1) {
break
}
v.reset(OpPPC64MOVBstoreconst) v.reset(OpPPC64MOVBstoreconst)
v.AuxInt = 0 v.AuxInt = 0
v.AddArg(destptr) v.AddArg(destptr)
v.AddArg(mem) v.AddArg(mem)
return true return true
} }
// match: (Zero [2] destptr mem) // match: (Zero [s] destptr mem)
// cond: // cond: SizeAndAlign(s).Size() == 2 && SizeAndAlign(s).Align()%2 == 0
// result: (MOVHstoreconst [0] destptr mem) // result: (MOVHstoreconst [0] destptr mem)
for { for {
if v.AuxInt != 2 { s := v.AuxInt
break
}
destptr := v.Args[0] destptr := v.Args[0]
mem := v.Args[1] mem := v.Args[1]
if !(SizeAndAlign(s).Size() == 2 && SizeAndAlign(s).Align()%2 == 0) {
break
}
v.reset(OpPPC64MOVHstoreconst) v.reset(OpPPC64MOVHstoreconst)
v.AuxInt = 0 v.AuxInt = 0
v.AddArg(destptr) v.AddArg(destptr)
v.AddArg(mem) v.AddArg(mem)
return true return true
} }
// match: (Zero [4] destptr mem) // match: (Zero [s] destptr mem)
// cond: // cond: SizeAndAlign(s).Size() == 2
// result: (MOVWstoreconst [0] destptr mem) // result: (MOVBstoreconst [makeValAndOff(0,1)] destptr (MOVBstoreconst [0] destptr mem))
for { for {
if v.AuxInt != 4 { s := v.AuxInt
break
}
destptr := v.Args[0] destptr := v.Args[0]
mem := v.Args[1] mem := v.Args[1]
v.reset(OpPPC64MOVWstoreconst) if !(SizeAndAlign(s).Size() == 2) {
v.AuxInt = 0 break
}
v.reset(OpPPC64MOVBstoreconst)
v.AuxInt = makeValAndOff(0, 1)
v.AddArg(destptr) v.AddArg(destptr)
v.AddArg(mem) v0 := b.NewValue0(v.Line, OpPPC64MOVBstoreconst, TypeMem)
v0.AuxInt = 0
v0.AddArg(destptr)
v0.AddArg(mem)
v.AddArg(v0)
return true return true
} }
// match: (Zero [8] destptr mem) // match: (Zero [s] destptr mem)
// cond: // cond: SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%4 == 0
// result: (MOVDstoreconst [0] destptr mem) // result: (MOVWstoreconst [0] destptr mem)
for { for {
if v.AuxInt != 8 { s := v.AuxInt
break
}
destptr := v.Args[0] destptr := v.Args[0]
mem := v.Args[1] mem := v.Args[1]
v.reset(OpPPC64MOVDstoreconst) if !(SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%4 == 0) {
break
}
v.reset(OpPPC64MOVWstoreconst)
v.AuxInt = 0 v.AuxInt = 0
v.AddArg(destptr) v.AddArg(destptr)
v.AddArg(mem) v.AddArg(mem)
return true return true
} }
// match: (Zero [3] destptr mem) // match: (Zero [s] destptr mem)
// cond: // cond: SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%2 == 0
// result: (MOVBstoreconst [makeValAndOff(0,2)] destptr (MOVHstoreconst [0] destptr mem)) // result: (MOVHstoreconst [makeValAndOff(0,2)] destptr (MOVHstoreconst [0] destptr mem))
for { for {
if v.AuxInt != 3 { s := v.AuxInt
break
}
destptr := v.Args[0] destptr := v.Args[0]
mem := v.Args[1] mem := v.Args[1]
v.reset(OpPPC64MOVBstoreconst) if !(SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%2 == 0) {
break
}
v.reset(OpPPC64MOVHstoreconst)
v.AuxInt = makeValAndOff(0, 2) v.AuxInt = makeValAndOff(0, 2)
v.AddArg(destptr) v.AddArg(destptr)
v0 := b.NewValue0(v.Line, OpPPC64MOVHstoreconst, TypeMem) v0 := b.NewValue0(v.Line, OpPPC64MOVHstoreconst, TypeMem)
@@ -2386,35 +2396,61 @@ func rewriteValuePPC64_OpZero(v *Value, config *Config) bool {
v.AddArg(v0) v.AddArg(v0)
return true return true
} }
// match: (Zero [5] destptr mem) // match: (Zero [s] destptr mem)
// cond: // cond: SizeAndAlign(s).Size() == 4
// result: (MOVBstoreconst [makeValAndOff(0,4)] destptr (MOVWstoreconst [0] destptr mem)) // result: (MOVBstoreconst [makeValAndOff(0,3)] destptr (MOVBstoreconst [makeValAndOff(0,2)] destptr (MOVBstoreconst [makeValAndOff(0,1)] destptr (MOVBstoreconst [0] destptr mem))))
for { for {
if v.AuxInt != 5 { s := v.AuxInt
break
}
destptr := v.Args[0] destptr := v.Args[0]
mem := v.Args[1] mem := v.Args[1]
if !(SizeAndAlign(s).Size() == 4) {
break
}
v.reset(OpPPC64MOVBstoreconst) v.reset(OpPPC64MOVBstoreconst)
v.AuxInt = makeValAndOff(0, 4) v.AuxInt = makeValAndOff(0, 3)
v.AddArg(destptr) v.AddArg(destptr)
v0 := b.NewValue0(v.Line, OpPPC64MOVWstoreconst, TypeMem) v0 := b.NewValue0(v.Line, OpPPC64MOVBstoreconst, TypeMem)
v0.AuxInt = 0 v0.AuxInt = makeValAndOff(0, 2)
v0.AddArg(destptr) v0.AddArg(destptr)
v0.AddArg(mem) v1 := b.NewValue0(v.Line, OpPPC64MOVBstoreconst, TypeMem)
v1.AuxInt = makeValAndOff(0, 1)
v1.AddArg(destptr)
v2 := b.NewValue0(v.Line, OpPPC64MOVBstoreconst, TypeMem)
v2.AuxInt = 0
v2.AddArg(destptr)
v2.AddArg(mem)
v1.AddArg(v2)
v0.AddArg(v1)
v.AddArg(v0) v.AddArg(v0)
return true return true
} }
// match: (Zero [6] destptr mem) // match: (Zero [s] destptr mem)
// cond: // cond: SizeAndAlign(s).Size() == 8 && SizeAndAlign(s).Align()%8 == 0
// result: (MOVHstoreconst [makeValAndOff(0,4)] destptr (MOVWstoreconst [0] destptr mem)) // result: (MOVDstoreconst [0] destptr mem)
for { for {
if v.AuxInt != 6 { s := v.AuxInt
destptr := v.Args[0]
mem := v.Args[1]
if !(SizeAndAlign(s).Size() == 8 && SizeAndAlign(s).Align()%8 == 0) {
break break
} }
v.reset(OpPPC64MOVDstoreconst)
v.AuxInt = 0
v.AddArg(destptr)
v.AddArg(mem)
return true
}
// match: (Zero [s] destptr mem)
// cond: SizeAndAlign(s).Size() == 8 && SizeAndAlign(s).Align()%4 == 0
// result: (MOVWstoreconst [makeValAndOff(0,4)] destptr (MOVWstoreconst [0] destptr mem))
for {
s := v.AuxInt
destptr := v.Args[0] destptr := v.Args[0]
mem := v.Args[1] mem := v.Args[1]
v.reset(OpPPC64MOVHstoreconst) if !(SizeAndAlign(s).Size() == 8 && SizeAndAlign(s).Align()%4 == 0) {
break
}
v.reset(OpPPC64MOVWstoreconst)
v.AuxInt = makeValAndOff(0, 4) v.AuxInt = makeValAndOff(0, 4)
v.AddArg(destptr) v.AddArg(destptr)
v0 := b.NewValue0(v.Line, OpPPC64MOVWstoreconst, TypeMem) v0 := b.NewValue0(v.Line, OpPPC64MOVWstoreconst, TypeMem)
@@ -2424,15 +2460,68 @@ func rewriteValuePPC64_OpZero(v *Value, config *Config) bool {
v.AddArg(v0) v.AddArg(v0)
return true return true
} }
// match: (Zero [16] destptr mem) // match: (Zero [s] destptr mem)
// cond: // cond: SizeAndAlign(s).Size() == 8 && SizeAndAlign(s).Align()%2 == 0
// result: (MOVDstoreconst [makeValAndOff(0,8)] destptr (MOVDstoreconst [0] destptr mem)) // result: (MOVHstoreconst [makeValAndOff(0,6)] destptr (MOVHstoreconst [makeValAndOff(0,4)] destptr (MOVHstoreconst [makeValAndOff(0,2)] destptr (MOVHstoreconst [0] destptr mem))))
for {
s := v.AuxInt
destptr := v.Args[0]
mem := v.Args[1]
if !(SizeAndAlign(s).Size() == 8 && SizeAndAlign(s).Align()%2 == 0) {
break
}
v.reset(OpPPC64MOVHstoreconst)
v.AuxInt = makeValAndOff(0, 6)
v.AddArg(destptr)
v0 := b.NewValue0(v.Line, OpPPC64MOVHstoreconst, TypeMem)
v0.AuxInt = makeValAndOff(0, 4)
v0.AddArg(destptr)
v1 := b.NewValue0(v.Line, OpPPC64MOVHstoreconst, TypeMem)
v1.AuxInt = makeValAndOff(0, 2)
v1.AddArg(destptr)
v2 := b.NewValue0(v.Line, OpPPC64MOVHstoreconst, TypeMem)
v2.AuxInt = 0
v2.AddArg(destptr)
v2.AddArg(mem)
v1.AddArg(v2)
v0.AddArg(v1)
v.AddArg(v0)
return true
}
// match: (Zero [s] destptr mem)
// cond: SizeAndAlign(s).Size() == 3
// result: (MOVBstoreconst [makeValAndOff(0,2)] destptr (MOVBstoreconst [makeValAndOff(0,1)] destptr (MOVBstoreconst [0] destptr mem)))
for { for {
if v.AuxInt != 16 { s := v.AuxInt
destptr := v.Args[0]
mem := v.Args[1]
if !(SizeAndAlign(s).Size() == 3) {
break break
} }
v.reset(OpPPC64MOVBstoreconst)
v.AuxInt = makeValAndOff(0, 2)
v.AddArg(destptr)
v0 := b.NewValue0(v.Line, OpPPC64MOVBstoreconst, TypeMem)
v0.AuxInt = makeValAndOff(0, 1)
v0.AddArg(destptr)
v1 := b.NewValue0(v.Line, OpPPC64MOVBstoreconst, TypeMem)
v1.AuxInt = 0
v1.AddArg(destptr)
v1.AddArg(mem)
v0.AddArg(v1)
v.AddArg(v0)
return true
}
// match: (Zero [s] destptr mem)
// cond: SizeAndAlign(s).Size() == 16 && SizeAndAlign(s).Align()%8 == 0
// result: (MOVDstoreconst [makeValAndOff(0,8)] destptr (MOVDstoreconst [0] destptr mem))
for {
s := v.AuxInt
destptr := v.Args[0] destptr := v.Args[0]
mem := v.Args[1] mem := v.Args[1]
if !(SizeAndAlign(s).Size() == 16 && SizeAndAlign(s).Align()%8 == 0) {
break
}
v.reset(OpPPC64MOVDstoreconst) v.reset(OpPPC64MOVDstoreconst)
v.AuxInt = makeValAndOff(0, 8) v.AuxInt = makeValAndOff(0, 8)
v.AddArg(destptr) v.AddArg(destptr)
...@@ -2443,15 +2532,16 @@ func rewriteValuePPC64_OpZero(v *Value, config *Config) bool { ...@@ -2443,15 +2532,16 @@ func rewriteValuePPC64_OpZero(v *Value, config *Config) bool {
v.AddArg(v0) v.AddArg(v0)
return true return true
} }
// match: (Zero [24] destptr mem) // match: (Zero [s] destptr mem)
// cond: // cond: SizeAndAlign(s).Size() == 24 && SizeAndAlign(s).Align()%8 == 0
// result: (MOVDstoreconst [makeValAndOff(0,16)] destptr (MOVDstoreconst [makeValAndOff(0,8)] destptr (MOVDstoreconst [0] destptr mem))) // result: (MOVDstoreconst [makeValAndOff(0,16)] destptr (MOVDstoreconst [makeValAndOff(0,8)] destptr (MOVDstoreconst [0] destptr mem)))
for { for {
if v.AuxInt != 24 { s := v.AuxInt
break
}
destptr := v.Args[0] destptr := v.Args[0]
mem := v.Args[1] mem := v.Args[1]
if !(SizeAndAlign(s).Size() == 24 && SizeAndAlign(s).Align()%8 == 0) {
break
}
v.reset(OpPPC64MOVDstoreconst) v.reset(OpPPC64MOVDstoreconst)
v.AuxInt = makeValAndOff(0, 16) v.AuxInt = makeValAndOff(0, 16)
v.AddArg(destptr) v.AddArg(destptr)
@@ -2466,15 +2556,16 @@ func rewriteValuePPC64_OpZero(v *Value, config *Config) bool {
v.AddArg(v0) v.AddArg(v0)
return true return true
} }
// match: (Zero [32] destptr mem) // match: (Zero [s] destptr mem)
// cond: // cond: SizeAndAlign(s).Size() == 32 && SizeAndAlign(s).Align()%8 == 0
// result: (MOVDstoreconst [makeValAndOff(0,24)] destptr (MOVDstoreconst [makeValAndOff(0,16)] destptr (MOVDstoreconst [makeValAndOff(0,8)] destptr (MOVDstoreconst [0] destptr mem)))) // result: (MOVDstoreconst [makeValAndOff(0,24)] destptr (MOVDstoreconst [makeValAndOff(0,16)] destptr (MOVDstoreconst [makeValAndOff(0,8)] destptr (MOVDstoreconst [0] destptr mem))))
for { for {
if v.AuxInt != 32 { s := v.AuxInt
break
}
destptr := v.Args[0] destptr := v.Args[0]
mem := v.Args[1] mem := v.Args[1]
if !(SizeAndAlign(s).Size() == 32 && SizeAndAlign(s).Align()%8 == 0) {
break
}
v.reset(OpPPC64MOVDstoreconst) v.reset(OpPPC64MOVDstoreconst)
v.AuxInt = makeValAndOff(0, 24) v.AuxInt = makeValAndOff(0, 24)
v.AddArg(destptr) v.AddArg(destptr)
......
@@ -8837,7 +8837,7 @@ func rewriteValuegeneric_OpStore(v *Value, config *Config) bool {
} }
// match: (Store [size] dst (Load <t> src mem) mem) // match: (Store [size] dst (Load <t> src mem) mem)
// cond: !config.fe.CanSSA(t) // cond: !config.fe.CanSSA(t)
// result: (Move [size] dst src mem) // result: (Move [MakeSizeAndAlign(size, t.Alignment()).Int64()] dst src mem)
for { for {
size := v.AuxInt size := v.AuxInt
dst := v.Args[0] dst := v.Args[0]
@@ -8855,7 +8855,7 @@ func rewriteValuegeneric_OpStore(v *Value, config *Config) bool {
break break
} }
v.reset(OpMove) v.reset(OpMove)
v.AuxInt = size v.AuxInt = MakeSizeAndAlign(size, t.Alignment()).Int64()
v.AddArg(dst) v.AddArg(dst)
v.AddArg(src) v.AddArg(src)
v.AddArg(mem) v.AddArg(mem)
@@ -8863,7 +8863,7 @@ func rewriteValuegeneric_OpStore(v *Value, config *Config) bool {
} }
// match: (Store [size] dst (Load <t> src mem) (VarDef {x} mem)) // match: (Store [size] dst (Load <t> src mem) (VarDef {x} mem))
// cond: !config.fe.CanSSA(t) // cond: !config.fe.CanSSA(t)
// result: (Move [size] dst src (VarDef {x} mem)) // result: (Move [MakeSizeAndAlign(size, t.Alignment()).Int64()] dst src (VarDef {x} mem))
for { for {
size := v.AuxInt size := v.AuxInt
dst := v.Args[0] dst := v.Args[0]
@@ -8886,7 +8886,7 @@ func rewriteValuegeneric_OpStore(v *Value, config *Config) bool {
break break
} }
v.reset(OpMove) v.reset(OpMove)
v.AuxInt = size v.AuxInt = MakeSizeAndAlign(size, t.Alignment()).Int64()
v.AddArg(dst) v.AddArg(dst)
v.AddArg(src) v.AddArg(src)
v0 := b.NewValue0(v.Line, OpVarDef, TypeMem) v0 := b.NewValue0(v.Line, OpVarDef, TypeMem)
......
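
For readers skimming the ARM lowering above: the existing LoweredZero and LoweredMove loops store or copy one 4-byte word per iteration, while the new LoweredZeroU and LoweredMoveU variants fall back to one byte per iteration (MOVB.P / CMP / BLT), which is what makes them safe for pointers that are not word-aligned. A rough Go analogue of the two zeroing loops, purely as an illustration of the generated instruction sequences and not compiler code:

package main

import "fmt"

// zeroAligned corresponds to LoweredZero: clear one 4-byte word per
// iteration, advancing until the end-of-memory address is reached.
func zeroAligned(buf []uint32) {
	for i := range buf {
		buf[i] = 0
	}
}

// zeroUnaligned corresponds to LoweredZeroU: the unaligned fallback
// clears a single byte per iteration instead.
func zeroUnaligned(buf []byte) {
	for i := range buf {
		buf[i] = 0
	}
}

func main() {
	w := []uint32{1, 2, 3}
	b := []byte{1, 2, 3, 4, 5}
	zeroAligned(w)
	zeroUnaligned(b)
	fmt.Println(w, b) // [0 0 0] [0 0 0 0 0]
}
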