Commit ce49f950 authored by Josh Bleecher Snyder

cmd/compile: remove amd64p32 rules

And simplify the remaining rules.

Updates #30439

Change-Id: Ib89dce16b17ae881824178346ed6ab895b79627e
Reviewed-on: https://go-review.googlesource.com/c/go/+/204600
Run-TryBot: Josh Bleecher Snyder <josharian@gmail.com>
Reviewed-by: Brad Fitzpatrick <bradfitz@golang.org>
TryBot-Result: Gobot Gobot <gobot@golang.org>
parent 15bff208
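With the amd64p32 port gone (#30439), the AMD64 backend only ever sees 8-byte pointers, so config.PtrSize is always 8 in these rules: every "config.PtrSize == 8" guard is redundant, and every "config.PtrSize == 4" rule is dead. Rules have the form "(match) && condition -> (result)". For example, the guarded pair from the first hunk below,

(AddPtr x y) && config.PtrSize == 8 -> (ADDQ x y)
(AddPtr x y) && config.PtrSize == 4 -> (ADDL x y)

collapses to the single unconditional rule

(AddPtr x y) -> (ADDQ x y)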
@@ -4,13 +4,11 @@
 // Lowering arithmetic
 (Add(64|32|16|8) x y) -> (ADD(Q|L|L|L) x y)
-(AddPtr x y) && config.PtrSize == 8 -> (ADDQ x y)
-(AddPtr x y) && config.PtrSize == 4 -> (ADDL x y)
+(AddPtr x y) -> (ADDQ x y)
 (Add(32|64)F x y) -> (ADDS(S|D) x y)
 (Sub(64|32|16|8) x y) -> (SUB(Q|L|L|L) x y)
-(SubPtr x y) && config.PtrSize == 8 -> (SUBQ x y)
-(SubPtr x y) && config.PtrSize == 4 -> (SUBL x y)
+(SubPtr x y) -> (SUBQ x y)
 (Sub(32|64)F x y) -> (SUBS(S|D) x y)
 (Mul(64|32|16|8) x y) -> (MUL(Q|L|L|L) x y)
@@ -76,9 +74,8 @@
 (Not x) -> (XORLconst [1] x)
 // Lowering pointer arithmetic
-(OffPtr [off] ptr) && config.PtrSize == 8 && is32Bit(off) -> (ADDQconst [off] ptr)
-(OffPtr [off] ptr) && config.PtrSize == 8 -> (ADDQ (MOVQconst [off]) ptr)
-(OffPtr [off] ptr) && config.PtrSize == 4 -> (ADDLconst [off] ptr)
+(OffPtr [off] ptr) && is32Bit(off) -> (ADDQconst [off] ptr)
+(OffPtr [off] ptr) -> (ADDQ (MOVQconst [off]) ptr)
 // Lowering other arithmetic
 (Ctz64 <t> x) -> (CMOVQEQ (Select0 <t> (BSFQ x)) (MOVQconst <t> [64]) (Select1 <types.TypeFlags> (BSFQ x)))
@@ -217,18 +214,16 @@
 (Geq(32|64)F x y) -> (SETGEF (UCOMIS(S|D) x y))
 (Eq(64|32|16|8|B) x y) -> (SETEQ (CMP(Q|L|W|B|B) x y))
-(EqPtr x y) && config.PtrSize == 8 -> (SETEQ (CMPQ x y))
-(EqPtr x y) && config.PtrSize == 4 -> (SETEQ (CMPL x y))
+(EqPtr x y) -> (SETEQ (CMPQ x y))
 (Eq(32|64)F x y) -> (SETEQF (UCOMIS(S|D) x y))
 (Neq(64|32|16|8|B) x y) -> (SETNE (CMP(Q|L|W|B|B) x y))
-(NeqPtr x y) && config.PtrSize == 8 -> (SETNE (CMPQ x y))
-(NeqPtr x y) && config.PtrSize == 4 -> (SETNE (CMPL x y))
+(NeqPtr x y) -> (SETNE (CMPQ x y))
 (Neq(32|64)F x y) -> (SETNEF (UCOMIS(S|D) x y))
 // Lowering loads
-(Load <t> ptr mem) && (is64BitInt(t) || isPtr(t) && config.PtrSize == 8) -> (MOVQload ptr mem)
-(Load <t> ptr mem) && (is32BitInt(t) || isPtr(t) && config.PtrSize == 4) -> (MOVLload ptr mem)
+(Load <t> ptr mem) && (is64BitInt(t) || isPtr(t)) -> (MOVQload ptr mem)
+(Load <t> ptr mem) && is32BitInt(t) -> (MOVLload ptr mem)
 (Load <t> ptr mem) && is16BitInt(t) -> (MOVWload ptr mem)
 (Load <t> ptr mem) && (t.IsBoolean() || is8BitInt(t)) -> (MOVBload ptr mem)
 (Load <t> ptr mem) && is32BitFloat(t) -> (MOVSSload ptr mem)
@@ -420,8 +415,7 @@
 (Const64 [val]) -> (MOVQconst [val])
 (Const32F [val]) -> (MOVSSconst [val])
 (Const64F [val]) -> (MOVSDconst [val])
-(ConstNil) && config.PtrSize == 8 -> (MOVQconst [0])
-(ConstNil) && config.PtrSize == 4 -> (MOVLconst [0])
+(ConstNil) -> (MOVQconst [0])
 (ConstBool [b]) -> (MOVLconst [b])
 // Lowering calls
@@ -476,21 +470,16 @@
 (CMOV(QEQ|QGT|QGE|QCS|QLS|LEQ|LGT|LGE|LCS|LLS|WEQ|WGT|WGE|WCS|WLS) y _ (FlagLT_UGT)) -> y
 // Miscellaneous
-(IsNonNil p) && config.PtrSize == 8 -> (SETNE (TESTQ p p))
-(IsNonNil p) && config.PtrSize == 4 -> (SETNE (TESTL p p))
-(IsInBounds idx len) && config.PtrSize == 8 -> (SETB (CMPQ idx len))
-(IsInBounds idx len) && config.PtrSize == 4 -> (SETB (CMPL idx len))
-(IsSliceInBounds idx len) && config.PtrSize == 8 -> (SETBE (CMPQ idx len))
-(IsSliceInBounds idx len) && config.PtrSize == 4 -> (SETBE (CMPL idx len))
+(IsNonNil p) -> (SETNE (TESTQ p p))
+(IsInBounds idx len) -> (SETB (CMPQ idx len))
+(IsSliceInBounds idx len) -> (SETBE (CMPQ idx len))
 (NilCheck ptr mem) -> (LoweredNilCheck ptr mem)
 (GetG mem) -> (LoweredGetG mem)
 (GetClosurePtr) -> (LoweredGetClosurePtr)
 (GetCallerPC) -> (LoweredGetCallerPC)
 (GetCallerSP) -> (LoweredGetCallerSP)
-(Addr {sym} base) && config.PtrSize == 8 -> (LEAQ {sym} base)
-(Addr {sym} base) && config.PtrSize == 4 -> (LEAL {sym} base)
-(LocalAddr {sym} base _) && config.PtrSize == 8 -> (LEAQ {sym} base)
-(LocalAddr {sym} base _) && config.PtrSize == 4 -> (LEAL {sym} base)
+(Addr {sym} base) -> (LEAQ {sym} base)
+(LocalAddr {sym} base _) -> (LEAQ {sym} base)
 (MOVBstore [off] {sym} ptr y:(SETL x) mem) && y.Uses == 1 -> (SETLstore [off] {sym} ptr x mem)
 (MOVBstore [off] {sym} ptr y:(SETLE x) mem) && y.Uses == 1 -> (SETLEstore [off] {sym} ptr x mem)
@@ -528,16 +517,14 @@
 (AtomicLoad8 ptr mem) -> (MOVBatomicload ptr mem)
 (AtomicLoad32 ptr mem) -> (MOVLatomicload ptr mem)
 (AtomicLoad64 ptr mem) -> (MOVQatomicload ptr mem)
-(AtomicLoadPtr ptr mem) && config.PtrSize == 8 -> (MOVQatomicload ptr mem)
-(AtomicLoadPtr ptr mem) && config.PtrSize == 4 -> (MOVLatomicload ptr mem)
+(AtomicLoadPtr ptr mem) -> (MOVQatomicload ptr mem)
 // Atomic stores. We use XCHG to prevent the hardware reordering a subsequent load.
 // TODO: most runtime uses of atomic stores don't need that property. Use normal stores for those?
 (AtomicStore8 ptr val mem) -> (Select1 (XCHGB <types.NewTuple(typ.UInt8,types.TypeMem)> val ptr mem))
 (AtomicStore32 ptr val mem) -> (Select1 (XCHGL <types.NewTuple(typ.UInt32,types.TypeMem)> val ptr mem))
 (AtomicStore64 ptr val mem) -> (Select1 (XCHGQ <types.NewTuple(typ.UInt64,types.TypeMem)> val ptr mem))
-(AtomicStorePtrNoWB ptr val mem) && config.PtrSize == 8 -> (Select1 (XCHGQ <types.NewTuple(typ.BytePtr,types.TypeMem)> val ptr mem))
-(AtomicStorePtrNoWB ptr val mem) && config.PtrSize == 4 -> (Select1 (XCHGL <types.NewTuple(typ.BytePtr,types.TypeMem)> val ptr mem))
+(AtomicStorePtrNoWB ptr val mem) -> (Select1 (XCHGQ <types.NewTuple(typ.BytePtr,types.TypeMem)> val ptr mem))
 // Atomic exchanges.
 (AtomicExchange32 ptr val mem) -> (XCHGL val ptr mem)
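The remaining hunks are in the generated rewrite file: each rule above is compiled into a matcher function, so dropping a rule's guard also drops the generated config lookup, the PtrSize check, and the dead 32-bit fallback case. As a reading aid, this is roughly the shape an unconditional matcher takes; it is a hand-written sketch mirroring the new AddPtr body in the hunks below, with explanatory comments added, not the generator's verbatim output:

func rewriteValueAMD64_OpAddPtr_0(v *Value) bool {
	// match: (AddPtr x y)
	// result: (ADDQ x y)
	for {
		y := v.Args[1]
		x := v.Args[0]
		v.reset(OpAMD64ADDQ) // rewrite the generic AddPtr into the AMD64 64-bit add
		v.AddArg(x)
		v.AddArg(y)
		return true // the rule is unconditional, so the matcher always fires
	}
}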
@@ -50037,70 +50037,28 @@ func rewriteValueAMD64_OpAdd8_0(v *Value) bool {
 	}
 }
 func rewriteValueAMD64_OpAddPtr_0(v *Value) bool {
-	b := v.Block
-	config := b.Func.Config
 	// match: (AddPtr x y)
-	// cond: config.PtrSize == 8
 	// result: (ADDQ x y)
 	for {
 		y := v.Args[1]
 		x := v.Args[0]
-		if !(config.PtrSize == 8) {
-			break
-		}
 		v.reset(OpAMD64ADDQ)
 		v.AddArg(x)
 		v.AddArg(y)
 		return true
 	}
-	// match: (AddPtr x y)
-	// cond: config.PtrSize == 4
-	// result: (ADDL x y)
-	for {
-		y := v.Args[1]
-		x := v.Args[0]
-		if !(config.PtrSize == 4) {
-			break
-		}
-		v.reset(OpAMD64ADDL)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	return false
 }
 func rewriteValueAMD64_OpAddr_0(v *Value) bool {
-	b := v.Block
-	config := b.Func.Config
 	// match: (Addr {sym} base)
-	// cond: config.PtrSize == 8
 	// result: (LEAQ {sym} base)
 	for {
 		sym := v.Aux
 		base := v.Args[0]
-		if !(config.PtrSize == 8) {
-			break
-		}
 		v.reset(OpAMD64LEAQ)
 		v.Aux = sym
 		v.AddArg(base)
 		return true
 	}
-	// match: (Addr {sym} base)
-	// cond: config.PtrSize == 4
-	// result: (LEAL {sym} base)
-	for {
-		sym := v.Aux
-		base := v.Args[0]
-		if !(config.PtrSize == 4) {
-			break
-		}
-		v.reset(OpAMD64LEAL)
-		v.Aux = sym
-		v.AddArg(base)
-		return true
-	}
-	return false
 }
 func rewriteValueAMD64_OpAnd16_0(v *Value) bool {
 	// match: (And16 x y)
@@ -50311,37 +50269,16 @@ func rewriteValueAMD64_OpAtomicLoad8_0(v *Value) bool {
 	}
 }
 func rewriteValueAMD64_OpAtomicLoadPtr_0(v *Value) bool {
-	b := v.Block
-	config := b.Func.Config
 	// match: (AtomicLoadPtr ptr mem)
-	// cond: config.PtrSize == 8
 	// result: (MOVQatomicload ptr mem)
 	for {
 		mem := v.Args[1]
 		ptr := v.Args[0]
-		if !(config.PtrSize == 8) {
-			break
-		}
 		v.reset(OpAMD64MOVQatomicload)
 		v.AddArg(ptr)
 		v.AddArg(mem)
 		return true
 	}
-	// match: (AtomicLoadPtr ptr mem)
-	// cond: config.PtrSize == 4
-	// result: (MOVLatomicload ptr mem)
-	for {
-		mem := v.Args[1]
-		ptr := v.Args[0]
-		if !(config.PtrSize == 4) {
-			break
-		}
-		v.reset(OpAMD64MOVLatomicload)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	return false
 }
 func rewriteValueAMD64_OpAtomicOr8_0(v *Value) bool {
 	// match: (AtomicOr8 ptr val mem)
@@ -50413,18 +50350,13 @@ func rewriteValueAMD64_OpAtomicStore8_0(v *Value) bool {
 }
 func rewriteValueAMD64_OpAtomicStorePtrNoWB_0(v *Value) bool {
 	b := v.Block
-	config := b.Func.Config
 	typ := &b.Func.Config.Types
 	// match: (AtomicStorePtrNoWB ptr val mem)
-	// cond: config.PtrSize == 8
 	// result: (Select1 (XCHGQ <types.NewTuple(typ.BytePtr,types.TypeMem)> val ptr mem))
 	for {
 		mem := v.Args[2]
 		ptr := v.Args[0]
 		val := v.Args[1]
-		if !(config.PtrSize == 8) {
-			break
-		}
 		v.reset(OpSelect1)
 		v0 := b.NewValue0(v.Pos, OpAMD64XCHGQ, types.NewTuple(typ.BytePtr, types.TypeMem))
 		v0.AddArg(val)
@@ -50433,25 +50365,6 @@ func rewriteValueAMD64_OpAtomicStorePtrNoWB_0(v *Value) bool {
 		v.AddArg(v0)
 		return true
 	}
-	// match: (AtomicStorePtrNoWB ptr val mem)
-	// cond: config.PtrSize == 4
-	// result: (Select1 (XCHGL <types.NewTuple(typ.BytePtr,types.TypeMem)> val ptr mem))
-	for {
-		mem := v.Args[2]
-		ptr := v.Args[0]
-		val := v.Args[1]
-		if !(config.PtrSize == 4) {
-			break
-		}
-		v.reset(OpSelect1)
-		v0 := b.NewValue0(v.Pos, OpAMD64XCHGL, types.NewTuple(typ.BytePtr, types.TypeMem))
-		v0.AddArg(val)
-		v0.AddArg(ptr)
-		v0.AddArg(mem)
-		v.AddArg(v0)
-		return true
-	}
-	return false
 }
 func rewriteValueAMD64_OpAvg64u_0(v *Value) bool {
 	// match: (Avg64u x y)
@@ -51774,31 +51687,13 @@ func rewriteValueAMD64_OpConstBool_0(v *Value) bool {
 	}
 }
 func rewriteValueAMD64_OpConstNil_0(v *Value) bool {
-	b := v.Block
-	config := b.Func.Config
 	// match: (ConstNil)
-	// cond: config.PtrSize == 8
 	// result: (MOVQconst [0])
 	for {
-		if !(config.PtrSize == 8) {
-			break
-		}
 		v.reset(OpAMD64MOVQconst)
 		v.AuxInt = 0
 		return true
 	}
-	// match: (ConstNil)
-	// cond: config.PtrSize == 4
-	// result: (MOVLconst [0])
-	for {
-		if !(config.PtrSize == 4) {
-			break
-		}
-		v.reset(OpAMD64MOVLconst)
-		v.AuxInt = 0
-		return true
-	}
-	return false
 }
 func rewriteValueAMD64_OpCtz16_0(v *Value) bool {
 	b := v.Block
@@ -52303,16 +52198,11 @@ func rewriteValueAMD64_OpEqB_0(v *Value) bool {
 }
 func rewriteValueAMD64_OpEqPtr_0(v *Value) bool {
 	b := v.Block
-	config := b.Func.Config
 	// match: (EqPtr x y)
-	// cond: config.PtrSize == 8
 	// result: (SETEQ (CMPQ x y))
 	for {
 		y := v.Args[1]
 		x := v.Args[0]
-		if !(config.PtrSize == 8) {
-			break
-		}
 		v.reset(OpAMD64SETEQ)
 		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
 		v0.AddArg(x)
@@ -52320,23 +52210,6 @@ func rewriteValueAMD64_OpEqPtr_0(v *Value) bool {
 		v.AddArg(v0)
 		return true
 	}
-	// match: (EqPtr x y)
-	// cond: config.PtrSize == 4
-	// result: (SETEQ (CMPL x y))
-	for {
-		y := v.Args[1]
-		x := v.Args[0]
-		if !(config.PtrSize == 4) {
-			break
-		}
-		v.reset(OpAMD64SETEQ)
-		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-	return false
 }
 func rewriteValueAMD64_OpFloor_0(v *Value) bool {
 	// match: (Floor x)
@@ -52761,16 +52634,11 @@ func rewriteValueAMD64_OpInterCall_0(v *Value) bool {
 }
 func rewriteValueAMD64_OpIsInBounds_0(v *Value) bool {
 	b := v.Block
-	config := b.Func.Config
 	// match: (IsInBounds idx len)
-	// cond: config.PtrSize == 8
 	// result: (SETB (CMPQ idx len))
 	for {
 		len := v.Args[1]
 		idx := v.Args[0]
-		if !(config.PtrSize == 8) {
-			break
-		}
 		v.reset(OpAMD64SETB)
 		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
 		v0.AddArg(idx)
@@ -52778,35 +52646,13 @@ func rewriteValueAMD64_OpIsInBounds_0(v *Value) bool {
 		v.AddArg(v0)
 		return true
 	}
-	// match: (IsInBounds idx len)
-	// cond: config.PtrSize == 4
-	// result: (SETB (CMPL idx len))
-	for {
-		len := v.Args[1]
-		idx := v.Args[0]
-		if !(config.PtrSize == 4) {
-			break
-		}
-		v.reset(OpAMD64SETB)
-		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
-		v0.AddArg(idx)
-		v0.AddArg(len)
-		v.AddArg(v0)
-		return true
-	}
-	return false
 }
 func rewriteValueAMD64_OpIsNonNil_0(v *Value) bool {
 	b := v.Block
-	config := b.Func.Config
 	// match: (IsNonNil p)
-	// cond: config.PtrSize == 8
 	// result: (SETNE (TESTQ p p))
 	for {
 		p := v.Args[0]
-		if !(config.PtrSize == 8) {
-			break
-		}
 		v.reset(OpAMD64SETNE)
 		v0 := b.NewValue0(v.Pos, OpAMD64TESTQ, types.TypeFlags)
 		v0.AddArg(p)
@@ -52814,35 +52660,14 @@ func rewriteValueAMD64_OpIsNonNil_0(v *Value) bool {
 		v.AddArg(v0)
 		return true
 	}
-	// match: (IsNonNil p)
-	// cond: config.PtrSize == 4
-	// result: (SETNE (TESTL p p))
-	for {
-		p := v.Args[0]
-		if !(config.PtrSize == 4) {
-			break
-		}
-		v.reset(OpAMD64SETNE)
-		v0 := b.NewValue0(v.Pos, OpAMD64TESTL, types.TypeFlags)
-		v0.AddArg(p)
-		v0.AddArg(p)
-		v.AddArg(v0)
-		return true
-	}
-	return false
 }
 func rewriteValueAMD64_OpIsSliceInBounds_0(v *Value) bool {
 	b := v.Block
-	config := b.Func.Config
 	// match: (IsSliceInBounds idx len)
-	// cond: config.PtrSize == 8
 	// result: (SETBE (CMPQ idx len))
 	for {
 		len := v.Args[1]
 		idx := v.Args[0]
-		if !(config.PtrSize == 8) {
-			break
-		}
 		v.reset(OpAMD64SETBE)
 		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
 		v0.AddArg(idx)
@@ -52850,23 +52675,6 @@ func rewriteValueAMD64_OpIsSliceInBounds_0(v *Value) bool {
 		v.AddArg(v0)
 		return true
 	}
-	// match: (IsSliceInBounds idx len)
-	// cond: config.PtrSize == 4
-	// result: (SETBE (CMPL idx len))
-	for {
-		len := v.Args[1]
-		idx := v.Args[0]
-		if !(config.PtrSize == 4) {
-			break
-		}
-		v.reset(OpAMD64SETBE)
-		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
-		v0.AddArg(idx)
-		v0.AddArg(len)
-		v.AddArg(v0)
-		return true
-	}
-	return false
 }
 func rewriteValueAMD64_OpLeq16_0(v *Value) bool {
 	b := v.Block
@@ -53169,16 +52977,14 @@ func rewriteValueAMD64_OpLess8U_0(v *Value) bool {
 	}
 }
 func rewriteValueAMD64_OpLoad_0(v *Value) bool {
-	b := v.Block
-	config := b.Func.Config
 	// match: (Load <t> ptr mem)
-	// cond: (is64BitInt(t) || isPtr(t) && config.PtrSize == 8)
+	// cond: (is64BitInt(t) || isPtr(t))
 	// result: (MOVQload ptr mem)
 	for {
 		t := v.Type
 		mem := v.Args[1]
 		ptr := v.Args[0]
-		if !(is64BitInt(t) || isPtr(t) && config.PtrSize == 8) {
+		if !(is64BitInt(t) || isPtr(t)) {
 			break
 		}
 		v.reset(OpAMD64MOVQload)
@@ -53187,13 +52993,13 @@ func rewriteValueAMD64_OpLoad_0(v *Value) bool {
 		return true
 	}
 	// match: (Load <t> ptr mem)
-	// cond: (is32BitInt(t) || isPtr(t) && config.PtrSize == 4)
+	// cond: is32BitInt(t)
 	// result: (MOVLload ptr mem)
 	for {
 		t := v.Type
 		mem := v.Args[1]
 		ptr := v.Args[0]
-		if !(is32BitInt(t) || isPtr(t) && config.PtrSize == 4) {
+		if !(is32BitInt(t)) {
 			break
 		}
 		v.reset(OpAMD64MOVLload)
@@ -53264,39 +53070,17 @@ func rewriteValueAMD64_OpLoad_0(v *Value) bool {
 	return false
 }
 func rewriteValueAMD64_OpLocalAddr_0(v *Value) bool {
-	b := v.Block
-	config := b.Func.Config
 	// match: (LocalAddr {sym} base _)
-	// cond: config.PtrSize == 8
 	// result: (LEAQ {sym} base)
 	for {
 		sym := v.Aux
 		_ = v.Args[1]
 		base := v.Args[0]
-		if !(config.PtrSize == 8) {
-			break
-		}
 		v.reset(OpAMD64LEAQ)
 		v.Aux = sym
 		v.AddArg(base)
 		return true
 	}
-	// match: (LocalAddr {sym} base _)
-	// cond: config.PtrSize == 4
-	// result: (LEAL {sym} base)
-	for {
-		sym := v.Aux
-		_ = v.Args[1]
-		base := v.Args[0]
-		if !(config.PtrSize == 4) {
-			break
-		}
-		v.reset(OpAMD64LEAL)
-		v.Aux = sym
-		v.AddArg(base)
-		return true
-	}
-	return false
 }
 func rewriteValueAMD64_OpLsh16x16_0(v *Value) bool {
 	b := v.Block
@@ -54959,16 +54743,11 @@ func rewriteValueAMD64_OpNeqB_0(v *Value) bool {
 }
 func rewriteValueAMD64_OpNeqPtr_0(v *Value) bool {
 	b := v.Block
-	config := b.Func.Config
 	// match: (NeqPtr x y)
-	// cond: config.PtrSize == 8
 	// result: (SETNE (CMPQ x y))
 	for {
 		y := v.Args[1]
 		x := v.Args[0]
-		if !(config.PtrSize == 8) {
-			break
-		}
 		v.reset(OpAMD64SETNE)
 		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
 		v0.AddArg(x)
@@ -54976,23 +54755,6 @@ func rewriteValueAMD64_OpNeqPtr_0(v *Value) bool {
 		v.AddArg(v0)
 		return true
 	}
-	// match: (NeqPtr x y)
-	// cond: config.PtrSize == 4
-	// result: (SETNE (CMPL x y))
-	for {
-		y := v.Args[1]
-		x := v.Args[0]
-		if !(config.PtrSize == 4) {
-			break
-		}
-		v.reset(OpAMD64SETNE)
-		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-	return false
 }
 func rewriteValueAMD64_OpNilCheck_0(v *Value) bool {
 	// match: (NilCheck ptr mem)
@@ -55019,15 +54781,14 @@ func rewriteValueAMD64_OpNot_0(v *Value) bool {
 }
 func rewriteValueAMD64_OpOffPtr_0(v *Value) bool {
 	b := v.Block
-	config := b.Func.Config
 	typ := &b.Func.Config.Types
 	// match: (OffPtr [off] ptr)
-	// cond: config.PtrSize == 8 && is32Bit(off)
+	// cond: is32Bit(off)
 	// result: (ADDQconst [off] ptr)
 	for {
 		off := v.AuxInt
 		ptr := v.Args[0]
-		if !(config.PtrSize == 8 && is32Bit(off)) {
+		if !(is32Bit(off)) {
 			break
 		}
 		v.reset(OpAMD64ADDQconst)
@@ -55036,14 +54797,10 @@ func rewriteValueAMD64_OpOffPtr_0(v *Value) bool {
 		return true
 	}
 	// match: (OffPtr [off] ptr)
-	// cond: config.PtrSize == 8
 	// result: (ADDQ (MOVQconst [off]) ptr)
 	for {
 		off := v.AuxInt
 		ptr := v.Args[0]
-		if !(config.PtrSize == 8) {
-			break
-		}
 		v.reset(OpAMD64ADDQ)
 		v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
 		v0.AuxInt = off
@@ -55051,21 +54808,6 @@ func rewriteValueAMD64_OpOffPtr_0(v *Value) bool {
 		v.AddArg(ptr)
 		return true
 	}
-	// match: (OffPtr [off] ptr)
-	// cond: config.PtrSize == 4
-	// result: (ADDLconst [off] ptr)
-	for {
-		off := v.AuxInt
-		ptr := v.Args[0]
-		if !(config.PtrSize == 4) {
-			break
-		}
-		v.reset(OpAMD64ADDLconst)
-		v.AuxInt = off
-		v.AddArg(ptr)
-		return true
-	}
-	return false
 }
 func rewriteValueAMD64_OpOr16_0(v *Value) bool {
 	// match: (Or16 x y)
@@ -57223,37 +56965,16 @@ func rewriteValueAMD64_OpSub8_0(v *Value) bool {
 	}
 }
 func rewriteValueAMD64_OpSubPtr_0(v *Value) bool {
-	b := v.Block
-	config := b.Func.Config
 	// match: (SubPtr x y)
-	// cond: config.PtrSize == 8
 	// result: (SUBQ x y)
 	for {
 		y := v.Args[1]
 		x := v.Args[0]
-		if !(config.PtrSize == 8) {
-			break
-		}
 		v.reset(OpAMD64SUBQ)
 		v.AddArg(x)
 		v.AddArg(y)
 		return true
 	}
-	// match: (SubPtr x y)
-	// cond: config.PtrSize == 4
-	// result: (SUBL x y)
-	for {
-		y := v.Args[1]
-		x := v.Args[0]
-		if !(config.PtrSize == 4) {
-			break
-		}
-		v.reset(OpAMD64SUBL)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	return false
 }
 func rewriteValueAMD64_OpTrunc_0(v *Value) bool {
 	// match: (Trunc x)