Commit f0bab316 authored by Josh Bleecher Snyder

[dev.ssa] cmd/compile: add some constant folding optimizations

These were helpful for some autogenerated code
I'm working with.

Change-Id: I7b89c69552ca99bf560a14bfbcd6bd238595ddf6
Reviewed-on: https://go-review.googlesource.com/24742
Reviewed-by: Keith Randall <khr@golang.org>
Run-TryBot: Josh Bleecher Snyder <josharian@gmail.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
parent 8599fdd9
src/cmd/compile/internal/ssa/gen/AMD64.rules
@@ -509,6 +509,12 @@
(ANDLconst [c] (ANDLconst [d] x)) -> (ANDLconst [c & d] x)
(ANDQconst [c] (ANDQconst [d] x)) -> (ANDQconst [c & d] x)
(XORLconst [c] (XORLconst [d] x)) -> (XORLconst [c ^ d] x)
(XORQconst [c] (XORQconst [d] x)) -> (XORQconst [c ^ d] x)
(MULLconst [c] (MULLconst [d] x)) -> (MULLconst [int64(int32(c * d))] x)
(MULQconst [c] (MULQconst [d] x)) -> (MULQconst [c * d] x)
(ORQ x (MOVQconst [c])) && is32Bit(c) -> (ORQconst [c] x)
(ORQ (MOVQconst [c]) x) && is32Bit(c) -> (ORQconst [c] x)
(ORL x (MOVLconst [c])) -> (ORLconst [c] x)
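The MULLconst fold must re-truncate the combined constant: c and d are carried as int64 AuxInts, so c * d can exceed 32 bits even though the operation is a 32-bit multiply, which is why the rule writes int64(int32(c * d)). A standalone Go sketch (not part of this CL) of the wraparound identity the rule relies on:

package main

import "fmt"

func main() {
	// Two 32-bit constant multiplies collapse into one multiply by the
	// wrapped product; int32 multiplication wraps consistently, so the
	// folded form computes the same value.
	x := int32(7)
	c, d := int32(0x40000000), int32(6) // c*d overflows int32
	fmt.Println((x*d)*c == x*(c*d))     // true
}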
@@ -558,6 +564,16 @@
(SHRL x (ANDLconst [31] y)) -> (SHRL x y)
(SHRQ x (ANDQconst [63] y)) -> (SHRQ x y)
(ROLQconst [c] (ROLQconst [d] x)) -> (ROLQconst [(c+d)&63] x)
(ROLLconst [c] (ROLLconst [d] x)) -> (ROLLconst [(c+d)&31] x)
(ROLWconst [c] (ROLWconst [d] x)) -> (ROLWconst [(c+d)&15] x)
(ROLBconst [c] (ROLBconst [d] x)) -> (ROLBconst [(c+d)& 7] x)
(ROLQconst [0] x) -> x
(ROLLconst [0] x) -> x
(ROLWconst [0] x) -> x
(ROLBconst [0] x) -> x
// Note: the word and byte shifts keep the low 5 bits (not the low 4 or 3 bits)
// because the x86 instructions are defined to use all 5 bits of the shift even
// for the small shifts. I don't think we'll ever generate a weird shift (e.g.
......
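The rotate-combining rules encode the identity rotl(rotl(x, d), c) = rotl(x, (c+d) mod w), with the mask (&63, &31, &15, &7) performing the mod. A quick check of the byte case using math/bits (illustrative only, not compiler code):

package main

import (
	"fmt"
	"math/bits"
)

func main() {
	x := uint8(0xB1)
	c, d := 5, 6 // c+d exceeds the 8-bit width, so the mask matters
	twice := bits.RotateLeft8(bits.RotateLeft8(x, d), c)
	once := bits.RotateLeft8(x, (c+d)&7) // as in the ROLBconst rule
	fmt.Println(twice == once)           // true
}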
src/cmd/compile/internal/ssa/gen/generic.rules
@@ -67,6 +67,12 @@
(Const32F [f2i(float64(i2f32(c) * i2f32(d)))])
(Mul64F (Const64F [c]) (Const64F [d])) -> (Const64F [f2i(i2f(c) * i2f(d))])
// Convert x * -1 to -x. The front-end catches some but not all of these.
(Mul8 (Const8 [-1]) x) -> (Neg8 x)
(Mul16 (Const16 [-1]) x) -> (Neg16 x)
(Mul32 (Const32 [-1]) x) -> (Neg32 x)
(Mul64 (Const64 [-1]) x) -> (Neg64 x)
(Mod8 (Const8 [c]) (Const8 [d])) && d != 0 -> (Const8 [int64(int8(c % d))])
(Mod16 (Const16 [c]) (Const16 [d])) && d != 0 -> (Const16 [int64(int16(c % d))])
(Mod32 (Const32 [c]) (Const32 [d])) && d != 0 -> (Const32 [int64(int32(c % d))])
......
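The d != 0 condition on the Mod rules is essential: Go's % panics at run time on a zero divisor, so that case must survive to run time rather than be folded away. The conversions in the results match Go's truncated (toward-zero) division, illustrated below (hypothetical values, not compiler code):

package main

import "fmt"

func main() {
	// % truncates toward zero, so the result takes the dividend's sign;
	// int64(int8(c % d)) in the rule preserves exactly this value.
	// MinInt8 % -1 is defined to be 0, so even that corner folds safely.
	a, b := int8(-128), int8(-1)
	fmt.Println(-7%3, 7%-3, a%b) // -1 1 0
}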
src/cmd/compile/internal/ssa/rewriteAMD64.go
@@ -542,6 +542,14 @@ func rewriteValueAMD64(v *Value, config *Config) bool {
return rewriteValueAMD64_OpOr8(v, config)
case OpOrB:
return rewriteValueAMD64_OpOrB(v, config)
case OpAMD64ROLBconst:
return rewriteValueAMD64_OpAMD64ROLBconst(v, config)
case OpAMD64ROLLconst:
return rewriteValueAMD64_OpAMD64ROLLconst(v, config)
case OpAMD64ROLQconst:
return rewriteValueAMD64_OpAMD64ROLQconst(v, config)
case OpAMD64ROLWconst:
return rewriteValueAMD64_OpAMD64ROLWconst(v, config)
case OpRsh16Ux16:
return rewriteValueAMD64_OpRsh16Ux16(v, config)
case OpRsh16Ux32:
@@ -11419,6 +11427,22 @@ func rewriteValueAMD64_OpAMD64MULL(v *Value, config *Config) bool {
func rewriteValueAMD64_OpAMD64MULLconst(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (MULLconst [c] (MULLconst [d] x))
// cond:
// result: (MULLconst [int64(int32(c * d))] x)
for {
c := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpAMD64MULLconst {
break
}
d := v_0.AuxInt
x := v_0.Args[0]
v.reset(OpAMD64MULLconst)
v.AuxInt = int64(int32(c * d))
v.AddArg(x)
return true
}
// match: (MULLconst [c] (MOVLconst [d]))
// cond:
// result: (MOVLconst [int64(int32(c*d))])
@@ -11479,6 +11503,22 @@ func rewriteValueAMD64_OpAMD64MULQ(v *Value, config *Config) bool {
func rewriteValueAMD64_OpAMD64MULQconst(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (MULQconst [c] (MULQconst [d] x))
// cond:
// result: (MULQconst [c * d] x)
for {
c := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpAMD64MULQconst {
break
}
d := v_0.AuxInt
x := v_0.Args[0]
v.reset(OpAMD64MULQconst)
v.AuxInt = c * d
v.AddArg(x)
return true
}
// match: (MULQconst [-1] x)
// cond:
// result: (NEGQ x)
@@ -13805,6 +13845,142 @@ func rewriteValueAMD64_OpOrB(v *Value, config *Config) bool {
return true
}
}
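Every generated matcher below follows the same shape: check the opcode of the argument, pull out the AuxInts, then reset v in place and report success. A hand-written toy analog of the ROLBconst fold, using stand-in types (the real Value and Op live in cmd/compile/internal/ssa):

package main

import "fmt"

// Toy stand-ins for the ssa package's Op and Value, to show only the
// matcher shape; field names mirror the generated code.
type Op int

const (
	OpInvalid Op = iota
	OpROLBconst
)

type Value struct {
	Op     Op
	AuxInt int64
	Args   []*Value
}

// rolbFold mirrors (ROLBconst [c] (ROLBconst [d] x)) -> (ROLBconst [(c+d)&7] x):
// it mutates v in place and reports whether the rule fired.
func rolbFold(v *Value) bool {
	if v.Op != OpROLBconst {
		return false
	}
	v0 := v.Args[0]
	if v0.Op != OpROLBconst {
		return false
	}
	v.AuxInt = (v.AuxInt + v0.AuxInt) & 7
	v.Args[0] = v0.Args[0] // bypass the inner rotate
	return true
}

func main() {
	x := &Value{Op: OpInvalid}
	inner := &Value{Op: OpROLBconst, AuxInt: 6, Args: []*Value{x}}
	outer := &Value{Op: OpROLBconst, AuxInt: 5, Args: []*Value{inner}}
	fmt.Println(rolbFold(outer), outer.AuxInt) // true 3
}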
func rewriteValueAMD64_OpAMD64ROLBconst(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (ROLBconst [c] (ROLBconst [d] x))
// cond:
// result: (ROLBconst [(c+d)& 7] x)
for {
c := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpAMD64ROLBconst {
break
}
d := v_0.AuxInt
x := v_0.Args[0]
v.reset(OpAMD64ROLBconst)
v.AuxInt = (c + d) & 7
v.AddArg(x)
return true
}
// match: (ROLBconst [0] x)
// cond:
// result: x
for {
if v.AuxInt != 0 {
break
}
x := v.Args[0]
v.reset(OpCopy)
v.Type = x.Type
v.AddArg(x)
return true
}
return false
}
func rewriteValueAMD64_OpAMD64ROLLconst(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (ROLLconst [c] (ROLLconst [d] x))
// cond:
// result: (ROLLconst [(c+d)&31] x)
for {
c := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpAMD64ROLLconst {
break
}
d := v_0.AuxInt
x := v_0.Args[0]
v.reset(OpAMD64ROLLconst)
v.AuxInt = (c + d) & 31
v.AddArg(x)
return true
}
// match: (ROLLconst [0] x)
// cond:
// result: x
for {
if v.AuxInt != 0 {
break
}
x := v.Args[0]
v.reset(OpCopy)
v.Type = x.Type
v.AddArg(x)
return true
}
return false
}
func rewriteValueAMD64_OpAMD64ROLQconst(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (ROLQconst [c] (ROLQconst [d] x))
// cond:
// result: (ROLQconst [(c+d)&63] x)
for {
c := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpAMD64ROLQconst {
break
}
d := v_0.AuxInt
x := v_0.Args[0]
v.reset(OpAMD64ROLQconst)
v.AuxInt = (c + d) & 63
v.AddArg(x)
return true
}
// match: (ROLQconst [0] x)
// cond:
// result: x
for {
if v.AuxInt != 0 {
break
}
x := v.Args[0]
v.reset(OpCopy)
v.Type = x.Type
v.AddArg(x)
return true
}
return false
}
func rewriteValueAMD64_OpAMD64ROLWconst(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (ROLWconst [c] (ROLWconst [d] x))
// cond:
// result: (ROLWconst [(c+d)&15] x)
for {
c := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpAMD64ROLWconst {
break
}
d := v_0.AuxInt
x := v_0.Args[0]
v.reset(OpAMD64ROLWconst)
v.AuxInt = (c + d) & 15
v.AddArg(x)
return true
}
// match: (ROLWconst [0] x)
// cond:
// result: x
for {
if v.AuxInt != 0 {
break
}
x := v.Args[0]
v.reset(OpCopy)
v.Type = x.Type
v.AddArg(x)
return true
}
return false
}
func rewriteValueAMD64_OpRsh16Ux16(v *Value, config *Config) bool {
b := v.Block
_ = b
@@ -16717,6 +16893,22 @@ func rewriteValueAMD64_OpAMD64XORL(v *Value, config *Config) bool {
func rewriteValueAMD64_OpAMD64XORLconst(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (XORLconst [c] (XORLconst [d] x))
// cond:
// result: (XORLconst [c ^ d] x)
for {
c := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpAMD64XORLconst {
break
}
d := v_0.AuxInt
x := v_0.Args[0]
v.reset(OpAMD64XORLconst)
v.AuxInt = c ^ d
v.AddArg(x)
return true
}
// match: (XORLconst [c] x)
// cond: int32(c)==0
// result: x
@@ -16803,6 +16995,22 @@ func rewriteValueAMD64_OpAMD64XORQ(v *Value, config *Config) bool {
func rewriteValueAMD64_OpAMD64XORQconst(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (XORQconst [c] (XORQconst [d] x))
// cond:
// result: (XORQconst [c ^ d] x)
for {
c := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpAMD64XORQconst {
break
}
d := v_0.AuxInt
x := v_0.Args[0]
v.reset(OpAMD64XORQconst)
v.AuxInt = c ^ d
v.AddArg(x)
return true
}
// match: (XORQconst [0] x)
// cond:
// result: x
......
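The XOR folds above are plain associativity: (x ^ d) ^ c == x ^ (c ^ d), with no truncation subtleties since XOR never carries across bits. A one-line check (illustrative):

package main

import "fmt"

func main() {
	// Nested constant XORs fold into a single constant with no width
	// adjustment needed, unlike the multiply folds.
	x, c, d := uint32(0xDEADBEEF), uint32(0x0F0F0F0F), uint32(0x12345678)
	fmt.Println((x^d)^c == x^(c^d)) // true
}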
src/cmd/compile/internal/ssa/rewritegeneric.go
@@ -5122,6 +5122,22 @@ func rewriteValuegeneric_OpMul16(v *Value, config *Config) bool {
v.AuxInt = int64(int16(c * d))
return true
}
// match: (Mul16 (Const16 [-1]) x)
// cond:
// result: (Neg16 x)
for {
v_0 := v.Args[0]
if v_0.Op != OpConst16 {
break
}
if v_0.AuxInt != -1 {
break
}
x := v.Args[1]
v.reset(OpNeg16)
v.AddArg(x)
return true
}
// match: (Mul16 x (Const16 <t> [c]))
// cond: x.Op != OpConst16
// result: (Mul16 (Const16 <t> [c]) x)
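Replacing a multiply by -1 with a negation is exact even at the type's minimum value, since both wrap identically in two's complement. A quick check for the 16-bit case (not compiler code):

package main

import (
	"fmt"
	"math"
)

func main() {
	// (Mul16 (Const16 [-1]) x) -> (Neg16 x) holds at the overflow corner:
	// -MinInt16 wraps back to MinInt16, the same value MinInt16 * -1 gives.
	x := int16(math.MinInt16)
	fmt.Println(x*-1 == -x) // true
}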
@@ -5181,6 +5197,22 @@ func rewriteValuegeneric_OpMul32(v *Value, config *Config) bool {
v.AuxInt = int64(int32(c * d))
return true
}
// match: (Mul32 (Const32 [-1]) x)
// cond:
// result: (Neg32 x)
for {
v_0 := v.Args[0]
if v_0.Op != OpConst32 {
break
}
if v_0.AuxInt != -1 {
break
}
x := v.Args[1]
v.reset(OpNeg32)
v.AddArg(x)
return true
}
// match: (Mul32 x (Const32 <t> [c]))
// cond: x.Op != OpConst32
// result: (Mul32 (Const32 <t> [c]) x)
@@ -5301,6 +5333,22 @@ func rewriteValuegeneric_OpMul64(v *Value, config *Config) bool {
v.AuxInt = c * d
return true
}
// match: (Mul64 (Const64 [-1]) x)
// cond:
// result: (Neg64 x)
for {
v_0 := v.Args[0]
if v_0.Op != OpConst64 {
break
}
if v_0.AuxInt != -1 {
break
}
x := v.Args[1]
v.reset(OpNeg64)
v.AddArg(x)
return true
}
// match: (Mul64 x (Const64 <t> [c]))
// cond: x.Op != OpConst64
// result: (Mul64 (Const64 <t> [c]) x)
@@ -5421,6 +5469,22 @@ func rewriteValuegeneric_OpMul8(v *Value, config *Config) bool {
v.AuxInt = int64(int8(c * d))
return true
}
// match: (Mul8 (Const8 [-1]) x)
// cond:
// result: (Neg8 x)
for {
v_0 := v.Args[0]
if v_0.Op != OpConst8 {
break
}
if v_0.AuxInt != -1 {
break
}
x := v.Args[1]
v.reset(OpNeg8)
v.AddArg(x)
return true
}
// match: (Mul8 x (Const8 <t> [c]))
// cond: x.Op != OpConst8
// result: (Mul8 (Const8 <t> [c]) x)
......