Commit 193510f2 authored by Josh Bleecher Snyder

cmd/compile: evaluate config as needed in rewrite rules

Prior to this CL, config was an explicit argument
to the SSA rewrite rules, and rules that needed
a Frontend got at it via config.
An upcoming CL moves Frontend from Config to Func,
so rules can no longer reach Frontend via Config.
Passing a Frontend as an argument to the rewrite rules
causes a 2-3% regression in compile times.
This CL takes a different approach:
It treats the variable names "config" and "fe"
as special and calculates them as needed.
The "as needed" part is also important to performance:
If they are calculated eagerly, the nilchecks themselves
cause a regression.

This introduces a little bit of magic into the rewrite
generator. However, from the perspective of the rules,
the config variable was already more or less magic.
And it makes the upcoming changes much clearer.

Passes toolstash -cmp.

Change-Id: I173f2bcc124cba43d53138bfa3775e21316a9107
Reviewed-on: https://go-review.googlesource.com/38326
Run-TryBot: Josh Bleecher Snyder <josharian@gmail.com>
Reviewed-by: Matthew Dempsky <mdempsky@google.com>
parent 09272ae9
......@@ -15,31 +15,36 @@ import (
// It is created once, early during compilation,
// and shared across all compilations.
type Config struct {
arch string // "amd64", etc.
IntSize int64 // 4 or 8
PtrSize int64 // 4 or 8
RegSize int64 // 4 or 8
lowerBlock func(*Block, *Config) bool // lowering function
lowerValue func(*Value, *Config) bool // lowering function
registers []Register // machine registers
gpRegMask regMask // general purpose integer register mask
fpRegMask regMask // floating point register mask
specialRegMask regMask // special register mask
FPReg int8 // register number of frame pointer, -1 if not used
LinkReg int8 // register number of link register if it is a general purpose register, -1 if not used
hasGReg bool // has hardware g register
fe Frontend // callbacks into compiler frontend
ctxt *obj.Link // Generic arch information
optimize bool // Do optimization
noDuffDevice bool // Don't use Duff's device
nacl bool // GOOS=nacl
use387 bool // GO386=387
OldArch bool // True for older versions of architecture, e.g. true for PPC64BE, false for PPC64LE
NeedsFpScratch bool // No direct move between GP and FP register sets
BigEndian bool //
sparsePhiCutoff uint64 // Sparse phi location algorithm used above this #blocks*#variables score
arch string // "amd64", etc.
IntSize int64 // 4 or 8
PtrSize int64 // 4 or 8
RegSize int64 // 4 or 8
lowerBlock blockRewriter // lowering function
lowerValue valueRewriter // lowering function
registers []Register // machine registers
gpRegMask regMask // general purpose integer register mask
fpRegMask regMask // floating point register mask
specialRegMask regMask // special register mask
FPReg int8 // register number of frame pointer, -1 if not used
LinkReg int8 // register number of link register if it is a general purpose register, -1 if not used
hasGReg bool // has hardware g register
fe Frontend // callbacks into compiler frontend
ctxt *obj.Link // Generic arch information
optimize bool // Do optimization
noDuffDevice bool // Don't use Duff's device
nacl bool // GOOS=nacl
use387 bool // GO386=387
OldArch bool // True for older versions of architecture, e.g. true for PPC64BE, false for PPC64LE
NeedsFpScratch bool // No direct move between GP and FP register sets
BigEndian bool //
sparsePhiCutoff uint64 // Sparse phi location algorithm used above this #blocks*#variables score
}
type (
blockRewriter func(*Block) bool
valueRewriter func(*Value) bool
)
type TypeSource interface {
TypeBool() Type
TypeInt8() Type
......
......@@ -68,8 +68,8 @@
(Neg32 x) -> (NEGL x)
(Neg16 x) -> (NEGL x)
(Neg8 x) -> (NEGL x)
(Neg32F x) && !config.use387 -> (PXOR x (MOVSSconst <config.Frontend().TypeFloat32()> [f2i(math.Copysign(0, -1))]))
(Neg64F x) && !config.use387 -> (PXOR x (MOVSDconst <config.Frontend().TypeFloat64()> [f2i(math.Copysign(0, -1))]))
(Neg32F x) && !config.use387 -> (PXOR x (MOVSSconst <fe.TypeFloat32()> [f2i(math.Copysign(0, -1))]))
(Neg64F x) && !config.use387 -> (PXOR x (MOVSDconst <fe.TypeFloat64()> [f2i(math.Copysign(0, -1))]))
(Neg32F x) && config.use387 -> (FCHS x)
(Neg64F x) && config.use387 -> (FCHS x)
......
......@@ -78,8 +78,8 @@
(Neg32 x) -> (NEGL x)
(Neg16 x) -> (NEGL x)
(Neg8 x) -> (NEGL x)
(Neg32F x) -> (PXOR x (MOVSSconst <config.Frontend().TypeFloat32()> [f2i(math.Copysign(0, -1))]))
(Neg64F x) -> (PXOR x (MOVSDconst <config.Frontend().TypeFloat64()> [f2i(math.Copysign(0, -1))]))
(Neg32F x) -> (PXOR x (MOVSSconst <fe.TypeFloat32()> [f2i(math.Copysign(0, -1))]))
(Neg64F x) -> (PXOR x (MOVSDconst <fe.TypeFloat64()> [f2i(math.Copysign(0, -1))]))
(Com64 x) -> (NOTQ x)
(Com32 x) -> (NOTL x)
......@@ -98,10 +98,10 @@
// Lowering other arithmetic
(Ctz64 <t> x) -> (CMOVQEQ (Select0 <t> (BSFQ x)) (MOVQconst <t> [64]) (Select1 <TypeFlags> (BSFQ x)))
(Ctz32 x) -> (Select0 (BSFQ (ORQ <config.Frontend().TypeUInt64()> (MOVQconst [1<<32]) x)))
(Ctz32 x) -> (Select0 (BSFQ (ORQ <fe.TypeUInt64()> (MOVQconst [1<<32]) x)))
(BitLen64 <t> x) -> (ADDQconst [1] (CMOVQEQ <t> (Select0 <t> (BSRQ x)) (MOVQconst <t> [-1]) (Select1 <TypeFlags> (BSRQ x))))
(BitLen32 x) -> (BitLen64 (MOVLQZX <config.Frontend().TypeUInt64()> x))
(BitLen32 x) -> (BitLen64 (MOVLQZX <fe.TypeUInt64()> x))
(Bswap64 x) -> (BSWAPQ x)
(Bswap32 x) -> (BSWAPL x)
......@@ -472,10 +472,10 @@
// Atomic stores. We use XCHG to prevent the hardware reordering a subsequent load.
// TODO: most runtime uses of atomic stores don't need that property. Use normal stores for those?
(AtomicStore32 ptr val mem) -> (Select1 (XCHGL <MakeTuple(config.Frontend().TypeUInt32(),TypeMem)> val ptr mem))
(AtomicStore64 ptr val mem) -> (Select1 (XCHGQ <MakeTuple(config.Frontend().TypeUInt64(),TypeMem)> val ptr mem))
(AtomicStorePtrNoWB ptr val mem) && config.PtrSize == 8 -> (Select1 (XCHGQ <MakeTuple(config.Frontend().TypeBytePtr(),TypeMem)> val ptr mem))
(AtomicStorePtrNoWB ptr val mem) && config.PtrSize == 4 -> (Select1 (XCHGL <MakeTuple(config.Frontend().TypeBytePtr(),TypeMem)> val ptr mem))
(AtomicStore32 ptr val mem) -> (Select1 (XCHGL <MakeTuple(fe.TypeUInt32(),TypeMem)> val ptr mem))
(AtomicStore64 ptr val mem) -> (Select1 (XCHGQ <MakeTuple(fe.TypeUInt64(),TypeMem)> val ptr mem))
(AtomicStorePtrNoWB ptr val mem) && config.PtrSize == 8 -> (Select1 (XCHGQ <MakeTuple(fe.TypeBytePtr(),TypeMem)> val ptr mem))
(AtomicStorePtrNoWB ptr val mem) && config.PtrSize == 4 -> (Select1 (XCHGL <MakeTuple(fe.TypeBytePtr(),TypeMem)> val ptr mem))
// Atomic exchanges.
(AtomicExchange32 ptr val mem) -> (XCHGL val ptr mem)
......@@ -553,8 +553,8 @@
(NE (TESTB (SETNEF cmp) (SETNEF cmp)) yes no) -> (NEF cmp yes no)
// Disabled because it interferes with the pattern match above and makes worse code.
// (SETNEF x) -> (ORQ (SETNE <config.Frontend().TypeInt8()> x) (SETNAN <config.Frontend().TypeInt8()> x))
// (SETEQF x) -> (ANDQ (SETEQ <config.Frontend().TypeInt8()> x) (SETORD <config.Frontend().TypeInt8()> x))
// (SETNEF x) -> (ORQ (SETNE <fe.TypeInt8()> x) (SETNAN <fe.TypeInt8()> x))
// (SETEQF x) -> (ANDQ (SETEQ <fe.TypeInt8()> x) (SETORD <fe.TypeInt8()> x))
// fold constants into instructions
(ADDQ x (MOVQconst [c])) && is32Bit(c) -> (ADDQconst [c] x)
......
......@@ -34,12 +34,12 @@
(Mul32uhilo x y) -> (MULLU x y)
(Div32 x y) ->
(SUB (XOR <config.fe.TypeUInt32()> // negate the result if one operand is negative
(Select0 <config.fe.TypeUInt32()> (CALLudiv {config.ctxt.Lookup("udiv", 0)}
(SUB <config.fe.TypeUInt32()> (XOR x <config.fe.TypeUInt32()> (Signmask x)) (Signmask x)) // negate x if negative
(SUB <config.fe.TypeUInt32()> (XOR y <config.fe.TypeUInt32()> (Signmask y)) (Signmask y)))) // negate y if negative
(Signmask (XOR <config.fe.TypeUInt32()> x y))) (Signmask (XOR <config.fe.TypeUInt32()> x y)))
(Div32u x y) -> (Select0 <config.fe.TypeUInt32()> (CALLudiv {config.ctxt.Lookup("udiv", 0)} x y))
(SUB (XOR <fe.TypeUInt32()> // negate the result if one operand is negative
(Select0 <fe.TypeUInt32()> (CALLudiv {config.ctxt.Lookup("udiv", 0)}
(SUB <fe.TypeUInt32()> (XOR x <fe.TypeUInt32()> (Signmask x)) (Signmask x)) // negate x if negative
(SUB <fe.TypeUInt32()> (XOR y <fe.TypeUInt32()> (Signmask y)) (Signmask y)))) // negate y if negative
(Signmask (XOR <fe.TypeUInt32()> x y))) (Signmask (XOR <fe.TypeUInt32()> x y)))
(Div32u x y) -> (Select0 <fe.TypeUInt32()> (CALLudiv {config.ctxt.Lookup("udiv", 0)} x y))
(Div16 x y) -> (Div32 (SignExt16to32 x) (SignExt16to32 y))
(Div16u x y) -> (Div32u (ZeroExt16to32 x) (ZeroExt16to32 y))
(Div8 x y) -> (Div32 (SignExt8to32 x) (SignExt8to32 y))
......@@ -48,12 +48,12 @@
(Div64F x y) -> (DIVD x y)
(Mod32 x y) ->
(SUB (XOR <config.fe.TypeUInt32()> // negate the result if x is negative
(Select1 <config.fe.TypeUInt32()> (CALLudiv {config.ctxt.Lookup("udiv", 0)}
(SUB <config.fe.TypeUInt32()> (XOR <config.fe.TypeUInt32()> x (Signmask x)) (Signmask x)) // negate x if negative
(SUB <config.fe.TypeUInt32()> (XOR <config.fe.TypeUInt32()> y (Signmask y)) (Signmask y)))) // negate y if negative
(SUB (XOR <fe.TypeUInt32()> // negate the result if x is negative
(Select1 <fe.TypeUInt32()> (CALLudiv {config.ctxt.Lookup("udiv", 0)}
(SUB <fe.TypeUInt32()> (XOR <fe.TypeUInt32()> x (Signmask x)) (Signmask x)) // negate x if negative
(SUB <fe.TypeUInt32()> (XOR <fe.TypeUInt32()> y (Signmask y)) (Signmask y)))) // negate y if negative
(Signmask x)) (Signmask x))
(Mod32u x y) -> (Select1 <config.fe.TypeUInt32()> (CALLudiv {config.ctxt.Lookup("udiv", 0)} x y))
(Mod32u x y) -> (Select1 <fe.TypeUInt32()> (CALLudiv {config.ctxt.Lookup("udiv", 0)} x y))
(Mod16 x y) -> (Mod32 (SignExt16to32 x) (SignExt16to32 y))
(Mod16u x y) -> (Mod32u (ZeroExt16to32 x) (ZeroExt16to32 y))
(Mod8 x y) -> (Mod32 (SignExt8to32 x) (SignExt8to32 y))
......@@ -111,7 +111,7 @@
// boolean ops -- booleans are represented with 0=false, 1=true
(AndB x y) -> (AND x y)
(OrB x y) -> (OR x y)
(EqB x y) -> (XORconst [1] (XOR <config.fe.TypeBool()> x y))
(EqB x y) -> (XORconst [1] (XOR <fe.TypeBool()> x y))
(NeqB x y) -> (XOR x y)
(Not x) -> (XORconst [1] x)
......@@ -160,11 +160,11 @@
(Rsh32x64 x (Const64 [c])) && uint64(c) < 32 -> (SRAconst x [c])
(Rsh32Ux64 x (Const64 [c])) && uint64(c) < 32 -> (SRLconst x [c])
(Lsh16x64 x (Const64 [c])) && uint64(c) < 16 -> (SLLconst x [c])
(Rsh16x64 x (Const64 [c])) && uint64(c) < 16 -> (SRAconst (SLLconst <config.fe.TypeUInt32()> x [16]) [c+16])
(Rsh16Ux64 x (Const64 [c])) && uint64(c) < 16 -> (SRLconst (SLLconst <config.fe.TypeUInt32()> x [16]) [c+16])
(Rsh16x64 x (Const64 [c])) && uint64(c) < 16 -> (SRAconst (SLLconst <fe.TypeUInt32()> x [16]) [c+16])
(Rsh16Ux64 x (Const64 [c])) && uint64(c) < 16 -> (SRLconst (SLLconst <fe.TypeUInt32()> x [16]) [c+16])
(Lsh8x64 x (Const64 [c])) && uint64(c) < 8 -> (SLLconst x [c])
(Rsh8x64 x (Const64 [c])) && uint64(c) < 8 -> (SRAconst (SLLconst <config.fe.TypeUInt32()> x [24]) [c+24])
(Rsh8Ux64 x (Const64 [c])) && uint64(c) < 8 -> (SRLconst (SLLconst <config.fe.TypeUInt32()> x [24]) [c+24])
(Rsh8x64 x (Const64 [c])) && uint64(c) < 8 -> (SRAconst (SLLconst <fe.TypeUInt32()> x [24]) [c+24])
(Rsh8Ux64 x (Const64 [c])) && uint64(c) < 8 -> (SRLconst (SLLconst <fe.TypeUInt32()> x [24]) [c+24])
// large constant shifts
(Lsh32x64 _ (Const64 [c])) && uint64(c) >= 32 -> (Const32 [0])
......@@ -176,8 +176,8 @@
// large constant signed right shift, we leave the sign bit
(Rsh32x64 x (Const64 [c])) && uint64(c) >= 32 -> (SRAconst x [31])
(Rsh16x64 x (Const64 [c])) && uint64(c) >= 16 -> (SRAconst (SLLconst <config.fe.TypeUInt32()> x [16]) [31])
(Rsh8x64 x (Const64 [c])) && uint64(c) >= 8 -> (SRAconst (SLLconst <config.fe.TypeUInt32()> x [24]) [31])
(Rsh16x64 x (Const64 [c])) && uint64(c) >= 16 -> (SRAconst (SLLconst <fe.TypeUInt32()> x [16]) [31])
(Rsh8x64 x (Const64 [c])) && uint64(c) >= 8 -> (SRAconst (SLLconst <fe.TypeUInt32()> x [24]) [31])
// constants
(Const8 [val]) -> (MOVWconst [val])
......@@ -204,7 +204,7 @@
(SignExt16to32 x) -> (MOVHreg x)
(Signmask x) -> (SRAconst x [31])
(Zeromask x) -> (SRAconst (RSBshiftRL <config.fe.TypeInt32()> x x [1]) [31]) // sign bit of uint32(x)>>1 - x
(Zeromask x) -> (SRAconst (RSBshiftRL <fe.TypeInt32()> x x [1]) [31]) // sign bit of uint32(x)>>1 - x
(Slicemask <t> x) -> (SRAconst (RSBconst <t> [0] x) [31])
// float <-> int conversion
......
......@@ -27,8 +27,8 @@
(Hmul64 x y) -> (MULH x y)
(Hmul64u x y) -> (UMULH x y)
(Hmul32 x y) -> (SRAconst (MULL <config.fe.TypeInt64()> x y) [32])
(Hmul32u x y) -> (SRAconst (UMULL <config.fe.TypeUInt64()> x y) [32])
(Hmul32 x y) -> (SRAconst (MULL <fe.TypeInt64()> x y) [32])
(Hmul32u x y) -> (SRAconst (UMULL <fe.TypeUInt64()> x y) [32])
(Div64 x y) -> (DIV x y)
(Div64u x y) -> (UDIV x y)
......@@ -86,20 +86,20 @@
(Ctz64 <t> x) -> (CLZ (RBIT <t> x))
(Ctz32 <t> x) -> (CLZW (RBITW <t> x))
(BitLen64 x) -> (SUB (MOVDconst [64]) (CLZ <config.fe.TypeInt()> x))
(BitLen64 x) -> (SUB (MOVDconst [64]) (CLZ <fe.TypeInt()> x))
(Bswap64 x) -> (REV x)
(Bswap32 x) -> (REVW x)
(BitRev64 x) -> (RBIT x)
(BitRev32 x) -> (RBITW x)
(BitRev16 x) -> (SRLconst [48] (RBIT <config.fe.TypeUInt64()> x))
(BitRev8 x) -> (SRLconst [56] (RBIT <config.fe.TypeUInt64()> x))
(BitRev16 x) -> (SRLconst [48] (RBIT <fe.TypeUInt64()> x))
(BitRev8 x) -> (SRLconst [56] (RBIT <fe.TypeUInt64()> x))
// boolean ops -- booleans are represented with 0=false, 1=true
(AndB x y) -> (AND x y)
(OrB x y) -> (OR x y)
(EqB x y) -> (XOR (MOVDconst [1]) (XOR <config.fe.TypeBool()> x y))
(EqB x y) -> (XOR (MOVDconst [1]) (XOR <fe.TypeBool()> x y))
(NeqB x y) -> (XOR x y)
(Not x) -> (XOR (MOVDconst [1]) x)
......
......@@ -10,7 +10,7 @@
(Add64F x y) -> (ADDD x y)
(Select0 (Add32carry <t> x y)) -> (ADD <t.FieldType(0)> x y)
(Select1 (Add32carry <t> x y)) -> (SGTU <config.fe.TypeBool()> x (ADD <t.FieldType(0)> x y))
(Select1 (Add32carry <t> x y)) -> (SGTU <fe.TypeBool()> x (ADD <t.FieldType(0)> x y))
(Add32withcarry <t> x y c) -> (ADD c (ADD <t> x y))
(SubPtr x y) -> (SUB x y)
......@@ -21,7 +21,7 @@
(Sub64F x y) -> (SUBD x y)
(Select0 (Sub32carry <t> x y)) -> (SUB <t.FieldType(0)> x y)
(Select1 (Sub32carry <t> x y)) -> (SGTU <config.fe.TypeBool()> (SUB <t.FieldType(0)> x y) x)
(Select1 (Sub32carry <t> x y)) -> (SGTU <fe.TypeBool()> (SUB <t.FieldType(0)> x y) x)
(Sub32withcarry <t> x y c) -> (SUB (SUB <t> x y) c)
(Mul32 x y) -> (MUL x y)
......@@ -72,11 +72,11 @@
(Rsh32x64 x (Const64 [c])) && uint32(c) < 32 -> (SRAconst x [c])
(Rsh32Ux64 x (Const64 [c])) && uint32(c) < 32 -> (SRLconst x [c])
(Lsh16x64 x (Const64 [c])) && uint32(c) < 16 -> (SLLconst x [c])
(Rsh16x64 x (Const64 [c])) && uint32(c) < 16 -> (SRAconst (SLLconst <config.fe.TypeUInt32()> x [16]) [c+16])
(Rsh16Ux64 x (Const64 [c])) && uint32(c) < 16 -> (SRLconst (SLLconst <config.fe.TypeUInt32()> x [16]) [c+16])
(Rsh16x64 x (Const64 [c])) && uint32(c) < 16 -> (SRAconst (SLLconst <fe.TypeUInt32()> x [16]) [c+16])
(Rsh16Ux64 x (Const64 [c])) && uint32(c) < 16 -> (SRLconst (SLLconst <fe.TypeUInt32()> x [16]) [c+16])
(Lsh8x64 x (Const64 [c])) && uint32(c) < 8 -> (SLLconst x [c])
(Rsh8x64 x (Const64 [c])) && uint32(c) < 8 -> (SRAconst (SLLconst <config.fe.TypeUInt32()> x [24]) [c+24])
(Rsh8Ux64 x (Const64 [c])) && uint32(c) < 8 -> (SRLconst (SLLconst <config.fe.TypeUInt32()> x [24]) [c+24])
(Rsh8x64 x (Const64 [c])) && uint32(c) < 8 -> (SRAconst (SLLconst <fe.TypeUInt32()> x [24]) [c+24])
(Rsh8Ux64 x (Const64 [c])) && uint32(c) < 8 -> (SRLconst (SLLconst <fe.TypeUInt32()> x [24]) [c+24])
// large constant shifts
(Lsh32x64 _ (Const64 [c])) && uint32(c) >= 32 -> (MOVWconst [0])
......@@ -88,8 +88,8 @@
// large constant signed right shift, we leave the sign bit
(Rsh32x64 x (Const64 [c])) && uint32(c) >= 32 -> (SRAconst x [31])
(Rsh16x64 x (Const64 [c])) && uint32(c) >= 16 -> (SRAconst (SLLconst <config.fe.TypeUInt32()> x [16]) [31])
(Rsh8x64 x (Const64 [c])) && uint32(c) >= 8 -> (SRAconst (SLLconst <config.fe.TypeUInt32()> x [24]) [31])
(Rsh16x64 x (Const64 [c])) && uint32(c) >= 16 -> (SRAconst (SLLconst <fe.TypeUInt32()> x [16]) [31])
(Rsh8x64 x (Const64 [c])) && uint32(c) >= 8 -> (SRAconst (SLLconst <fe.TypeUInt32()> x [24]) [31])
// shifts
// hardware instruction uses only the low 5 bits of the shift
......@@ -118,17 +118,17 @@
(Rsh8Ux16 <t> x y) -> (CMOVZ (SRL <t> (ZeroExt8to32 x) (ZeroExt16to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt16to32 y)))
(Rsh8Ux8 <t> x y) -> (CMOVZ (SRL <t> (ZeroExt8to32 x) (ZeroExt8to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt8to32 y)))
(Rsh32x32 x y) -> (SRA x ( CMOVZ <config.fe.TypeUInt32()> y (MOVWconst [-1]) (SGTUconst [32] y)))
(Rsh32x16 x y) -> (SRA x ( CMOVZ <config.fe.TypeUInt32()> (ZeroExt16to32 y) (MOVWconst [-1]) (SGTUconst [32] (ZeroExt16to32 y))))
(Rsh32x8 x y) -> (SRA x ( CMOVZ <config.fe.TypeUInt32()> (ZeroExt8to32 y) (MOVWconst [-1]) (SGTUconst [32] (ZeroExt8to32 y))))
(Rsh32x32 x y) -> (SRA x ( CMOVZ <fe.TypeUInt32()> y (MOVWconst [-1]) (SGTUconst [32] y)))
(Rsh32x16 x y) -> (SRA x ( CMOVZ <fe.TypeUInt32()> (ZeroExt16to32 y) (MOVWconst [-1]) (SGTUconst [32] (ZeroExt16to32 y))))
(Rsh32x8 x y) -> (SRA x ( CMOVZ <fe.TypeUInt32()> (ZeroExt8to32 y) (MOVWconst [-1]) (SGTUconst [32] (ZeroExt8to32 y))))
(Rsh16x32 x y) -> (SRA (SignExt16to32 x) ( CMOVZ <config.fe.TypeUInt32()> y (MOVWconst [-1]) (SGTUconst [32] y)))
(Rsh16x16 x y) -> (SRA (SignExt16to32 x) ( CMOVZ <config.fe.TypeUInt32()> (ZeroExt16to32 y) (MOVWconst [-1]) (SGTUconst [32] (ZeroExt16to32 y))))
(Rsh16x8 x y) -> (SRA (SignExt16to32 x) ( CMOVZ <config.fe.TypeUInt32()> (ZeroExt8to32 y) (MOVWconst [-1]) (SGTUconst [32] (ZeroExt8to32 y))))
(Rsh16x32 x y) -> (SRA (SignExt16to32 x) ( CMOVZ <fe.TypeUInt32()> y (MOVWconst [-1]) (SGTUconst [32] y)))
(Rsh16x16 x y) -> (SRA (SignExt16to32 x) ( CMOVZ <fe.TypeUInt32()> (ZeroExt16to32 y) (MOVWconst [-1]) (SGTUconst [32] (ZeroExt16to32 y))))
(Rsh16x8 x y) -> (SRA (SignExt16to32 x) ( CMOVZ <fe.TypeUInt32()> (ZeroExt8to32 y) (MOVWconst [-1]) (SGTUconst [32] (ZeroExt8to32 y))))
(Rsh8x32 x y) -> (SRA (SignExt16to32 x) ( CMOVZ <config.fe.TypeUInt32()> y (MOVWconst [-1]) (SGTUconst [32] y)))
(Rsh8x16 x y) -> (SRA (SignExt16to32 x) ( CMOVZ <config.fe.TypeUInt32()> (ZeroExt16to32 y) (MOVWconst [-1]) (SGTUconst [32] (ZeroExt16to32 y))))
(Rsh8x8 x y) -> (SRA (SignExt16to32 x) ( CMOVZ <config.fe.TypeUInt32()> (ZeroExt8to32 y) (MOVWconst [-1]) (SGTUconst [32] (ZeroExt8to32 y))))
(Rsh8x32 x y) -> (SRA (SignExt16to32 x) ( CMOVZ <fe.TypeUInt32()> y (MOVWconst [-1]) (SGTUconst [32] y)))
(Rsh8x16 x y) -> (SRA (SignExt16to32 x) ( CMOVZ <fe.TypeUInt32()> (ZeroExt16to32 y) (MOVWconst [-1]) (SGTUconst [32] (ZeroExt16to32 y))))
(Rsh8x8 x y) -> (SRA (SignExt16to32 x) ( CMOVZ <fe.TypeUInt32()> (ZeroExt8to32 y) (MOVWconst [-1]) (SGTUconst [32] (ZeroExt8to32 y))))
// unary ops
(Neg32 x) -> (NEG x)
......@@ -153,7 +153,7 @@
// boolean ops -- booleans are represented with 0=false, 1=true
(AndB x y) -> (AND x y)
(OrB x y) -> (OR x y)
(EqB x y) -> (XORconst [1] (XOR <config.fe.TypeBool()> x y))
(EqB x y) -> (XORconst [1] (XOR <fe.TypeBool()> x y))
(NeqB x y) -> (XOR x y)
(Not x) -> (XORconst [1] x)
......@@ -393,41 +393,41 @@
// AtomicOr8(ptr,val) -> LoweredAtomicOr(ptr&^3,uint32(val) << ((ptr & 3) * 8))
(AtomicOr8 ptr val mem) && !config.BigEndian ->
(LoweredAtomicOr (AND <config.fe.TypeUInt32().PtrTo()> (MOVWconst [^3]) ptr)
(SLL <config.fe.TypeUInt32()> (ZeroExt8to32 val)
(SLLconst <config.fe.TypeUInt32()> [3]
(ANDconst <config.fe.TypeUInt32()> [3] ptr))) mem)
(LoweredAtomicOr (AND <fe.TypeUInt32().PtrTo()> (MOVWconst [^3]) ptr)
(SLL <fe.TypeUInt32()> (ZeroExt8to32 val)
(SLLconst <fe.TypeUInt32()> [3]
(ANDconst <fe.TypeUInt32()> [3] ptr))) mem)
// AtomicAnd8(ptr,val) -> LoweredAtomicAnd(ptr&^3,(uint32(val) << ((ptr & 3) * 8)) | ^(uint32(0xFF) << ((ptr & 3) * 8))))
(AtomicAnd8 ptr val mem) && !config.BigEndian ->
(LoweredAtomicAnd (AND <config.fe.TypeUInt32().PtrTo()> (MOVWconst [^3]) ptr)
(OR <config.fe.TypeUInt32()> (SLL <config.fe.TypeUInt32()> (ZeroExt8to32 val)
(SLLconst <config.fe.TypeUInt32()> [3]
(ANDconst <config.fe.TypeUInt32()> [3] ptr)))
(NORconst [0] <config.fe.TypeUInt32()> (SLL <config.fe.TypeUInt32()>
(MOVWconst [0xff]) (SLLconst <config.fe.TypeUInt32()> [3]
(ANDconst <config.fe.TypeUInt32()> [3]
(XORconst <config.fe.TypeUInt32()> [3] ptr)))))) mem)
(LoweredAtomicAnd (AND <fe.TypeUInt32().PtrTo()> (MOVWconst [^3]) ptr)
(OR <fe.TypeUInt32()> (SLL <fe.TypeUInt32()> (ZeroExt8to32 val)
(SLLconst <fe.TypeUInt32()> [3]
(ANDconst <fe.TypeUInt32()> [3] ptr)))
(NORconst [0] <fe.TypeUInt32()> (SLL <fe.TypeUInt32()>
(MOVWconst [0xff]) (SLLconst <fe.TypeUInt32()> [3]
(ANDconst <fe.TypeUInt32()> [3]
(XORconst <fe.TypeUInt32()> [3] ptr)))))) mem)
// AtomicOr8(ptr,val) -> LoweredAtomicOr(ptr&^3,uint32(val) << (((ptr^3) & 3) * 8))
(AtomicOr8 ptr val mem) && config.BigEndian ->
(LoweredAtomicOr (AND <config.fe.TypeUInt32().PtrTo()> (MOVWconst [^3]) ptr)
(SLL <config.fe.TypeUInt32()> (ZeroExt8to32 val)
(SLLconst <config.fe.TypeUInt32()> [3]
(ANDconst <config.fe.TypeUInt32()> [3]
(XORconst <config.fe.TypeUInt32()> [3] ptr)))) mem)
(LoweredAtomicOr (AND <fe.TypeUInt32().PtrTo()> (MOVWconst [^3]) ptr)
(SLL <fe.TypeUInt32()> (ZeroExt8to32 val)
(SLLconst <fe.TypeUInt32()> [3]
(ANDconst <fe.TypeUInt32()> [3]
(XORconst <fe.TypeUInt32()> [3] ptr)))) mem)
// AtomicAnd8(ptr,val) -> LoweredAtomicAnd(ptr&^3,(uint32(val) << (((ptr^3) & 3) * 8)) | ^(uint32(0xFF) << (((ptr^3) & 3) * 8))))
(AtomicAnd8 ptr val mem) && config.BigEndian ->
(LoweredAtomicAnd (AND <config.fe.TypeUInt32().PtrTo()> (MOVWconst [^3]) ptr)
(OR <config.fe.TypeUInt32()> (SLL <config.fe.TypeUInt32()> (ZeroExt8to32 val)
(SLLconst <config.fe.TypeUInt32()> [3]
(ANDconst <config.fe.TypeUInt32()> [3]
(XORconst <config.fe.TypeUInt32()> [3] ptr))))
(NORconst [0] <config.fe.TypeUInt32()> (SLL <config.fe.TypeUInt32()>
(MOVWconst [0xff]) (SLLconst <config.fe.TypeUInt32()> [3]
(ANDconst <config.fe.TypeUInt32()> [3]
(XORconst <config.fe.TypeUInt32()> [3] ptr)))))) mem)
(LoweredAtomicAnd (AND <fe.TypeUInt32().PtrTo()> (MOVWconst [^3]) ptr)
(OR <fe.TypeUInt32()> (SLL <fe.TypeUInt32()> (ZeroExt8to32 val)
(SLLconst <fe.TypeUInt32()> [3]
(ANDconst <fe.TypeUInt32()> [3]
(XORconst <fe.TypeUInt32()> [3] ptr))))
(NORconst [0] <fe.TypeUInt32()> (SLL <fe.TypeUInt32()>
(MOVWconst [0xff]) (SLLconst <fe.TypeUInt32()> [3]
(ANDconst <fe.TypeUInt32()> [3]
(XORconst <fe.TypeUInt32()> [3] ptr)))))) mem)
// checks
......
......@@ -27,8 +27,8 @@
(Hmul64 x y) -> (Select0 (MULV x y))
(Hmul64u x y) -> (Select0 (MULVU x y))
(Hmul32 x y) -> (SRAVconst (Select1 <config.fe.TypeInt64()> (MULV (SignExt32to64 x) (SignExt32to64 y))) [32])
(Hmul32u x y) -> (SRLVconst (Select1 <config.fe.TypeUInt64()> (MULVU (ZeroExt32to64 x) (ZeroExt32to64 y))) [32])
(Hmul32 x y) -> (SRAVconst (Select1 <fe.TypeInt64()> (MULV (SignExt32to64 x) (SignExt32to64 y))) [32])
(Hmul32u x y) -> (SRLVconst (Select1 <fe.TypeUInt64()> (MULVU (ZeroExt32to64 x) (ZeroExt32to64 y))) [32])
(Div64 x y) -> (Select1 (DIVV x y))
(Div64u x y) -> (Select1 (DIVVU x y))
......@@ -71,65 +71,65 @@
// shifts
// hardware instruction uses only the low 6 bits of the shift
// we compare to 64 to ensure Go semantics for large shifts
(Lsh64x64 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) y)) (SLLV <t> x y))
(Lsh64x32 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) (ZeroExt32to64 y))) (SLLV <t> x (ZeroExt32to64 y)))
(Lsh64x16 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) (ZeroExt16to64 y))) (SLLV <t> x (ZeroExt16to64 y)))
(Lsh64x8 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) (ZeroExt8to64 y))) (SLLV <t> x (ZeroExt8to64 y)))
(Lsh32x64 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) y)) (SLLV <t> x y))
(Lsh32x32 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) (ZeroExt32to64 y))) (SLLV <t> x (ZeroExt32to64 y)))
(Lsh32x16 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) (ZeroExt16to64 y))) (SLLV <t> x (ZeroExt16to64 y)))
(Lsh32x8 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) (ZeroExt8to64 y))) (SLLV <t> x (ZeroExt8to64 y)))
(Lsh16x64 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) y)) (SLLV <t> x y))
(Lsh16x32 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) (ZeroExt32to64 y))) (SLLV <t> x (ZeroExt32to64 y)))
(Lsh16x16 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) (ZeroExt16to64 y))) (SLLV <t> x (ZeroExt16to64 y)))
(Lsh16x8 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) (ZeroExt8to64 y))) (SLLV <t> x (ZeroExt8to64 y)))
(Lsh8x64 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) y)) (SLLV <t> x y))
(Lsh8x32 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) (ZeroExt32to64 y))) (SLLV <t> x (ZeroExt32to64 y)))
(Lsh8x16 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) (ZeroExt16to64 y))) (SLLV <t> x (ZeroExt16to64 y)))
(Lsh8x8 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) (ZeroExt8to64 y))) (SLLV <t> x (ZeroExt8to64 y)))
(Rsh64Ux64 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) y)) (SRLV <t> x y))
(Rsh64Ux32 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) (ZeroExt32to64 y))) (SRLV <t> x (ZeroExt32to64 y)))
(Rsh64Ux16 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) (ZeroExt16to64 y))) (SRLV <t> x (ZeroExt16to64 y)))
(Rsh64Ux8 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) (ZeroExt8to64 y))) (SRLV <t> x (ZeroExt8to64 y)))
(Rsh32Ux64 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) y)) (SRLV <t> (ZeroExt32to64 x) y))
(Rsh32Ux32 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) (ZeroExt32to64 y))) (SRLV <t> (ZeroExt32to64 x) (ZeroExt32to64 y)))
(Rsh32Ux16 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) (ZeroExt16to64 y))) (SRLV <t> (ZeroExt32to64 x) (ZeroExt16to64 y)))
(Rsh32Ux8 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) (ZeroExt8to64 y))) (SRLV <t> (ZeroExt32to64 x) (ZeroExt8to64 y)))
(Rsh16Ux64 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) y)) (SRLV <t> (ZeroExt16to64 x) y))
(Rsh16Ux32 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) (ZeroExt32to64 y))) (SRLV <t> (ZeroExt16to64 x) (ZeroExt32to64 y)))
(Rsh16Ux16 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) (ZeroExt16to64 y))) (SRLV <t> (ZeroExt16to64 x) (ZeroExt16to64 y)))
(Rsh16Ux8 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) (ZeroExt8to64 y))) (SRLV <t> (ZeroExt16to64 x) (ZeroExt8to64 y)))
(Rsh8Ux64 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) y)) (SRLV <t> (ZeroExt8to64 x) y))
(Rsh8Ux32 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) (ZeroExt32to64 y))) (SRLV <t> (ZeroExt8to64 x) (ZeroExt32to64 y)))
(Rsh8Ux16 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) (ZeroExt16to64 y))) (SRLV <t> (ZeroExt8to64 x) (ZeroExt16to64 y)))
(Rsh8Ux8 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) (ZeroExt8to64 y))) (SRLV <t> (ZeroExt8to64 x) (ZeroExt8to64 y)))
(Rsh64x64 <t> x y) -> (SRAV x (OR <t> (NEGV <t> (SGTU y (Const64 <config.fe.TypeUInt64()> [63]))) y))
(Rsh64x32 <t> x y) -> (SRAV x (OR <t> (NEGV <t> (SGTU (ZeroExt32to64 y) (Const64 <config.fe.TypeUInt64()> [63]))) (ZeroExt32to64 y)))
(Rsh64x16 <t> x y) -> (SRAV x (OR <t> (NEGV <t> (SGTU (ZeroExt16to64 y) (Const64 <config.fe.TypeUInt64()> [63]))) (ZeroExt16to64 y)))
(Rsh64x8 <t> x y) -> (SRAV x (OR <t> (NEGV <t> (SGTU (ZeroExt8to64 y) (Const64 <config.fe.TypeUInt64()> [63]))) (ZeroExt8to64 y)))
(Rsh32x64 <t> x y) -> (SRAV (SignExt32to64 x) (OR <t> (NEGV <t> (SGTU y (Const64 <config.fe.TypeUInt64()> [63]))) y))
(Rsh32x32 <t> x y) -> (SRAV (SignExt32to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt32to64 y) (Const64 <config.fe.TypeUInt64()> [63]))) (ZeroExt32to64 y)))
(Rsh32x16 <t> x y) -> (SRAV (SignExt32to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt16to64 y) (Const64 <config.fe.TypeUInt64()> [63]))) (ZeroExt16to64 y)))
(Rsh32x8 <t> x y) -> (SRAV (SignExt32to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt8to64 y) (Const64 <config.fe.TypeUInt64()> [63]))) (ZeroExt8to64 y)))
(Rsh16x64 <t> x y) -> (SRAV (SignExt16to64 x) (OR <t> (NEGV <t> (SGTU y (Const64 <config.fe.TypeUInt64()> [63]))) y))
(Rsh16x32 <t> x y) -> (SRAV (SignExt16to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt32to64 y) (Const64 <config.fe.TypeUInt64()> [63]))) (ZeroExt32to64 y)))
(Rsh16x16 <t> x y) -> (SRAV (SignExt16to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt16to64 y) (Const64 <config.fe.TypeUInt64()> [63]))) (ZeroExt16to64 y)))
(Rsh16x8 <t> x y) -> (SRAV (SignExt16to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt8to64 y) (Const64 <config.fe.TypeUInt64()> [63]))) (ZeroExt8to64 y)))
(Rsh8x64 <t> x y) -> (SRAV (SignExt8to64 x) (OR <t> (NEGV <t> (SGTU y (Const64 <config.fe.TypeUInt64()> [63]))) y))
(Rsh8x32 <t> x y) -> (SRAV (SignExt8to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt32to64 y) (Const64 <config.fe.TypeUInt64()> [63]))) (ZeroExt32to64 y)))
(Rsh8x16 <t> x y) -> (SRAV (SignExt8to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt16to64 y) (Const64 <config.fe.TypeUInt64()> [63]))) (ZeroExt16to64 y)))
(Rsh8x8 <t> x y) -> (SRAV (SignExt8to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt8to64 y) (Const64 <config.fe.TypeUInt64()> [63]))) (ZeroExt8to64 y)))
(Lsh64x64 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <fe.TypeUInt64()> [64]) y)) (SLLV <t> x y))
(Lsh64x32 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <fe.TypeUInt64()> [64]) (ZeroExt32to64 y))) (SLLV <t> x (ZeroExt32to64 y)))
(Lsh64x16 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <fe.TypeUInt64()> [64]) (ZeroExt16to64 y))) (SLLV <t> x (ZeroExt16to64 y)))
(Lsh64x8 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <fe.TypeUInt64()> [64]) (ZeroExt8to64 y))) (SLLV <t> x (ZeroExt8to64 y)))
(Lsh32x64 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <fe.TypeUInt64()> [64]) y)) (SLLV <t> x y))
(Lsh32x32 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <fe.TypeUInt64()> [64]) (ZeroExt32to64 y))) (SLLV <t> x (ZeroExt32to64 y)))
(Lsh32x16 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <fe.TypeUInt64()> [64]) (ZeroExt16to64 y))) (SLLV <t> x (ZeroExt16to64 y)))
(Lsh32x8 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <fe.TypeUInt64()> [64]) (ZeroExt8to64 y))) (SLLV <t> x (ZeroExt8to64 y)))
(Lsh16x64 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <fe.TypeUInt64()> [64]) y)) (SLLV <t> x y))
(Lsh16x32 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <fe.TypeUInt64()> [64]) (ZeroExt32to64 y))) (SLLV <t> x (ZeroExt32to64 y)))
(Lsh16x16 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <fe.TypeUInt64()> [64]) (ZeroExt16to64 y))) (SLLV <t> x (ZeroExt16to64 y)))
(Lsh16x8 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <fe.TypeUInt64()> [64]) (ZeroExt8to64 y))) (SLLV <t> x (ZeroExt8to64 y)))
(Lsh8x64 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <fe.TypeUInt64()> [64]) y)) (SLLV <t> x y))
(Lsh8x32 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <fe.TypeUInt64()> [64]) (ZeroExt32to64 y))) (SLLV <t> x (ZeroExt32to64 y)))
(Lsh8x16 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <fe.TypeUInt64()> [64]) (ZeroExt16to64 y))) (SLLV <t> x (ZeroExt16to64 y)))
(Lsh8x8 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <fe.TypeUInt64()> [64]) (ZeroExt8to64 y))) (SLLV <t> x (ZeroExt8to64 y)))
(Rsh64Ux64 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <fe.TypeUInt64()> [64]) y)) (SRLV <t> x y))
(Rsh64Ux32 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <fe.TypeUInt64()> [64]) (ZeroExt32to64 y))) (SRLV <t> x (ZeroExt32to64 y)))
(Rsh64Ux16 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <fe.TypeUInt64()> [64]) (ZeroExt16to64 y))) (SRLV <t> x (ZeroExt16to64 y)))
(Rsh64Ux8 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <fe.TypeUInt64()> [64]) (ZeroExt8to64 y))) (SRLV <t> x (ZeroExt8to64 y)))
(Rsh32Ux64 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <fe.TypeUInt64()> [64]) y)) (SRLV <t> (ZeroExt32to64 x) y))
(Rsh32Ux32 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <fe.TypeUInt64()> [64]) (ZeroExt32to64 y))) (SRLV <t> (ZeroExt32to64 x) (ZeroExt32to64 y)))
(Rsh32Ux16 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <fe.TypeUInt64()> [64]) (ZeroExt16to64 y))) (SRLV <t> (ZeroExt32to64 x) (ZeroExt16to64 y)))
(Rsh32Ux8 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <fe.TypeUInt64()> [64]) (ZeroExt8to64 y))) (SRLV <t> (ZeroExt32to64 x) (ZeroExt8to64 y)))
(Rsh16Ux64 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <fe.TypeUInt64()> [64]) y)) (SRLV <t> (ZeroExt16to64 x) y))
(Rsh16Ux32 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <fe.TypeUInt64()> [64]) (ZeroExt32to64 y))) (SRLV <t> (ZeroExt16to64 x) (ZeroExt32to64 y)))
(Rsh16Ux16 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <fe.TypeUInt64()> [64]) (ZeroExt16to64 y))) (SRLV <t> (ZeroExt16to64 x) (ZeroExt16to64 y)))
(Rsh16Ux8 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <fe.TypeUInt64()> [64]) (ZeroExt8to64 y))) (SRLV <t> (ZeroExt16to64 x) (ZeroExt8to64 y)))
(Rsh8Ux64 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <fe.TypeUInt64()> [64]) y)) (SRLV <t> (ZeroExt8to64 x) y))
(Rsh8Ux32 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <fe.TypeUInt64()> [64]) (ZeroExt32to64 y))) (SRLV <t> (ZeroExt8to64 x) (ZeroExt32to64 y)))
(Rsh8Ux16 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <fe.TypeUInt64()> [64]) (ZeroExt16to64 y))) (SRLV <t> (ZeroExt8to64 x) (ZeroExt16to64 y)))
(Rsh8Ux8 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <fe.TypeUInt64()> [64]) (ZeroExt8to64 y))) (SRLV <t> (ZeroExt8to64 x) (ZeroExt8to64 y)))
(Rsh64x64 <t> x y) -> (SRAV x (OR <t> (NEGV <t> (SGTU y (Const64 <fe.TypeUInt64()> [63]))) y))
(Rsh64x32 <t> x y) -> (SRAV x (OR <t> (NEGV <t> (SGTU (ZeroExt32to64 y) (Const64 <fe.TypeUInt64()> [63]))) (ZeroExt32to64 y)))
(Rsh64x16 <t> x y) -> (SRAV x (OR <t> (NEGV <t> (SGTU (ZeroExt16to64 y) (Const64 <fe.TypeUInt64()> [63]))) (ZeroExt16to64 y)))
(Rsh64x8 <t> x y) -> (SRAV x (OR <t> (NEGV <t> (SGTU (ZeroExt8to64 y) (Const64 <fe.TypeUInt64()> [63]))) (ZeroExt8to64 y)))
(Rsh32x64 <t> x y) -> (SRAV (SignExt32to64 x) (OR <t> (NEGV <t> (SGTU y (Const64 <fe.TypeUInt64()> [63]))) y))
(Rsh32x32 <t> x y) -> (SRAV (SignExt32to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt32to64 y) (Const64 <fe.TypeUInt64()> [63]))) (ZeroExt32to64 y)))
(Rsh32x16 <t> x y) -> (SRAV (SignExt32to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt16to64 y) (Const64 <fe.TypeUInt64()> [63]))) (ZeroExt16to64 y)))
(Rsh32x8 <t> x y) -> (SRAV (SignExt32to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt8to64 y) (Const64 <fe.TypeUInt64()> [63]))) (ZeroExt8to64 y)))
(Rsh16x64 <t> x y) -> (SRAV (SignExt16to64 x) (OR <t> (NEGV <t> (SGTU y (Const64 <fe.TypeUInt64()> [63]))) y))
(Rsh16x32 <t> x y) -> (SRAV (SignExt16to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt32to64 y) (Const64 <fe.TypeUInt64()> [63]))) (ZeroExt32to64 y)))
(Rsh16x16 <t> x y) -> (SRAV (SignExt16to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt16to64 y) (Const64 <fe.TypeUInt64()> [63]))) (ZeroExt16to64 y)))
(Rsh16x8 <t> x y) -> (SRAV (SignExt16to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt8to64 y) (Const64 <fe.TypeUInt64()> [63]))) (ZeroExt8to64 y)))
(Rsh8x64 <t> x y) -> (SRAV (SignExt8to64 x) (OR <t> (NEGV <t> (SGTU y (Const64 <fe.TypeUInt64()> [63]))) y))
(Rsh8x32 <t> x y) -> (SRAV (SignExt8to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt32to64 y) (Const64 <fe.TypeUInt64()> [63]))) (ZeroExt32to64 y)))
(Rsh8x16 <t> x y) -> (SRAV (SignExt8to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt16to64 y) (Const64 <fe.TypeUInt64()> [63]))) (ZeroExt16to64 y)))
(Rsh8x8 <t> x y) -> (SRAV (SignExt8to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt8to64 y) (Const64 <fe.TypeUInt64()> [63]))) (ZeroExt8to64 y)))
// unary ops
(Neg64 x) -> (NEGV x)
......@@ -147,7 +147,7 @@
// boolean ops -- booleans are represented with 0=false, 1=true
(AndB x y) -> (AND x y)
(OrB x y) -> (OR x y)
(EqB x y) -> (XOR (MOVVconst [1]) (XOR <config.fe.TypeBool()> x y))
(EqB x y) -> (XOR (MOVVconst [1]) (XOR <fe.TypeBool()> x y))
(NeqB x y) -> (XOR x y)
(Not x) -> (XORconst [1] x)
......
......@@ -154,72 +154,72 @@
(Rsh8x32 x (MOVDconst [c])) && uint32(c) < 8 -> (SRAWconst (SignExt8to32 x) [c])
(Rsh8Ux32 x (MOVDconst [c])) && uint32(c) < 8 -> (SRWconst (ZeroExt8to32 x) [c])
(Rsh64x64 x y) -> (SRAD x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-64] y))))
(Rsh64Ux64 x y) -> (SRD x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-64] y))))
(Lsh64x64 x y) -> (SLD x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-64] y))))
(Rsh64x64 x y) -> (SRAD x (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-64] y))))
(Rsh64Ux64 x y) -> (SRD x (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-64] y))))
(Lsh64x64 x y) -> (SLD x (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-64] y))))
(Rsh32x64 x y) -> (SRAW x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-32] y))))
(Rsh32Ux64 x y) -> (SRW x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-32] y))))
(Lsh32x64 x y) -> (SLW x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-32] y))))
(Rsh32x64 x y) -> (SRAW x (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-32] y))))
(Rsh32Ux64 x y) -> (SRW x (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-32] y))))
(Lsh32x64 x y) -> (SLW x (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-32] y))))
(Rsh16x64 x y) -> (SRAW (SignExt16to32 x) (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-16] y))))
(Rsh16Ux64 x y) -> (SRW (ZeroExt16to32 x) (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-16] y))))
(Lsh16x64 x y) -> (SLW x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-16] y))))
(Rsh16x64 x y) -> (SRAW (SignExt16to32 x) (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-16] y))))
(Rsh16Ux64 x y) -> (SRW (ZeroExt16to32 x) (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-16] y))))
(Lsh16x64 x y) -> (SLW x (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-16] y))))
(Rsh8x64 x y) -> (SRAW (SignExt8to32 x) (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-8] y))))
(Rsh8Ux64 x y) -> (SRW (ZeroExt8to32 x) (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-8] y))))
(Lsh8x64 x y) -> (SLW x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-8] y))))
(Rsh8x64 x y) -> (SRAW (SignExt8to32 x) (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-8] y))))
(Rsh8Ux64 x y) -> (SRW (ZeroExt8to32 x) (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-8] y))))
(Lsh8x64 x y) -> (SLW x (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-8] y))))
(Rsh64x32 x y) -> (SRAD x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt32to64 y)))))
(Rsh64Ux32 x y) -> (SRD x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt32to64 y)))))
(Lsh64x32 x y) -> (SLD x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt32to64 y)))))
(Rsh64x32 x y) -> (SRAD x (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt32to64 y)))))
(Rsh64Ux32 x y) -> (SRD x (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt32to64 y)))))
(Lsh64x32 x y) -> (SLD x (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt32to64 y)))))
(Rsh32x32 x y) -> (SRAW x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt32to64 y)))))
(Rsh32Ux32 x y) -> (SRW x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt32to64 y)))))
(Lsh32x32 x y) -> (SLW x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt32to64 y)))))
(Rsh32x32 x y) -> (SRAW x (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt32to64 y)))))
(Rsh32Ux32 x y) -> (SRW x (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt32to64 y)))))
(Lsh32x32 x y) -> (SLW x (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt32to64 y)))))
(Rsh16x32 x y) -> (SRAW (SignExt16to32 x) (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt32to64 y)))))
(Rsh16Ux32 x y) -> (SRW (ZeroExt16to32 x) (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt32to64 y)))))
(Lsh16x32 x y) -> (SLW x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt32to64 y)))))
(Rsh16x32 x y) -> (SRAW (SignExt16to32 x) (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt32to64 y)))))
(Rsh16Ux32 x y) -> (SRW (ZeroExt16to32 x) (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt32to64 y)))))
(Lsh16x32 x y) -> (SLW x (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt32to64 y)))))
(Rsh8x32 x y) -> (SRAW (SignExt8to32 x) (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt32to64 y)))))
(Rsh8Ux32 x y) -> (SRW (ZeroExt8to32 x) (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt32to64 y)))))
(Lsh8x32 x y) -> (SLW x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt32to64 y)))))
(Rsh8x32 x y) -> (SRAW (SignExt8to32 x) (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt32to64 y)))))
(Rsh8Ux32 x y) -> (SRW (ZeroExt8to32 x) (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt32to64 y)))))
(Lsh8x32 x y) -> (SLW x (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt32to64 y)))))
(Rsh64x16 x y) -> (SRAD x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt16to64 y)))))
(Rsh64Ux16 x y) -> (SRD x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt16to64 y)))))
(Lsh64x16 x y) -> (SLD x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt16to64 y)))))
(Rsh64x16 x y) -> (SRAD x (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt16to64 y)))))
(Rsh64Ux16 x y) -> (SRD x (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt16to64 y)))))
(Lsh64x16 x y) -> (SLD x (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt16to64 y)))))
(Rsh32x16 x y) -> (SRAW x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt16to64 y)))))
(Rsh32Ux16 x y) -> (SRW x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt16to64 y)))))
(Lsh32x16 x y) -> (SLW x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt16to64 y)))))
(Rsh32x16 x y) -> (SRAW x (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt16to64 y)))))
(Rsh32Ux16 x y) -> (SRW x (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt16to64 y)))))
(Lsh32x16 x y) -> (SLW x (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt16to64 y)))))
(Rsh16x16 x y) -> (SRAW (SignExt16to32 x) (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt16to64 y)))))
(Rsh16Ux16 x y) -> (SRW (ZeroExt16to32 x) (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt16to64 y)))))
(Lsh16x16 x y) -> (SLW x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt16to64 y)))))
(Rsh16x16 x y) -> (SRAW (SignExt16to32 x) (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt16to64 y)))))
(Rsh16Ux16 x y) -> (SRW (ZeroExt16to32 x) (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt16to64 y)))))
(Lsh16x16 x y) -> (SLW x (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt16to64 y)))))
(Rsh8x16 x y) -> (SRAW (SignExt8to32 x) (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt16to64 y)))))
(Rsh8Ux16 x y) -> (SRW (ZeroExt8to32 x) (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt16to64 y)))))
(Lsh8x16 x y) -> (SLW x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt16to64 y)))))
(Rsh8x16 x y) -> (SRAW (SignExt8to32 x) (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt16to64 y)))))
(Rsh8Ux16 x y) -> (SRW (ZeroExt8to32 x) (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt16to64 y)))))
(Lsh8x16 x y) -> (SLW x (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt16to64 y)))))
(Rsh64x8 x y) -> (SRAD x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt8to64 y)))))
(Rsh64Ux8 x y) -> (SRD x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt8to64 y)))))
(Lsh64x8 x y) -> (SLD x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt8to64 y)))))
(Rsh64x8 x y) -> (SRAD x (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt8to64 y)))))
(Rsh64Ux8 x y) -> (SRD x (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt8to64 y)))))
(Lsh64x8 x y) -> (SLD x (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt8to64 y)))))
(Rsh32x8 x y) -> (SRAW x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt8to64 y)))))
(Rsh32Ux8 x y) -> (SRW x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt8to64 y)))))
(Lsh32x8 x y) -> (SLW x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt8to64 y)))))
(Rsh32x8 x y) -> (SRAW x (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt8to64 y)))))
(Rsh32Ux8 x y) -> (SRW x (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt8to64 y)))))
(Lsh32x8 x y) -> (SLW x (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt8to64 y)))))
(Rsh16x8 x y) -> (SRAW (SignExt16to32 x) (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt8to64 y)))))
(Rsh16Ux8 x y) -> (SRW (ZeroExt16to32 x) (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt8to64 y)))))
(Lsh16x8 x y) -> (SLW x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt8to64 y)))))
(Rsh16x8 x y) -> (SRAW (SignExt16to32 x) (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt8to64 y)))))
(Rsh16Ux8 x y) -> (SRW (ZeroExt16to32 x) (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt8to64 y)))))
(Lsh16x8 x y) -> (SLW x (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt8to64 y)))))
(Rsh8x8 x y) -> (SRAW (SignExt8to32 x) (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt8to64 y)))))
(Rsh8Ux8 x y) -> (SRW (ZeroExt8to32 x) (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt8to64 y)))))
(Lsh8x8 x y) -> (SLW x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt8to64 y)))))
(Rsh8x8 x y) -> (SRAW (SignExt8to32 x) (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt8to64 y)))))
(Rsh8Ux8 x y) -> (SRW (ZeroExt8to32 x) (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt8to64 y)))))
(Lsh8x8 x y) -> (SLW x (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt8to64 y)))))
// Cleaning up shift ops when input is masked
(MaskIfNotCarry (ADDconstForCarry [c] (ANDconst [d] _))) && c < 0 && d > 0 && c + d < 0 -> (MOVDconst [-1])
......@@ -233,7 +233,7 @@
(Addr {sym} base) -> (MOVDaddr {sym} base)
// (Addr {sym} base) -> (ADDconst {sym} base)
(OffPtr [off] ptr) -> (ADD (MOVDconst <config.Frontend().TypeInt64()> [off]) ptr)
(OffPtr [off] ptr) -> (ADD (MOVDconst <fe.TypeInt64()> [off]) ptr)
(And64 x y) -> (AND x y)
(And32 x y) -> (AND x y)
......
......@@ -437,7 +437,7 @@
(If (MOVDGTnoinv (MOVDconst [0]) (MOVDconst [1]) cmp) yes no) -> (GTF cmp yes no)
(If (MOVDGEnoinv (MOVDconst [0]) (MOVDconst [1]) cmp) yes no) -> (GEF cmp yes no)
(If cond yes no) -> (NE (CMPWconst [0] (MOVBZreg <config.fe.TypeBool()> cond)) yes no)
(If cond yes no) -> (NE (CMPWconst [0] (MOVBZreg <fe.TypeBool()> cond)) yes no)
// ***************************
// Above: lowering rules
......
......@@ -13,28 +13,28 @@
(Load <t> ptr mem) && t.IsComplex() && t.Size() == 8 ->
(ComplexMake
(Load <config.fe.TypeFloat32()> ptr mem)
(Load <config.fe.TypeFloat32()>
(OffPtr <config.fe.TypeFloat32().PtrTo()> [4] ptr)
(Load <fe.TypeFloat32()> ptr mem)
(Load <fe.TypeFloat32()>
(OffPtr <fe.TypeFloat32().PtrTo()> [4] ptr)
mem)
)
(Store {t} dst (ComplexMake real imag) mem) && t.(Type).Size() == 8 ->
(Store {config.fe.TypeFloat32()}
(OffPtr <config.fe.TypeFloat32().PtrTo()> [4] dst)
(Store {fe.TypeFloat32()}
(OffPtr <fe.TypeFloat32().PtrTo()> [4] dst)
imag
(Store {config.fe.TypeFloat32()} dst real mem))
(Store {fe.TypeFloat32()} dst real mem))
(Load <t> ptr mem) && t.IsComplex() && t.Size() == 16 ->
(ComplexMake
(Load <config.fe.TypeFloat64()> ptr mem)
(Load <config.fe.TypeFloat64()>
(OffPtr <config.fe.TypeFloat64().PtrTo()> [8] ptr)
(Load <fe.TypeFloat64()> ptr mem)
(Load <fe.TypeFloat64()>
(OffPtr <fe.TypeFloat64().PtrTo()> [8] ptr)
mem)
)
(Store {t} dst (ComplexMake real imag) mem) && t.(Type).Size() == 16 ->
(Store {config.fe.TypeFloat64()}
(OffPtr <config.fe.TypeFloat64().PtrTo()> [8] dst)
(Store {fe.TypeFloat64()}
(OffPtr <fe.TypeFloat64().PtrTo()> [8] dst)
imag
(Store {config.fe.TypeFloat64()} dst real mem))
(Store {fe.TypeFloat64()} dst real mem))
// string ops
(StringPtr (StringMake ptr _)) -> ptr
......@@ -42,15 +42,15 @@
(Load <t> ptr mem) && t.IsString() ->
(StringMake
(Load <config.fe.TypeBytePtr()> ptr mem)
(Load <config.fe.TypeInt()>
(OffPtr <config.fe.TypeInt().PtrTo()> [config.PtrSize] ptr)
(Load <fe.TypeBytePtr()> ptr mem)
(Load <fe.TypeInt()>
(OffPtr <fe.TypeInt().PtrTo()> [config.PtrSize] ptr)
mem))
(Store dst (StringMake ptr len) mem) ->
(Store {config.fe.TypeInt()}
(OffPtr <config.fe.TypeInt().PtrTo()> [config.PtrSize] dst)
(Store {fe.TypeInt()}
(OffPtr <fe.TypeInt().PtrTo()> [config.PtrSize] dst)
len
(Store {config.fe.TypeBytePtr()} dst ptr mem))
(Store {fe.TypeBytePtr()} dst ptr mem))
// slice ops
(SlicePtr (SliceMake ptr _ _ )) -> ptr
......@@ -60,20 +60,20 @@
(Load <t> ptr mem) && t.IsSlice() ->
(SliceMake
(Load <t.ElemType().PtrTo()> ptr mem)
(Load <config.fe.TypeInt()>
(OffPtr <config.fe.TypeInt().PtrTo()> [config.PtrSize] ptr)
(Load <fe.TypeInt()>
(OffPtr <fe.TypeInt().PtrTo()> [config.PtrSize] ptr)
mem)
(Load <config.fe.TypeInt()>
(OffPtr <config.fe.TypeInt().PtrTo()> [2*config.PtrSize] ptr)
(Load <fe.TypeInt()>
(OffPtr <fe.TypeInt().PtrTo()> [2*config.PtrSize] ptr)
mem))
(Store dst (SliceMake ptr len cap) mem) ->
(Store {config.fe.TypeInt()}
(OffPtr <config.fe.TypeInt().PtrTo()> [2*config.PtrSize] dst)
(Store {fe.TypeInt()}
(OffPtr <fe.TypeInt().PtrTo()> [2*config.PtrSize] dst)
cap
(Store {config.fe.TypeInt()}
(OffPtr <config.fe.TypeInt().PtrTo()> [config.PtrSize] dst)
(Store {fe.TypeInt()}
(OffPtr <fe.TypeInt().PtrTo()> [config.PtrSize] dst)
len
(Store {config.fe.TypeBytePtr()} dst ptr mem)))
(Store {fe.TypeBytePtr()} dst ptr mem)))
// interface ops
(ITab (IMake itab _)) -> itab
......@@ -81,12 +81,12 @@
(Load <t> ptr mem) && t.IsInterface() ->
(IMake
(Load <config.fe.TypeBytePtr()> ptr mem)
(Load <config.fe.TypeBytePtr()>
(OffPtr <config.fe.TypeBytePtr().PtrTo()> [config.PtrSize] ptr)
(Load <fe.TypeBytePtr()> ptr mem)
(Load <fe.TypeBytePtr()>
(OffPtr <fe.TypeBytePtr().PtrTo()> [config.PtrSize] ptr)
mem))
(Store dst (IMake itab data) mem) ->
(Store {config.fe.TypeBytePtr()}
(OffPtr <config.fe.TypeBytePtr().PtrTo()> [config.PtrSize] dst)
(Store {fe.TypeBytePtr()}
(OffPtr <fe.TypeBytePtr().PtrTo()> [config.PtrSize] dst)
data
(Store {config.fe.TypeUintptr()} dst itab mem))
(Store {fe.TypeUintptr()} dst itab mem))
......@@ -12,23 +12,23 @@
(Load <t> ptr mem) && is64BitInt(t) && !config.BigEndian && t.IsSigned() ->
(Int64Make
(Load <config.fe.TypeInt32()> (OffPtr <config.fe.TypeInt32().PtrTo()> [4] ptr) mem)
(Load <config.fe.TypeUInt32()> ptr mem))
(Load <fe.TypeInt32()> (OffPtr <fe.TypeInt32().PtrTo()> [4] ptr) mem)
(Load <fe.TypeUInt32()> ptr mem))
(Load <t> ptr mem) && is64BitInt(t) && !config.BigEndian && !t.IsSigned() ->
(Int64Make
(Load <config.fe.TypeUInt32()> (OffPtr <config.fe.TypeUInt32().PtrTo()> [4] ptr) mem)
(Load <config.fe.TypeUInt32()> ptr mem))
(Load <fe.TypeUInt32()> (OffPtr <fe.TypeUInt32().PtrTo()> [4] ptr) mem)
(Load <fe.TypeUInt32()> ptr mem))
(Load <t> ptr mem) && is64BitInt(t) && config.BigEndian && t.IsSigned() ->
(Int64Make
(Load <config.fe.TypeInt32()> ptr mem)
(Load <config.fe.TypeUInt32()> (OffPtr <config.fe.TypeUInt32().PtrTo()> [4] ptr) mem))
(Load <fe.TypeInt32()> ptr mem)
(Load <fe.TypeUInt32()> (OffPtr <fe.TypeUInt32().PtrTo()> [4] ptr) mem))
(Load <t> ptr mem) && is64BitInt(t) && config.BigEndian && !t.IsSigned() ->
(Int64Make
(Load <config.fe.TypeUInt32()> ptr mem)
(Load <config.fe.TypeUInt32()> (OffPtr <config.fe.TypeUInt32().PtrTo()> [4] ptr) mem))
(Load <fe.TypeUInt32()> ptr mem)
(Load <fe.TypeUInt32()> (OffPtr <fe.TypeUInt32().PtrTo()> [4] ptr) mem))
(Store {t} dst (Int64Make hi lo) mem) && t.(Type).Size() == 8 && !config.BigEndian ->
(Store {hi.Type}
......@@ -44,94 +44,94 @@
(Arg {n} [off]) && is64BitInt(v.Type) && !config.BigEndian && v.Type.IsSigned() ->
(Int64Make
(Arg <config.fe.TypeInt32()> {n} [off+4])
(Arg <config.fe.TypeUInt32()> {n} [off]))
(Arg <fe.TypeInt32()> {n} [off+4])
(Arg <fe.TypeUInt32()> {n} [off]))
(Arg {n} [off]) && is64BitInt(v.Type) && !config.BigEndian && !v.Type.IsSigned() ->
(Int64Make
(Arg <config.fe.TypeUInt32()> {n} [off+4])
(Arg <config.fe.TypeUInt32()> {n} [off]))
(Arg <fe.TypeUInt32()> {n} [off+4])
(Arg <fe.TypeUInt32()> {n} [off]))
(Arg {n} [off]) && is64BitInt(v.Type) && config.BigEndian && v.Type.IsSigned() ->
(Int64Make
(Arg <config.fe.TypeInt32()> {n} [off])
(Arg <config.fe.TypeUInt32()> {n} [off+4]))
(Arg <fe.TypeInt32()> {n} [off])
(Arg <fe.TypeUInt32()> {n} [off+4]))
(Arg {n} [off]) && is64BitInt(v.Type) && config.BigEndian && !v.Type.IsSigned() ->
(Int64Make
(Arg <config.fe.TypeUInt32()> {n} [off])
(Arg <config.fe.TypeUInt32()> {n} [off+4]))
(Arg <fe.TypeUInt32()> {n} [off])
(Arg <fe.TypeUInt32()> {n} [off+4]))
(Add64 x y) ->
(Int64Make
(Add32withcarry <config.fe.TypeInt32()>
(Add32withcarry <fe.TypeInt32()>
(Int64Hi x)
(Int64Hi y)
(Select1 <TypeFlags> (Add32carry (Int64Lo x) (Int64Lo y))))
(Select0 <config.fe.TypeUInt32()> (Add32carry (Int64Lo x) (Int64Lo y))))
(Select0 <fe.TypeUInt32()> (Add32carry (Int64Lo x) (Int64Lo y))))
(Sub64 x y) ->
(Int64Make
(Sub32withcarry <config.fe.TypeInt32()>
(Sub32withcarry <fe.TypeInt32()>
(Int64Hi x)
(Int64Hi y)
(Select1 <TypeFlags> (Sub32carry (Int64Lo x) (Int64Lo y))))
(Select0 <config.fe.TypeUInt32()> (Sub32carry (Int64Lo x) (Int64Lo y))))
(Select0 <fe.TypeUInt32()> (Sub32carry (Int64Lo x) (Int64Lo y))))
(Mul64 x y) ->
(Int64Make
(Add32 <config.fe.TypeUInt32()>
(Mul32 <config.fe.TypeUInt32()> (Int64Lo x) (Int64Hi y))
(Add32 <config.fe.TypeUInt32()>
(Mul32 <config.fe.TypeUInt32()> (Int64Hi x) (Int64Lo y))
(Select0 <config.fe.TypeUInt32()> (Mul32uhilo (Int64Lo x) (Int64Lo y)))))
(Select1 <config.fe.TypeUInt32()> (Mul32uhilo (Int64Lo x) (Int64Lo y))))
(Add32 <fe.TypeUInt32()>
(Mul32 <fe.TypeUInt32()> (Int64Lo x) (Int64Hi y))
(Add32 <fe.TypeUInt32()>
(Mul32 <fe.TypeUInt32()> (Int64Hi x) (Int64Lo y))
(Select0 <fe.TypeUInt32()> (Mul32uhilo (Int64Lo x) (Int64Lo y)))))
(Select1 <fe.TypeUInt32()> (Mul32uhilo (Int64Lo x) (Int64Lo y))))
(And64 x y) ->
(Int64Make
(And32 <config.fe.TypeUInt32()> (Int64Hi x) (Int64Hi y))
(And32 <config.fe.TypeUInt32()> (Int64Lo x) (Int64Lo y)))
(And32 <fe.TypeUInt32()> (Int64Hi x) (Int64Hi y))
(And32 <fe.TypeUInt32()> (Int64Lo x) (Int64Lo y)))
(Or64 x y) ->
(Int64Make
(Or32 <config.fe.TypeUInt32()> (Int64Hi x) (Int64Hi y))
(Or32 <config.fe.TypeUInt32()> (Int64Lo x) (Int64Lo y)))
(Or32 <fe.TypeUInt32()> (Int64Hi x) (Int64Hi y))
(Or32 <fe.TypeUInt32()> (Int64Lo x) (Int64Lo y)))
(Xor64 x y) ->
(Int64Make
(Xor32 <config.fe.TypeUInt32()> (Int64Hi x) (Int64Hi y))
(Xor32 <config.fe.TypeUInt32()> (Int64Lo x) (Int64Lo y)))
(Xor32 <fe.TypeUInt32()> (Int64Hi x) (Int64Hi y))
(Xor32 <fe.TypeUInt32()> (Int64Lo x) (Int64Lo y)))
(Neg64 <t> x) -> (Sub64 (Const64 <t> [0]) x)
(Com64 x) ->
(Int64Make
(Com32 <config.fe.TypeUInt32()> (Int64Hi x))
(Com32 <config.fe.TypeUInt32()> (Int64Lo x)))
(Com32 <fe.TypeUInt32()> (Int64Hi x))
(Com32 <fe.TypeUInt32()> (Int64Lo x)))
(Ctz64 x) ->
(Add32 <config.fe.TypeUInt32()>
(Ctz32 <config.fe.TypeUInt32()> (Int64Lo x))
(And32 <config.fe.TypeUInt32()>
(Com32 <config.fe.TypeUInt32()> (Zeromask (Int64Lo x)))
(Ctz32 <config.fe.TypeUInt32()> (Int64Hi x))))
(Add32 <fe.TypeUInt32()>
(Ctz32 <fe.TypeUInt32()> (Int64Lo x))
(And32 <fe.TypeUInt32()>
(Com32 <fe.TypeUInt32()> (Zeromask (Int64Lo x)))
(Ctz32 <fe.TypeUInt32()> (Int64Hi x))))
(BitLen64 x) ->
(Add32 <config.fe.TypeInt()>
(BitLen32 <config.fe.TypeInt()> (Int64Hi x))
(BitLen32 <config.fe.TypeInt()>
(Or32 <config.fe.TypeUInt32()>
(Add32 <fe.TypeInt()>
(BitLen32 <fe.TypeInt()> (Int64Hi x))
(BitLen32 <fe.TypeInt()>
(Or32 <fe.TypeUInt32()>
(Int64Lo x)
(Zeromask (Int64Hi x)))))
(Bswap64 x) ->
(Int64Make
(Bswap32 <config.fe.TypeUInt32()> (Int64Lo x))
(Bswap32 <config.fe.TypeUInt32()> (Int64Hi x)))
(Bswap32 <fe.TypeUInt32()> (Int64Lo x))
(Bswap32 <fe.TypeUInt32()> (Int64Hi x)))
(SignExt32to64 x) -> (Int64Make (Signmask x) x)
(SignExt16to64 x) -> (SignExt32to64 (SignExt16to32 x))
(SignExt8to64 x) -> (SignExt32to64 (SignExt8to32 x))
(ZeroExt32to64 x) -> (Int64Make (Const32 <config.fe.TypeUInt32()> [0]) x)
(ZeroExt32to64 x) -> (Int64Make (Const32 <fe.TypeUInt32()> [0]) x)
(ZeroExt16to64 x) -> (ZeroExt32to64 (ZeroExt16to32 x))
(ZeroExt8to64 x) -> (ZeroExt32to64 (ZeroExt8to32 x))
......@@ -170,160 +170,160 @@
// turn x64 non-constant shifts to x32 shifts
// if high 32-bit of the shift is nonzero, make a huge shift
(Lsh64x64 x (Int64Make hi lo)) && hi.Op != OpConst32 ->
(Lsh64x32 x (Or32 <config.fe.TypeUInt32()> (Zeromask hi) lo))
(Lsh64x32 x (Or32 <fe.TypeUInt32()> (Zeromask hi) lo))
(Rsh64x64 x (Int64Make hi lo)) && hi.Op != OpConst32 ->
(Rsh64x32 x (Or32 <config.fe.TypeUInt32()> (Zeromask hi) lo))
(Rsh64x32 x (Or32 <fe.TypeUInt32()> (Zeromask hi) lo))
(Rsh64Ux64 x (Int64Make hi lo)) && hi.Op != OpConst32 ->
(Rsh64Ux32 x (Or32 <config.fe.TypeUInt32()> (Zeromask hi) lo))
(Rsh64Ux32 x (Or32 <fe.TypeUInt32()> (Zeromask hi) lo))
(Lsh32x64 x (Int64Make hi lo)) && hi.Op != OpConst32 ->
(Lsh32x32 x (Or32 <config.fe.TypeUInt32()> (Zeromask hi) lo))
(Lsh32x32 x (Or32 <fe.TypeUInt32()> (Zeromask hi) lo))
(Rsh32x64 x (Int64Make hi lo)) && hi.Op != OpConst32 ->
(Rsh32x32 x (Or32 <config.fe.TypeUInt32()> (Zeromask hi) lo))
(Rsh32x32 x (Or32 <fe.TypeUInt32()> (Zeromask hi) lo))
(Rsh32Ux64 x (Int64Make hi lo)) && hi.Op != OpConst32 ->
(Rsh32Ux32 x (Or32 <config.fe.TypeUInt32()> (Zeromask hi) lo))
(Rsh32Ux32 x (Or32 <fe.TypeUInt32()> (Zeromask hi) lo))
(Lsh16x64 x (Int64Make hi lo)) && hi.Op != OpConst32 ->
(Lsh16x32 x (Or32 <config.fe.TypeUInt32()> (Zeromask hi) lo))
(Lsh16x32 x (Or32 <fe.TypeUInt32()> (Zeromask hi) lo))
(Rsh16x64 x (Int64Make hi lo)) && hi.Op != OpConst32 ->
(Rsh16x32 x (Or32 <config.fe.TypeUInt32()> (Zeromask hi) lo))
(Rsh16x32 x (Or32 <fe.TypeUInt32()> (Zeromask hi) lo))
(Rsh16Ux64 x (Int64Make hi lo)) && hi.Op != OpConst32 ->
(Rsh16Ux32 x (Or32 <config.fe.TypeUInt32()> (Zeromask hi) lo))
(Rsh16Ux32 x (Or32 <fe.TypeUInt32()> (Zeromask hi) lo))
(Lsh8x64 x (Int64Make hi lo)) && hi.Op != OpConst32 ->
(Lsh8x32 x (Or32 <config.fe.TypeUInt32()> (Zeromask hi) lo))
(Lsh8x32 x (Or32 <fe.TypeUInt32()> (Zeromask hi) lo))
(Rsh8x64 x (Int64Make hi lo)) && hi.Op != OpConst32 ->
(Rsh8x32 x (Or32 <config.fe.TypeUInt32()> (Zeromask hi) lo))
(Rsh8x32 x (Or32 <fe.TypeUInt32()> (Zeromask hi) lo))
(Rsh8Ux64 x (Int64Make hi lo)) && hi.Op != OpConst32 ->
(Rsh8Ux32 x (Or32 <config.fe.TypeUInt32()> (Zeromask hi) lo))
(Rsh8Ux32 x (Or32 <fe.TypeUInt32()> (Zeromask hi) lo))
// 64x left shift
// result.hi = hi<<s | lo>>(32-s) | lo<<(s-32) // >> is unsigned, large shifts result 0
// result.lo = lo<<s
(Lsh64x32 (Int64Make hi lo) s) ->
(Int64Make
(Or32 <config.fe.TypeUInt32()>
(Or32 <config.fe.TypeUInt32()>
(Lsh32x32 <config.fe.TypeUInt32()> hi s)
(Rsh32Ux32 <config.fe.TypeUInt32()>
(Or32 <fe.TypeUInt32()>
(Or32 <fe.TypeUInt32()>
(Lsh32x32 <fe.TypeUInt32()> hi s)
(Rsh32Ux32 <fe.TypeUInt32()>
lo
(Sub32 <config.fe.TypeUInt32()> (Const32 <config.fe.TypeUInt32()> [32]) s)))
(Lsh32x32 <config.fe.TypeUInt32()>
(Sub32 <fe.TypeUInt32()> (Const32 <fe.TypeUInt32()> [32]) s)))
(Lsh32x32 <fe.TypeUInt32()>
lo
(Sub32 <config.fe.TypeUInt32()> s (Const32 <config.fe.TypeUInt32()> [32]))))
(Lsh32x32 <config.fe.TypeUInt32()> lo s))
(Sub32 <fe.TypeUInt32()> s (Const32 <fe.TypeUInt32()> [32]))))
(Lsh32x32 <fe.TypeUInt32()> lo s))
(Lsh64x16 (Int64Make hi lo) s) ->
(Int64Make
(Or32 <config.fe.TypeUInt32()>
(Or32 <config.fe.TypeUInt32()>
(Lsh32x16 <config.fe.TypeUInt32()> hi s)
(Rsh32Ux16 <config.fe.TypeUInt32()>
(Or32 <fe.TypeUInt32()>
(Or32 <fe.TypeUInt32()>
(Lsh32x16 <fe.TypeUInt32()> hi s)
(Rsh32Ux16 <fe.TypeUInt32()>
lo
(Sub16 <config.fe.TypeUInt16()> (Const16 <config.fe.TypeUInt16()> [32]) s)))
(Lsh32x16 <config.fe.TypeUInt32()>
(Sub16 <fe.TypeUInt16()> (Const16 <fe.TypeUInt16()> [32]) s)))
(Lsh32x16 <fe.TypeUInt32()>
lo
(Sub16 <config.fe.TypeUInt16()> s (Const16 <config.fe.TypeUInt16()> [32]))))
(Lsh32x16 <config.fe.TypeUInt32()> lo s))
(Sub16 <fe.TypeUInt16()> s (Const16 <fe.TypeUInt16()> [32]))))
(Lsh32x16 <fe.TypeUInt32()> lo s))
(Lsh64x8 (Int64Make hi lo) s) ->
(Int64Make
(Or32 <config.fe.TypeUInt32()>
(Or32 <config.fe.TypeUInt32()>
(Lsh32x8 <config.fe.TypeUInt32()> hi s)
(Rsh32Ux8 <config.fe.TypeUInt32()>
(Or32 <fe.TypeUInt32()>
(Or32 <fe.TypeUInt32()>
(Lsh32x8 <fe.TypeUInt32()> hi s)
(Rsh32Ux8 <fe.TypeUInt32()>
lo
(Sub8 <config.fe.TypeUInt8()> (Const8 <config.fe.TypeUInt8()> [32]) s)))
(Lsh32x8 <config.fe.TypeUInt32()>
(Sub8 <fe.TypeUInt8()> (Const8 <fe.TypeUInt8()> [32]) s)))
(Lsh32x8 <fe.TypeUInt32()>
lo
(Sub8 <config.fe.TypeUInt8()> s (Const8 <config.fe.TypeUInt8()> [32]))))
(Lsh32x8 <config.fe.TypeUInt32()> lo s))
(Sub8 <fe.TypeUInt8()> s (Const8 <fe.TypeUInt8()> [32]))))
(Lsh32x8 <fe.TypeUInt32()> lo s))
// 64x unsigned right shift
// result.hi = hi>>s
// result.lo = lo>>s | hi<<(32-s) | hi>>(s-32) // >> is unsigned, large shifts result 0
(Rsh64Ux32 (Int64Make hi lo) s) ->
(Int64Make
(Rsh32Ux32 <config.fe.TypeUInt32()> hi s)
(Or32 <config.fe.TypeUInt32()>
(Or32 <config.fe.TypeUInt32()>
(Rsh32Ux32 <config.fe.TypeUInt32()> lo s)
(Lsh32x32 <config.fe.TypeUInt32()>
(Rsh32Ux32 <fe.TypeUInt32()> hi s)
(Or32 <fe.TypeUInt32()>
(Or32 <fe.TypeUInt32()>
(Rsh32Ux32 <fe.TypeUInt32()> lo s)
(Lsh32x32 <fe.TypeUInt32()>
hi
(Sub32 <config.fe.TypeUInt32()> (Const32 <config.fe.TypeUInt32()> [32]) s)))
(Rsh32Ux32 <config.fe.TypeUInt32()>
(Sub32 <fe.TypeUInt32()> (Const32 <fe.TypeUInt32()> [32]) s)))
(Rsh32Ux32 <fe.TypeUInt32()>
hi
(Sub32 <config.fe.TypeUInt32()> s (Const32 <config.fe.TypeUInt32()> [32])))))
(Sub32 <fe.TypeUInt32()> s (Const32 <fe.TypeUInt32()> [32])))))
(Rsh64Ux16 (Int64Make hi lo) s) ->
(Int64Make
(Rsh32Ux16 <config.fe.TypeUInt32()> hi s)
(Or32 <config.fe.TypeUInt32()>
(Or32 <config.fe.TypeUInt32()>
(Rsh32Ux16 <config.fe.TypeUInt32()> lo s)
(Lsh32x16 <config.fe.TypeUInt32()>
(Rsh32Ux16 <fe.TypeUInt32()> hi s)
(Or32 <fe.TypeUInt32()>
(Or32 <fe.TypeUInt32()>
(Rsh32Ux16 <fe.TypeUInt32()> lo s)
(Lsh32x16 <fe.TypeUInt32()>
hi
(Sub16 <config.fe.TypeUInt16()> (Const16 <config.fe.TypeUInt16()> [32]) s)))
(Rsh32Ux16 <config.fe.TypeUInt32()>
(Sub16 <fe.TypeUInt16()> (Const16 <fe.TypeUInt16()> [32]) s)))
(Rsh32Ux16 <fe.TypeUInt32()>
hi
(Sub16 <config.fe.TypeUInt16()> s (Const16 <config.fe.TypeUInt16()> [32])))))
(Sub16 <fe.TypeUInt16()> s (Const16 <fe.TypeUInt16()> [32])))))
(Rsh64Ux8 (Int64Make hi lo) s) ->
(Int64Make
(Rsh32Ux8 <config.fe.TypeUInt32()> hi s)
(Or32 <config.fe.TypeUInt32()>
(Or32 <config.fe.TypeUInt32()>
(Rsh32Ux8 <config.fe.TypeUInt32()> lo s)
(Lsh32x8 <config.fe.TypeUInt32()>
(Rsh32Ux8 <fe.TypeUInt32()> hi s)
(Or32 <fe.TypeUInt32()>
(Or32 <fe.TypeUInt32()>
(Rsh32Ux8 <fe.TypeUInt32()> lo s)
(Lsh32x8 <fe.TypeUInt32()>
hi
(Sub8 <config.fe.TypeUInt8()> (Const8 <config.fe.TypeUInt8()> [32]) s)))
(Rsh32Ux8 <config.fe.TypeUInt32()>
(Sub8 <fe.TypeUInt8()> (Const8 <fe.TypeUInt8()> [32]) s)))
(Rsh32Ux8 <fe.TypeUInt32()>
hi
(Sub8 <config.fe.TypeUInt8()> s (Const8 <config.fe.TypeUInt8()> [32])))))
(Sub8 <fe.TypeUInt8()> s (Const8 <fe.TypeUInt8()> [32])))))
// 64x signed right shift
// result.hi = hi>>s
// result.lo = lo>>s | hi<<(32-s) | (hi>>(s-32))&zeromask(s>>5) // hi>>(s-32) is signed, large shifts result 0/-1
(Rsh64x32 (Int64Make hi lo) s) ->
(Int64Make
(Rsh32x32 <config.fe.TypeUInt32()> hi s)
(Or32 <config.fe.TypeUInt32()>
(Or32 <config.fe.TypeUInt32()>
(Rsh32Ux32 <config.fe.TypeUInt32()> lo s)
(Lsh32x32 <config.fe.TypeUInt32()>
(Rsh32x32 <fe.TypeUInt32()> hi s)
(Or32 <fe.TypeUInt32()>
(Or32 <fe.TypeUInt32()>
(Rsh32Ux32 <fe.TypeUInt32()> lo s)
(Lsh32x32 <fe.TypeUInt32()>
hi
(Sub32 <config.fe.TypeUInt32()> (Const32 <config.fe.TypeUInt32()> [32]) s)))
(And32 <config.fe.TypeUInt32()>
(Rsh32x32 <config.fe.TypeUInt32()>
(Sub32 <fe.TypeUInt32()> (Const32 <fe.TypeUInt32()> [32]) s)))
(And32 <fe.TypeUInt32()>
(Rsh32x32 <fe.TypeUInt32()>
hi
(Sub32 <config.fe.TypeUInt32()> s (Const32 <config.fe.TypeUInt32()> [32])))
(Sub32 <fe.TypeUInt32()> s (Const32 <fe.TypeUInt32()> [32])))
(Zeromask
(Rsh32Ux32 <config.fe.TypeUInt32()> s (Const32 <config.fe.TypeUInt32()> [5]))))))
(Rsh32Ux32 <fe.TypeUInt32()> s (Const32 <fe.TypeUInt32()> [5]))))))
(Rsh64x16 (Int64Make hi lo) s) ->
(Int64Make
(Rsh32x16 <config.fe.TypeUInt32()> hi s)
(Or32 <config.fe.TypeUInt32()>
(Or32 <config.fe.TypeUInt32()>
(Rsh32Ux16 <config.fe.TypeUInt32()> lo s)
(Lsh32x16 <config.fe.TypeUInt32()>
(Rsh32x16 <fe.TypeUInt32()> hi s)
(Or32 <fe.TypeUInt32()>
(Or32 <fe.TypeUInt32()>
(Rsh32Ux16 <fe.TypeUInt32()> lo s)
(Lsh32x16 <fe.TypeUInt32()>
hi
(Sub16 <config.fe.TypeUInt16()> (Const16 <config.fe.TypeUInt16()> [32]) s)))
(And32 <config.fe.TypeUInt32()>
(Rsh32x16 <config.fe.TypeUInt32()>
(Sub16 <fe.TypeUInt16()> (Const16 <fe.TypeUInt16()> [32]) s)))
(And32 <fe.TypeUInt32()>
(Rsh32x16 <fe.TypeUInt32()>
hi
(Sub16 <config.fe.TypeUInt16()> s (Const16 <config.fe.TypeUInt16()> [32])))
(Sub16 <fe.TypeUInt16()> s (Const16 <fe.TypeUInt16()> [32])))
(Zeromask
(ZeroExt16to32
(Rsh16Ux32 <config.fe.TypeUInt16()> s (Const32 <config.fe.TypeUInt32()> [5])))))))
(Rsh16Ux32 <fe.TypeUInt16()> s (Const32 <fe.TypeUInt32()> [5])))))))
(Rsh64x8 (Int64Make hi lo) s) ->
(Int64Make
(Rsh32x8 <config.fe.TypeUInt32()> hi s)
(Or32 <config.fe.TypeUInt32()>
(Or32 <config.fe.TypeUInt32()>
(Rsh32Ux8 <config.fe.TypeUInt32()> lo s)
(Lsh32x8 <config.fe.TypeUInt32()>
(Rsh32x8 <fe.TypeUInt32()> hi s)
(Or32 <fe.TypeUInt32()>
(Or32 <fe.TypeUInt32()>
(Rsh32Ux8 <fe.TypeUInt32()> lo s)
(Lsh32x8 <fe.TypeUInt32()>
hi
(Sub8 <config.fe.TypeUInt8()> (Const8 <config.fe.TypeUInt8()> [32]) s)))
(And32 <config.fe.TypeUInt32()>
(Rsh32x8 <config.fe.TypeUInt32()>
(Sub8 <fe.TypeUInt8()> (Const8 <fe.TypeUInt8()> [32]) s)))
(And32 <fe.TypeUInt32()>
(Rsh32x8 <fe.TypeUInt32()>
hi
(Sub8 <config.fe.TypeUInt8()> s (Const8 <config.fe.TypeUInt8()> [32])))
(Sub8 <fe.TypeUInt8()> s (Const8 <fe.TypeUInt8()> [32])))
(Zeromask
(ZeroExt8to32
(Rsh8Ux32 <config.fe.TypeUInt8()> s (Const32 <config.fe.TypeUInt32()> [5])))))))
(Rsh8Ux32 <fe.TypeUInt8()> s (Const32 <fe.TypeUInt32()> [5])))))))
// 64xConst32 shifts
// we probably do not need them -- lateopt may take care of them just fine
......@@ -333,48 +333,48 @@
//
//(Lsh64x32 x (Const32 [c])) && c < 64 && c > 32 ->
// (Int64Make
// (Lsh32x32 <config.fe.TypeUInt32()> (Int64Lo x) (Const32 <config.fe.TypeUInt32()> [c-32]))
// (Const32 <config.fe.TypeUInt32()> [0]))
// (Lsh32x32 <fe.TypeUInt32()> (Int64Lo x) (Const32 <fe.TypeUInt32()> [c-32]))
// (Const32 <fe.TypeUInt32()> [0]))
//(Rsh64x32 x (Const32 [c])) && c < 64 && c > 32 ->
// (Int64Make
// (Signmask (Int64Hi x))
// (Rsh32x32 <config.fe.TypeInt32()> (Int64Hi x) (Const32 <config.fe.TypeUInt32()> [c-32])))
// (Rsh32x32 <fe.TypeInt32()> (Int64Hi x) (Const32 <fe.TypeUInt32()> [c-32])))
//(Rsh64Ux32 x (Const32 [c])) && c < 64 && c > 32 ->
// (Int64Make
// (Const32 <config.fe.TypeUInt32()> [0])
// (Rsh32Ux32 <config.fe.TypeUInt32()> (Int64Hi x) (Const32 <config.fe.TypeUInt32()> [c-32])))
// (Const32 <fe.TypeUInt32()> [0])
// (Rsh32Ux32 <fe.TypeUInt32()> (Int64Hi x) (Const32 <fe.TypeUInt32()> [c-32])))
//
//(Lsh64x32 x (Const32 [32])) -> (Int64Make (Int64Lo x) (Const32 <config.fe.TypeUInt32()> [0]))
//(Lsh64x32 x (Const32 [32])) -> (Int64Make (Int64Lo x) (Const32 <fe.TypeUInt32()> [0]))
//(Rsh64x32 x (Const32 [32])) -> (Int64Make (Signmask (Int64Hi x)) (Int64Hi x))
//(Rsh64Ux32 x (Const32 [32])) -> (Int64Make (Const32 <config.fe.TypeUInt32()> [0]) (Int64Hi x))
//(Rsh64Ux32 x (Const32 [32])) -> (Int64Make (Const32 <fe.TypeUInt32()> [0]) (Int64Hi x))
//
//(Lsh64x32 x (Const32 [c])) && c < 32 && c > 0 ->
// (Int64Make
// (Or32 <config.fe.TypeUInt32()>
// (Lsh32x32 <config.fe.TypeUInt32()> (Int64Hi x) (Const32 <config.fe.TypeUInt32()> [c]))
// (Rsh32Ux32 <config.fe.TypeUInt32()> (Int64Lo x) (Const32 <config.fe.TypeUInt32()> [32-c])))
// (Lsh32x32 <config.fe.TypeUInt32()> (Int64Lo x) (Const32 <config.fe.TypeUInt32()> [c])))
// (Or32 <fe.TypeUInt32()>
// (Lsh32x32 <fe.TypeUInt32()> (Int64Hi x) (Const32 <fe.TypeUInt32()> [c]))
// (Rsh32Ux32 <fe.TypeUInt32()> (Int64Lo x) (Const32 <fe.TypeUInt32()> [32-c])))
// (Lsh32x32 <fe.TypeUInt32()> (Int64Lo x) (Const32 <fe.TypeUInt32()> [c])))
//(Rsh64x32 x (Const32 [c])) && c < 32 && c > 0 ->
// (Int64Make
// (Rsh32x32 <config.fe.TypeInt32()> (Int64Hi x) (Const32 <config.fe.TypeUInt32()> [c]))
// (Or32 <config.fe.TypeUInt32()>
// (Rsh32Ux32 <config.fe.TypeUInt32()> (Int64Lo x) (Const32 <config.fe.TypeUInt32()> [c]))
// (Lsh32x32 <config.fe.TypeUInt32()> (Int64Hi x) (Const32 <config.fe.TypeUInt32()> [32-c]))))
// (Rsh32x32 <fe.TypeInt32()> (Int64Hi x) (Const32 <fe.TypeUInt32()> [c]))
// (Or32 <fe.TypeUInt32()>
// (Rsh32Ux32 <fe.TypeUInt32()> (Int64Lo x) (Const32 <fe.TypeUInt32()> [c]))
// (Lsh32x32 <fe.TypeUInt32()> (Int64Hi x) (Const32 <fe.TypeUInt32()> [32-c]))))
//(Rsh64Ux32 x (Const32 [c])) && c < 32 && c > 0 ->
// (Int64Make
// (Rsh32Ux32 <config.fe.TypeUInt32()> (Int64Hi x) (Const32 <config.fe.TypeUInt32()> [c]))
// (Or32 <config.fe.TypeUInt32()>
// (Rsh32Ux32 <config.fe.TypeUInt32()> (Int64Lo x) (Const32 <config.fe.TypeUInt32()> [c]))
// (Lsh32x32 <config.fe.TypeUInt32()> (Int64Hi x) (Const32 <config.fe.TypeUInt32()> [32-c]))))
// (Rsh32Ux32 <fe.TypeUInt32()> (Int64Hi x) (Const32 <fe.TypeUInt32()> [c]))
// (Or32 <fe.TypeUInt32()>
// (Rsh32Ux32 <fe.TypeUInt32()> (Int64Lo x) (Const32 <fe.TypeUInt32()> [c]))
// (Lsh32x32 <fe.TypeUInt32()> (Int64Hi x) (Const32 <fe.TypeUInt32()> [32-c]))))
//
//(Lsh64x32 x (Const32 [0])) -> x
//(Rsh64x32 x (Const32 [0])) -> x
//(Rsh64Ux32 x (Const32 [0])) -> x
(Const64 <t> [c]) && t.IsSigned() ->
(Int64Make (Const32 <config.fe.TypeInt32()> [c>>32]) (Const32 <config.fe.TypeUInt32()> [int64(int32(c))]))
(Int64Make (Const32 <fe.TypeInt32()> [c>>32]) (Const32 <fe.TypeUInt32()> [int64(int32(c))]))
(Const64 <t> [c]) && !t.IsSigned() ->
(Int64Make (Const32 <config.fe.TypeUInt32()> [c>>32]) (Const32 <config.fe.TypeUInt32()> [int64(int32(c))]))
(Int64Make (Const32 <fe.TypeUInt32()> [c>>32]) (Const32 <fe.TypeUInt32()> [int64(int32(c))]))
(Eq64 x y) ->
(AndB
......
......@@ -155,14 +155,14 @@
(Mul64 (Const64 [-1]) x) -> (Neg64 x)
// Convert multiplication by a power of two to a shift.
(Mul8 <t> n (Const8 [c])) && isPowerOfTwo(c) -> (Lsh8x64 <t> n (Const64 <config.fe.TypeUInt64()> [log2(c)]))
(Mul16 <t> n (Const16 [c])) && isPowerOfTwo(c) -> (Lsh16x64 <t> n (Const64 <config.fe.TypeUInt64()> [log2(c)]))
(Mul32 <t> n (Const32 [c])) && isPowerOfTwo(c) -> (Lsh32x64 <t> n (Const64 <config.fe.TypeUInt64()> [log2(c)]))
(Mul64 <t> n (Const64 [c])) && isPowerOfTwo(c) -> (Lsh64x64 <t> n (Const64 <config.fe.TypeUInt64()> [log2(c)]))
(Mul8 <t> n (Const8 [c])) && t.IsSigned() && isPowerOfTwo(-c) -> (Neg8 (Lsh8x64 <t> n (Const64 <config.fe.TypeUInt64()> [log2(-c)])))
(Mul16 <t> n (Const16 [c])) && t.IsSigned() && isPowerOfTwo(-c) -> (Neg16 (Lsh16x64 <t> n (Const64 <config.fe.TypeUInt64()> [log2(-c)])))
(Mul32 <t> n (Const32 [c])) && t.IsSigned() && isPowerOfTwo(-c) -> (Neg32 (Lsh32x64 <t> n (Const64 <config.fe.TypeUInt64()> [log2(-c)])))
(Mul64 <t> n (Const64 [c])) && t.IsSigned() && isPowerOfTwo(-c) -> (Neg64 (Lsh64x64 <t> n (Const64 <config.fe.TypeUInt64()> [log2(-c)])))
(Mul8 <t> n (Const8 [c])) && isPowerOfTwo(c) -> (Lsh8x64 <t> n (Const64 <fe.TypeUInt64()> [log2(c)]))
(Mul16 <t> n (Const16 [c])) && isPowerOfTwo(c) -> (Lsh16x64 <t> n (Const64 <fe.TypeUInt64()> [log2(c)]))
(Mul32 <t> n (Const32 [c])) && isPowerOfTwo(c) -> (Lsh32x64 <t> n (Const64 <fe.TypeUInt64()> [log2(c)]))
(Mul64 <t> n (Const64 [c])) && isPowerOfTwo(c) -> (Lsh64x64 <t> n (Const64 <fe.TypeUInt64()> [log2(c)]))
(Mul8 <t> n (Const8 [c])) && t.IsSigned() && isPowerOfTwo(-c) -> (Neg8 (Lsh8x64 <t> n (Const64 <fe.TypeUInt64()> [log2(-c)])))
(Mul16 <t> n (Const16 [c])) && t.IsSigned() && isPowerOfTwo(-c) -> (Neg16 (Lsh16x64 <t> n (Const64 <fe.TypeUInt64()> [log2(-c)])))
(Mul32 <t> n (Const32 [c])) && t.IsSigned() && isPowerOfTwo(-c) -> (Neg32 (Lsh32x64 <t> n (Const64 <fe.TypeUInt64()> [log2(-c)])))
(Mul64 <t> n (Const64 [c])) && t.IsSigned() && isPowerOfTwo(-c) -> (Neg64 (Lsh64x64 <t> n (Const64 <fe.TypeUInt64()> [log2(-c)])))
(Mod8 (Const8 [c]) (Const8 [d])) && d != 0 -> (Const8 [int64(int8(c % d))])
(Mod16 (Const16 [c]) (Const16 [d])) && d != 0 -> (Const16 [int64(int16(c % d))])
......@@ -481,46 +481,46 @@
// ((x >> c1) << c2) >> c3
(Rsh64Ux64 (Lsh64x64 (Rsh64Ux64 x (Const64 [c1])) (Const64 [c2])) (Const64 [c3]))
&& uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3)
-> (Rsh64Ux64 x (Const64 <config.fe.TypeUInt64()> [c1-c2+c3]))
-> (Rsh64Ux64 x (Const64 <fe.TypeUInt64()> [c1-c2+c3]))
(Rsh32Ux64 (Lsh32x64 (Rsh32Ux64 x (Const64 [c1])) (Const64 [c2])) (Const64 [c3]))
&& uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3)
-> (Rsh32Ux64 x (Const64 <config.fe.TypeUInt64()> [c1-c2+c3]))
-> (Rsh32Ux64 x (Const64 <fe.TypeUInt64()> [c1-c2+c3]))
(Rsh16Ux64 (Lsh16x64 (Rsh16Ux64 x (Const64 [c1])) (Const64 [c2])) (Const64 [c3]))
&& uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3)
-> (Rsh16Ux64 x (Const64 <config.fe.TypeUInt64()> [c1-c2+c3]))
-> (Rsh16Ux64 x (Const64 <fe.TypeUInt64()> [c1-c2+c3]))
(Rsh8Ux64 (Lsh8x64 (Rsh8Ux64 x (Const64 [c1])) (Const64 [c2])) (Const64 [c3]))
&& uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3)
-> (Rsh8Ux64 x (Const64 <config.fe.TypeUInt64()> [c1-c2+c3]))
-> (Rsh8Ux64 x (Const64 <fe.TypeUInt64()> [c1-c2+c3]))
// ((x << c1) >> c2) << c3
(Lsh64x64 (Rsh64Ux64 (Lsh64x64 x (Const64 [c1])) (Const64 [c2])) (Const64 [c3]))
&& uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3)
-> (Lsh64x64 x (Const64 <config.fe.TypeUInt64()> [c1-c2+c3]))
-> (Lsh64x64 x (Const64 <fe.TypeUInt64()> [c1-c2+c3]))
(Lsh32x64 (Rsh32Ux64 (Lsh32x64 x (Const64 [c1])) (Const64 [c2])) (Const64 [c3]))
&& uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3)
-> (Lsh32x64 x (Const64 <config.fe.TypeUInt64()> [c1-c2+c3]))
-> (Lsh32x64 x (Const64 <fe.TypeUInt64()> [c1-c2+c3]))
(Lsh16x64 (Rsh16Ux64 (Lsh16x64 x (Const64 [c1])) (Const64 [c2])) (Const64 [c3]))
&& uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3)
-> (Lsh16x64 x (Const64 <config.fe.TypeUInt64()> [c1-c2+c3]))
-> (Lsh16x64 x (Const64 <fe.TypeUInt64()> [c1-c2+c3]))
(Lsh8x64 (Rsh8Ux64 (Lsh8x64 x (Const64 [c1])) (Const64 [c2])) (Const64 [c3]))
&& uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3)
-> (Lsh8x64 x (Const64 <config.fe.TypeUInt64()> [c1-c2+c3]))
-> (Lsh8x64 x (Const64 <fe.TypeUInt64()> [c1-c2+c3]))
// replace shifts with zero extensions
(Rsh16Ux64 (Lsh16x64 x (Const64 [8])) (Const64 [8])) -> (ZeroExt8to16 (Trunc16to8 <config.fe.TypeUInt8()> x))
(Rsh32Ux64 (Lsh32x64 x (Const64 [24])) (Const64 [24])) -> (ZeroExt8to32 (Trunc32to8 <config.fe.TypeUInt8()> x))
(Rsh64Ux64 (Lsh64x64 x (Const64 [56])) (Const64 [56])) -> (ZeroExt8to64 (Trunc64to8 <config.fe.TypeUInt8()> x))
(Rsh32Ux64 (Lsh32x64 x (Const64 [16])) (Const64 [16])) -> (ZeroExt16to32 (Trunc32to16 <config.fe.TypeUInt16()> x))
(Rsh64Ux64 (Lsh64x64 x (Const64 [48])) (Const64 [48])) -> (ZeroExt16to64 (Trunc64to16 <config.fe.TypeUInt16()> x))
(Rsh64Ux64 (Lsh64x64 x (Const64 [32])) (Const64 [32])) -> (ZeroExt32to64 (Trunc64to32 <config.fe.TypeUInt32()> x))
(Rsh16Ux64 (Lsh16x64 x (Const64 [8])) (Const64 [8])) -> (ZeroExt8to16 (Trunc16to8 <fe.TypeUInt8()> x))
(Rsh32Ux64 (Lsh32x64 x (Const64 [24])) (Const64 [24])) -> (ZeroExt8to32 (Trunc32to8 <fe.TypeUInt8()> x))
(Rsh64Ux64 (Lsh64x64 x (Const64 [56])) (Const64 [56])) -> (ZeroExt8to64 (Trunc64to8 <fe.TypeUInt8()> x))
(Rsh32Ux64 (Lsh32x64 x (Const64 [16])) (Const64 [16])) -> (ZeroExt16to32 (Trunc32to16 <fe.TypeUInt16()> x))
(Rsh64Ux64 (Lsh64x64 x (Const64 [48])) (Const64 [48])) -> (ZeroExt16to64 (Trunc64to16 <fe.TypeUInt16()> x))
(Rsh64Ux64 (Lsh64x64 x (Const64 [32])) (Const64 [32])) -> (ZeroExt32to64 (Trunc64to32 <fe.TypeUInt32()> x))
// replace shifts with sign extensions
(Rsh16x64 (Lsh16x64 x (Const64 [8])) (Const64 [8])) -> (SignExt8to16 (Trunc16to8 <config.fe.TypeInt8()> x))
(Rsh32x64 (Lsh32x64 x (Const64 [24])) (Const64 [24])) -> (SignExt8to32 (Trunc32to8 <config.fe.TypeInt8()> x))
(Rsh64x64 (Lsh64x64 x (Const64 [56])) (Const64 [56])) -> (SignExt8to64 (Trunc64to8 <config.fe.TypeInt8()> x))
(Rsh32x64 (Lsh32x64 x (Const64 [16])) (Const64 [16])) -> (SignExt16to32 (Trunc32to16 <config.fe.TypeInt16()> x))
(Rsh64x64 (Lsh64x64 x (Const64 [48])) (Const64 [48])) -> (SignExt16to64 (Trunc64to16 <config.fe.TypeInt16()> x))
(Rsh64x64 (Lsh64x64 x (Const64 [32])) (Const64 [32])) -> (SignExt32to64 (Trunc64to32 <config.fe.TypeInt32()> x))
(Rsh16x64 (Lsh16x64 x (Const64 [8])) (Const64 [8])) -> (SignExt8to16 (Trunc16to8 <fe.TypeInt8()> x))
(Rsh32x64 (Lsh32x64 x (Const64 [24])) (Const64 [24])) -> (SignExt8to32 (Trunc32to8 <fe.TypeInt8()> x))
(Rsh64x64 (Lsh64x64 x (Const64 [56])) (Const64 [56])) -> (SignExt8to64 (Trunc64to8 <fe.TypeInt8()> x))
(Rsh32x64 (Lsh32x64 x (Const64 [16])) (Const64 [16])) -> (SignExt16to32 (Trunc32to16 <fe.TypeInt16()> x))
(Rsh64x64 (Lsh64x64 x (Const64 [48])) (Const64 [48])) -> (SignExt16to64 (Trunc64to16 <fe.TypeInt16()> x))
(Rsh64x64 (Lsh64x64 x (Const64 [32])) (Const64 [32])) -> (SignExt32to64 (Trunc64to32 <fe.TypeInt32()> x))
// constant comparisons
(Eq64 (Const64 [c]) (Const64 [d])) -> (ConstBool [b2i(c == d)])
......@@ -754,8 +754,8 @@
// indexing operations
// Note: bounds check has already been done
(PtrIndex <t> ptr idx) && config.PtrSize == 4 -> (AddPtr ptr (Mul32 <config.fe.TypeInt()> idx (Const32 <config.fe.TypeInt()> [t.ElemType().Size()])))
(PtrIndex <t> ptr idx) && config.PtrSize == 8 -> (AddPtr ptr (Mul64 <config.fe.TypeInt()> idx (Const64 <config.fe.TypeInt()> [t.ElemType().Size()])))
(PtrIndex <t> ptr idx) && config.PtrSize == 4 -> (AddPtr ptr (Mul32 <fe.TypeInt()> idx (Const32 <fe.TypeInt()> [t.ElemType().Size()])))
(PtrIndex <t> ptr idx) && config.PtrSize == 8 -> (AddPtr ptr (Mul64 <fe.TypeInt()> idx (Const64 <fe.TypeInt()> [t.ElemType().Size()])))
// struct operations
(StructSelect (StructMake1 x)) -> x
......@@ -769,28 +769,28 @@
(StructSelect [2] (StructMake4 _ _ x _)) -> x
(StructSelect [3] (StructMake4 _ _ _ x)) -> x
(Load <t> _ _) && t.IsStruct() && t.NumFields() == 0 && config.fe.CanSSA(t) ->
(Load <t> _ _) && t.IsStruct() && t.NumFields() == 0 && fe.CanSSA(t) ->
(StructMake0)
(Load <t> ptr mem) && t.IsStruct() && t.NumFields() == 1 && config.fe.CanSSA(t) ->
(Load <t> ptr mem) && t.IsStruct() && t.NumFields() == 1 && fe.CanSSA(t) ->
(StructMake1
(Load <t.FieldType(0)> (OffPtr <t.FieldType(0).PtrTo()> [0] ptr) mem))
(Load <t> ptr mem) && t.IsStruct() && t.NumFields() == 2 && config.fe.CanSSA(t) ->
(Load <t> ptr mem) && t.IsStruct() && t.NumFields() == 2 && fe.CanSSA(t) ->
(StructMake2
(Load <t.FieldType(0)> (OffPtr <t.FieldType(0).PtrTo()> [0] ptr) mem)
(Load <t.FieldType(1)> (OffPtr <t.FieldType(1).PtrTo()> [t.FieldOff(1)] ptr) mem))
(Load <t> ptr mem) && t.IsStruct() && t.NumFields() == 3 && config.fe.CanSSA(t) ->
(Load <t> ptr mem) && t.IsStruct() && t.NumFields() == 3 && fe.CanSSA(t) ->
(StructMake3
(Load <t.FieldType(0)> (OffPtr <t.FieldType(0).PtrTo()> [0] ptr) mem)
(Load <t.FieldType(1)> (OffPtr <t.FieldType(1).PtrTo()> [t.FieldOff(1)] ptr) mem)
(Load <t.FieldType(2)> (OffPtr <t.FieldType(2).PtrTo()> [t.FieldOff(2)] ptr) mem))
(Load <t> ptr mem) && t.IsStruct() && t.NumFields() == 4 && config.fe.CanSSA(t) ->
(Load <t> ptr mem) && t.IsStruct() && t.NumFields() == 4 && fe.CanSSA(t) ->
(StructMake4
(Load <t.FieldType(0)> (OffPtr <t.FieldType(0).PtrTo()> [0] ptr) mem)
(Load <t.FieldType(1)> (OffPtr <t.FieldType(1).PtrTo()> [t.FieldOff(1)] ptr) mem)
(Load <t.FieldType(2)> (OffPtr <t.FieldType(2).PtrTo()> [t.FieldOff(2)] ptr) mem)
(Load <t.FieldType(3)> (OffPtr <t.FieldType(3).PtrTo()> [t.FieldOff(3)] ptr) mem))
(StructSelect [i] x:(Load <t> ptr mem)) && !config.fe.CanSSA(t) ->
(StructSelect [i] x:(Load <t> ptr mem)) && !fe.CanSSA(t) ->
@x.Block (Load <v.Type> (OffPtr <v.Type.PtrTo()> [t.FieldOff(int(i))] ptr) mem)
(Store _ (StructMake0) mem) -> mem
......@@ -832,9 +832,9 @@
(StructSelect [0] x:(IData _)) -> x
// un-SSAable values use mem->mem copies
(Store {t} dst (Load src mem) mem) && !config.fe.CanSSA(t.(Type)) ->
(Store {t} dst (Load src mem) mem) && !fe.CanSSA(t.(Type)) ->
(Move {t} [t.(Type).Size()] dst src mem)
(Store {t} dst (Load src mem) (VarDef {x} mem)) && !config.fe.CanSSA(t.(Type)) ->
(Store {t} dst (Load src mem) (VarDef {x} mem)) && !fe.CanSSA(t.(Type)) ->
(Move {t} [t.(Type).Size()] dst src (VarDef {x} mem))
// array ops
......@@ -843,7 +843,7 @@
(Load <t> _ _) && t.IsArray() && t.NumElem() == 0 ->
(ArrayMake0)
(Load <t> ptr mem) && t.IsArray() && t.NumElem() == 1 && config.fe.CanSSA(t) ->
(Load <t> ptr mem) && t.IsArray() && t.NumElem() == 1 && fe.CanSSA(t) ->
(ArrayMake1 (Load <t.ElemType()> ptr mem))
(Store _ (ArrayMake0) mem) -> mem
......@@ -862,19 +862,19 @@
(StringPtr (StringMake (Const64 <t> [c]) _)) -> (Const64 <t> [c])
(StringLen (StringMake _ (Const64 <t> [c]))) -> (Const64 <t> [c])
(ConstString {s}) && config.PtrSize == 4 && s.(string) == "" ->
(StringMake (ConstNil) (Const32 <config.fe.TypeInt()> [0]))
(StringMake (ConstNil) (Const32 <fe.TypeInt()> [0]))
(ConstString {s}) && config.PtrSize == 8 && s.(string) == "" ->
(StringMake (ConstNil) (Const64 <config.fe.TypeInt()> [0]))
(StringMake (ConstNil) (Const64 <fe.TypeInt()> [0]))
(ConstString {s}) && config.PtrSize == 4 && s.(string) != "" ->
(StringMake
(Addr <config.fe.TypeBytePtr()> {config.fe.StringData(s.(string))}
(Addr <fe.TypeBytePtr()> {fe.StringData(s.(string))}
(SB))
(Const32 <config.fe.TypeInt()> [int64(len(s.(string)))]))
(Const32 <fe.TypeInt()> [int64(len(s.(string)))]))
(ConstString {s}) && config.PtrSize == 8 && s.(string) != "" ->
(StringMake
(Addr <config.fe.TypeBytePtr()> {config.fe.StringData(s.(string))}
(Addr <fe.TypeBytePtr()> {fe.StringData(s.(string))}
(SB))
(Const64 <config.fe.TypeInt()> [int64(len(s.(string)))]))
(Const64 <fe.TypeInt()> [int64(len(s.(string)))]))
// slice ops
// Only a few slice rules are provided here. See dec.rules for
......@@ -890,19 +890,19 @@
(ConstSlice) && config.PtrSize == 4 ->
(SliceMake
(ConstNil <v.Type.ElemType().PtrTo()>)
(Const32 <config.fe.TypeInt()> [0])
(Const32 <config.fe.TypeInt()> [0]))
(Const32 <fe.TypeInt()> [0])
(Const32 <fe.TypeInt()> [0]))
(ConstSlice) && config.PtrSize == 8 ->
(SliceMake
(ConstNil <v.Type.ElemType().PtrTo()>)
(Const64 <config.fe.TypeInt()> [0])
(Const64 <config.fe.TypeInt()> [0]))
(Const64 <fe.TypeInt()> [0])
(Const64 <fe.TypeInt()> [0]))
// interface ops
(ConstInterface) ->
(IMake
(ConstNil <config.fe.TypeBytePtr()>)
(ConstNil <config.fe.TypeBytePtr()>))
(ConstNil <fe.TypeBytePtr()>)
(ConstNil <fe.TypeBytePtr()>))
(NilCheck (GetG mem) mem) -> mem
......@@ -918,45 +918,45 @@
// Decompose compound argument values
(Arg {n} [off]) && v.Type.IsString() ->
(StringMake
(Arg <config.fe.TypeBytePtr()> {n} [off])
(Arg <config.fe.TypeInt()> {n} [off+config.PtrSize]))
(Arg <fe.TypeBytePtr()> {n} [off])
(Arg <fe.TypeInt()> {n} [off+config.PtrSize]))
(Arg {n} [off]) && v.Type.IsSlice() ->
(SliceMake
(Arg <v.Type.ElemType().PtrTo()> {n} [off])
(Arg <config.fe.TypeInt()> {n} [off+config.PtrSize])
(Arg <config.fe.TypeInt()> {n} [off+2*config.PtrSize]))
(Arg <fe.TypeInt()> {n} [off+config.PtrSize])
(Arg <fe.TypeInt()> {n} [off+2*config.PtrSize]))
(Arg {n} [off]) && v.Type.IsInterface() ->
(IMake
(Arg <config.fe.TypeBytePtr()> {n} [off])
(Arg <config.fe.TypeBytePtr()> {n} [off+config.PtrSize]))
(Arg <fe.TypeBytePtr()> {n} [off])
(Arg <fe.TypeBytePtr()> {n} [off+config.PtrSize]))
(Arg {n} [off]) && v.Type.IsComplex() && v.Type.Size() == 16 ->
(ComplexMake
(Arg <config.fe.TypeFloat64()> {n} [off])
(Arg <config.fe.TypeFloat64()> {n} [off+8]))
(Arg <fe.TypeFloat64()> {n} [off])
(Arg <fe.TypeFloat64()> {n} [off+8]))
(Arg {n} [off]) && v.Type.IsComplex() && v.Type.Size() == 8 ->
(ComplexMake
(Arg <config.fe.TypeFloat32()> {n} [off])
(Arg <config.fe.TypeFloat32()> {n} [off+4]))
(Arg <fe.TypeFloat32()> {n} [off])
(Arg <fe.TypeFloat32()> {n} [off+4]))
(Arg <t>) && t.IsStruct() && t.NumFields() == 0 && config.fe.CanSSA(t) ->
(Arg <t>) && t.IsStruct() && t.NumFields() == 0 && fe.CanSSA(t) ->
(StructMake0)
(Arg <t> {n} [off]) && t.IsStruct() && t.NumFields() == 1 && config.fe.CanSSA(t) ->
(Arg <t> {n} [off]) && t.IsStruct() && t.NumFields() == 1 && fe.CanSSA(t) ->
(StructMake1
(Arg <t.FieldType(0)> {n} [off+t.FieldOff(0)]))
(Arg <t> {n} [off]) && t.IsStruct() && t.NumFields() == 2 && config.fe.CanSSA(t) ->
(Arg <t> {n} [off]) && t.IsStruct() && t.NumFields() == 2 && fe.CanSSA(t) ->
(StructMake2
(Arg <t.FieldType(0)> {n} [off+t.FieldOff(0)])
(Arg <t.FieldType(1)> {n} [off+t.FieldOff(1)]))
(Arg <t> {n} [off]) && t.IsStruct() && t.NumFields() == 3 && config.fe.CanSSA(t) ->
(Arg <t> {n} [off]) && t.IsStruct() && t.NumFields() == 3 && fe.CanSSA(t) ->
(StructMake3
(Arg <t.FieldType(0)> {n} [off+t.FieldOff(0)])
(Arg <t.FieldType(1)> {n} [off+t.FieldOff(1)])
(Arg <t.FieldType(2)> {n} [off+t.FieldOff(2)]))
(Arg <t> {n} [off]) && t.IsStruct() && t.NumFields() == 4 && config.fe.CanSSA(t) ->
(Arg <t> {n} [off]) && t.IsStruct() && t.NumFields() == 4 && fe.CanSSA(t) ->
(StructMake4
(Arg <t.FieldType(0)> {n} [off+t.FieldOff(0)])
(Arg <t.FieldType(1)> {n} [off+t.FieldOff(1)])
......@@ -965,132 +965,132 @@
(Arg <t>) && t.IsArray() && t.NumElem() == 0 ->
(ArrayMake0)
(Arg <t> {n} [off]) && t.IsArray() && t.NumElem() == 1 && config.fe.CanSSA(t) ->
(Arg <t> {n} [off]) && t.IsArray() && t.NumElem() == 1 && fe.CanSSA(t) ->
(ArrayMake1 (Arg <t.ElemType()> {n} [off]))
// strength reduction of divide by a constant.
// See ../magic.go for a detailed description of these algorithms.
// Unsigned divide by power of 2. Strength reduce to a shift.
(Div8u n (Const8 [c])) && isPowerOfTwo(c&0xff) -> (Rsh8Ux64 n (Const64 <config.fe.TypeUInt64()> [log2(c&0xff)]))
(Div16u n (Const16 [c])) && isPowerOfTwo(c&0xffff) -> (Rsh16Ux64 n (Const64 <config.fe.TypeUInt64()> [log2(c&0xffff)]))
(Div32u n (Const32 [c])) && isPowerOfTwo(c&0xffffffff) -> (Rsh32Ux64 n (Const64 <config.fe.TypeUInt64()> [log2(c&0xffffffff)]))
(Div64u n (Const64 [c])) && isPowerOfTwo(c) -> (Rsh64Ux64 n (Const64 <config.fe.TypeUInt64()> [log2(c)]))
(Div8u n (Const8 [c])) && isPowerOfTwo(c&0xff) -> (Rsh8Ux64 n (Const64 <fe.TypeUInt64()> [log2(c&0xff)]))
(Div16u n (Const16 [c])) && isPowerOfTwo(c&0xffff) -> (Rsh16Ux64 n (Const64 <fe.TypeUInt64()> [log2(c&0xffff)]))
(Div32u n (Const32 [c])) && isPowerOfTwo(c&0xffffffff) -> (Rsh32Ux64 n (Const64 <fe.TypeUInt64()> [log2(c&0xffffffff)]))
(Div64u n (Const64 [c])) && isPowerOfTwo(c) -> (Rsh64Ux64 n (Const64 <fe.TypeUInt64()> [log2(c)]))
// Unsigned divide, not a power of 2. Strength reduce to a multiply.
// For 8-bit divides, we just do a direct 9-bit by 8-bit multiply.
(Div8u x (Const8 [c])) && umagicOK(8, c) ->
(Trunc32to8
(Rsh32Ux64 <config.fe.TypeUInt32()>
(Mul32 <config.fe.TypeUInt32()>
(Const32 <config.fe.TypeUInt32()> [int64(1<<8+umagic(8,c).m)])
(Rsh32Ux64 <fe.TypeUInt32()>
(Mul32 <fe.TypeUInt32()>
(Const32 <fe.TypeUInt32()> [int64(1<<8+umagic(8,c).m)])
(ZeroExt8to32 x))
(Const64 <config.fe.TypeUInt64()> [8+umagic(8,c).s])))
(Const64 <fe.TypeUInt64()> [8+umagic(8,c).s])))
// For 16-bit divides on 64-bit machines, we do a direct 17-bit by 16-bit multiply.
(Div16u x (Const16 [c])) && umagicOK(16, c) && config.RegSize == 8 ->
(Trunc64to16
(Rsh64Ux64 <config.fe.TypeUInt64()>
(Mul64 <config.fe.TypeUInt64()>
(Const64 <config.fe.TypeUInt64()> [int64(1<<16+umagic(16,c).m)])
(Rsh64Ux64 <fe.TypeUInt64()>
(Mul64 <fe.TypeUInt64()>
(Const64 <fe.TypeUInt64()> [int64(1<<16+umagic(16,c).m)])
(ZeroExt16to64 x))
(Const64 <config.fe.TypeUInt64()> [16+umagic(16,c).s])))
(Const64 <fe.TypeUInt64()> [16+umagic(16,c).s])))
// For 16-bit divides on 32-bit machines
(Div16u x (Const16 [c])) && umagicOK(16, c) && config.RegSize == 4 && umagic(16,c).m&1 == 0 ->
(Trunc32to16
(Rsh32Ux64 <config.fe.TypeUInt32()>
(Mul32 <config.fe.TypeUInt32()>
(Const32 <config.fe.TypeUInt32()> [int64(1<<15+umagic(16,c).m/2)])
(Rsh32Ux64 <fe.TypeUInt32()>
(Mul32 <fe.TypeUInt32()>
(Const32 <fe.TypeUInt32()> [int64(1<<15+umagic(16,c).m/2)])
(ZeroExt16to32 x))
(Const64 <config.fe.TypeUInt64()> [16+umagic(16,c).s-1])))
(Const64 <fe.TypeUInt64()> [16+umagic(16,c).s-1])))
(Div16u x (Const16 [c])) && umagicOK(16, c) && config.RegSize == 4 && c&1 == 0 ->
(Trunc32to16
(Rsh32Ux64 <config.fe.TypeUInt32()>
(Mul32 <config.fe.TypeUInt32()>
(Const32 <config.fe.TypeUInt32()> [int64(1<<15+(umagic(16,c).m+1)/2)])
(Rsh32Ux64 <config.fe.TypeUInt32()> (ZeroExt16to32 x) (Const64 <config.fe.TypeUInt64()> [1])))
(Const64 <config.fe.TypeUInt64()> [16+umagic(16,c).s-2])))
(Rsh32Ux64 <fe.TypeUInt32()>
(Mul32 <fe.TypeUInt32()>
(Const32 <fe.TypeUInt32()> [int64(1<<15+(umagic(16,c).m+1)/2)])
(Rsh32Ux64 <fe.TypeUInt32()> (ZeroExt16to32 x) (Const64 <fe.TypeUInt64()> [1])))
(Const64 <fe.TypeUInt64()> [16+umagic(16,c).s-2])))
(Div16u x (Const16 [c])) && umagicOK(16, c) && config.RegSize == 4 ->
(Trunc32to16
(Rsh32Ux64 <config.fe.TypeUInt32()>
(Rsh32Ux64 <fe.TypeUInt32()>
(Avg32u
(Lsh32x64 <config.fe.TypeUInt32()> (ZeroExt16to32 x) (Const64 <config.fe.TypeUInt64()> [16]))
(Mul32 <config.fe.TypeUInt32()>
(Const32 <config.fe.TypeUInt32()> [int64(umagic(16,c).m)])
(Lsh32x64 <fe.TypeUInt32()> (ZeroExt16to32 x) (Const64 <fe.TypeUInt64()> [16]))
(Mul32 <fe.TypeUInt32()>
(Const32 <fe.TypeUInt32()> [int64(umagic(16,c).m)])
(ZeroExt16to32 x)))
(Const64 <config.fe.TypeUInt64()> [16+umagic(16,c).s-1])))
(Const64 <fe.TypeUInt64()> [16+umagic(16,c).s-1])))
// For 32-bit divides on 32-bit machines
(Div32u x (Const32 [c])) && umagicOK(32, c) && config.RegSize == 4 && umagic(32,c).m&1 == 0 ->
(Rsh32Ux64 <config.fe.TypeUInt32()>
(Hmul32u <config.fe.TypeUInt32()>
(Const32 <config.fe.TypeUInt32()> [int64(int32(1<<31+umagic(32,c).m/2))])
(Rsh32Ux64 <fe.TypeUInt32()>
(Hmul32u <fe.TypeUInt32()>
(Const32 <fe.TypeUInt32()> [int64(int32(1<<31+umagic(32,c).m/2))])
x)
(Const64 <config.fe.TypeUInt64()> [umagic(32,c).s-1]))
(Const64 <fe.TypeUInt64()> [umagic(32,c).s-1]))
(Div32u x (Const32 [c])) && umagicOK(32, c) && config.RegSize == 4 && c&1 == 0 ->
(Rsh32Ux64 <config.fe.TypeUInt32()>
(Hmul32u <config.fe.TypeUInt32()>
(Const32 <config.fe.TypeUInt32()> [int64(int32(1<<31+(umagic(32,c).m+1)/2))])
(Rsh32Ux64 <config.fe.TypeUInt32()> x (Const64 <config.fe.TypeUInt64()> [1])))
(Const64 <config.fe.TypeUInt64()> [umagic(32,c).s-2]))
(Rsh32Ux64 <fe.TypeUInt32()>
(Hmul32u <fe.TypeUInt32()>
(Const32 <fe.TypeUInt32()> [int64(int32(1<<31+(umagic(32,c).m+1)/2))])
(Rsh32Ux64 <fe.TypeUInt32()> x (Const64 <fe.TypeUInt64()> [1])))
(Const64 <fe.TypeUInt64()> [umagic(32,c).s-2]))
(Div32u x (Const32 [c])) && umagicOK(32, c) && config.RegSize == 4 ->
(Rsh32Ux64 <config.fe.TypeUInt32()>
(Rsh32Ux64 <fe.TypeUInt32()>
(Avg32u
x
(Hmul32u <config.fe.TypeUInt32()>
(Const32 <config.fe.TypeUInt32()> [int64(int32(umagic(32,c).m))])
(Hmul32u <fe.TypeUInt32()>
(Const32 <fe.TypeUInt32()> [int64(int32(umagic(32,c).m))])
x))
(Const64 <config.fe.TypeUInt64()> [umagic(32,c).s-1]))
(Const64 <fe.TypeUInt64()> [umagic(32,c).s-1]))
// For 32-bit divides on 64-bit machines
// We'll use a regular (non-hi) multiply for this case.
(Div32u x (Const32 [c])) && umagicOK(32, c) && config.RegSize == 8 && umagic(32,c).m&1 == 0 ->
(Trunc64to32
(Rsh64Ux64 <config.fe.TypeUInt64()>
(Mul64 <config.fe.TypeUInt64()>
(Const64 <config.fe.TypeUInt64()> [int64(1<<31+umagic(32,c).m/2)])
(Rsh64Ux64 <fe.TypeUInt64()>
(Mul64 <fe.TypeUInt64()>
(Const64 <fe.TypeUInt64()> [int64(1<<31+umagic(32,c).m/2)])
(ZeroExt32to64 x))
(Const64 <config.fe.TypeUInt64()> [32+umagic(32,c).s-1])))
(Const64 <fe.TypeUInt64()> [32+umagic(32,c).s-1])))
(Div32u x (Const32 [c])) && umagicOK(32, c) && config.RegSize == 8 && c&1 == 0 ->
(Trunc64to32
(Rsh64Ux64 <config.fe.TypeUInt64()>
(Mul64 <config.fe.TypeUInt64()>
(Const64 <config.fe.TypeUInt64()> [int64(1<<31+(umagic(32,c).m+1)/2)])
(Rsh64Ux64 <config.fe.TypeUInt64()> (ZeroExt32to64 x) (Const64 <config.fe.TypeUInt64()> [1])))
(Const64 <config.fe.TypeUInt64()> [32+umagic(32,c).s-2])))
(Rsh64Ux64 <fe.TypeUInt64()>
(Mul64 <fe.TypeUInt64()>
(Const64 <fe.TypeUInt64()> [int64(1<<31+(umagic(32,c).m+1)/2)])
(Rsh64Ux64 <fe.TypeUInt64()> (ZeroExt32to64 x) (Const64 <fe.TypeUInt64()> [1])))
(Const64 <fe.TypeUInt64()> [32+umagic(32,c).s-2])))
(Div32u x (Const32 [c])) && umagicOK(32, c) && config.RegSize == 8 ->
(Trunc64to32
(Rsh64Ux64 <config.fe.TypeUInt64()>
(Rsh64Ux64 <fe.TypeUInt64()>
(Avg64u
(Lsh64x64 <config.fe.TypeUInt64()> (ZeroExt32to64 x) (Const64 <config.fe.TypeUInt64()> [32]))
(Mul64 <config.fe.TypeUInt64()>
(Const64 <config.fe.TypeUInt32()> [int64(umagic(32,c).m)])
(Lsh64x64 <fe.TypeUInt64()> (ZeroExt32to64 x) (Const64 <fe.TypeUInt64()> [32]))
(Mul64 <fe.TypeUInt64()>
(Const64 <fe.TypeUInt32()> [int64(umagic(32,c).m)])
(ZeroExt32to64 x)))
(Const64 <config.fe.TypeUInt64()> [32+umagic(32,c).s-1])))
(Const64 <fe.TypeUInt64()> [32+umagic(32,c).s-1])))
// For 64-bit divides on 64-bit machines
// (64-bit divides on 32-bit machines are lowered to a runtime call by the walk pass.)
(Div64u x (Const64 [c])) && umagicOK(64, c) && config.RegSize == 8 && umagic(64,c).m&1 == 0 ->
(Rsh64Ux64 <config.fe.TypeUInt64()>
(Hmul64u <config.fe.TypeUInt64()>
(Const64 <config.fe.TypeUInt64()> [int64(1<<63+umagic(64,c).m/2)])
(Rsh64Ux64 <fe.TypeUInt64()>
(Hmul64u <fe.TypeUInt64()>
(Const64 <fe.TypeUInt64()> [int64(1<<63+umagic(64,c).m/2)])
x)
(Const64 <config.fe.TypeUInt64()> [umagic(64,c).s-1]))
(Const64 <fe.TypeUInt64()> [umagic(64,c).s-1]))
(Div64u x (Const64 [c])) && umagicOK(64, c) && config.RegSize == 8 && c&1 == 0 ->
(Rsh64Ux64 <config.fe.TypeUInt64()>
(Hmul64u <config.fe.TypeUInt64()>
(Const64 <config.fe.TypeUInt64()> [int64(1<<63+(umagic(64,c).m+1)/2)])
(Rsh64Ux64 <config.fe.TypeUInt64()> x (Const64 <config.fe.TypeUInt64()> [1])))
(Const64 <config.fe.TypeUInt64()> [umagic(64,c).s-2]))
(Rsh64Ux64 <fe.TypeUInt64()>
(Hmul64u <fe.TypeUInt64()>
(Const64 <fe.TypeUInt64()> [int64(1<<63+(umagic(64,c).m+1)/2)])
(Rsh64Ux64 <fe.TypeUInt64()> x (Const64 <fe.TypeUInt64()> [1])))
(Const64 <fe.TypeUInt64()> [umagic(64,c).s-2]))
(Div64u x (Const64 [c])) && umagicOK(64, c) && config.RegSize == 8 ->
(Rsh64Ux64 <config.fe.TypeUInt64()>
(Rsh64Ux64 <fe.TypeUInt64()>
(Avg64u
x
(Hmul64u <config.fe.TypeUInt64()>
(Const64 <config.fe.TypeUInt64()> [int64(umagic(64,c).m)])
(Hmul64u <fe.TypeUInt64()>
(Const64 <fe.TypeUInt64()> [int64(umagic(64,c).m)])
x))
(Const64 <config.fe.TypeUInt64()> [umagic(64,c).s-1]))
(Const64 <fe.TypeUInt64()> [umagic(64,c).s-1]))
// Signed divide by a negative constant. Rewrite to divide by a positive constant.
(Div8 <t> n (Const8 [c])) && c < 0 && c != -1<<7 -> (Neg8 (Div8 <t> n (Const8 <t> [-c])))
......@@ -1101,10 +1101,10 @@
// Dividing by the most-negative number. Result is always 0 except
// if the input is also the most-negative number.
// We can detect that using the sign bit of x & -x.
(Div8 <t> x (Const8 [-1<<7 ])) -> (Rsh8Ux64 (And8 <t> x (Neg8 <t> x)) (Const64 <config.fe.TypeUInt64()> [7 ]))
(Div16 <t> x (Const16 [-1<<15])) -> (Rsh16Ux64 (And16 <t> x (Neg16 <t> x)) (Const64 <config.fe.TypeUInt64()> [15]))
(Div32 <t> x (Const32 [-1<<31])) -> (Rsh32Ux64 (And32 <t> x (Neg32 <t> x)) (Const64 <config.fe.TypeUInt64()> [31]))
(Div64 <t> x (Const64 [-1<<63])) -> (Rsh64Ux64 (And64 <t> x (Neg64 <t> x)) (Const64 <config.fe.TypeUInt64()> [63]))
(Div8 <t> x (Const8 [-1<<7 ])) -> (Rsh8Ux64 (And8 <t> x (Neg8 <t> x)) (Const64 <fe.TypeUInt64()> [7 ]))
(Div16 <t> x (Const16 [-1<<15])) -> (Rsh16Ux64 (And16 <t> x (Neg16 <t> x)) (Const64 <fe.TypeUInt64()> [15]))
(Div32 <t> x (Const32 [-1<<31])) -> (Rsh32Ux64 (And32 <t> x (Neg32 <t> x)) (Const64 <fe.TypeUInt64()> [31]))
(Div64 <t> x (Const64 [-1<<63])) -> (Rsh64Ux64 (And64 <t> x (Neg64 <t> x)) (Const64 <fe.TypeUInt64()> [63]))
// Signed divide by power of 2.
// n / c = n >> log(c) if n >= 0
......@@ -1112,96 +1112,96 @@
// We conditionally add c-1 by adding n>>63>>(64-log(c)) (first shift signed, second shift unsigned).
(Div8 <t> n (Const8 [c])) && isPowerOfTwo(c) ->
(Rsh8x64
(Add8 <t> n (Rsh8Ux64 <t> (Rsh8x64 <t> n (Const64 <config.fe.TypeUInt64()> [ 7])) (Const64 <config.fe.TypeUInt64()> [ 8-log2(c)])))
(Const64 <config.fe.TypeUInt64()> [log2(c)]))
(Add8 <t> n (Rsh8Ux64 <t> (Rsh8x64 <t> n (Const64 <fe.TypeUInt64()> [ 7])) (Const64 <fe.TypeUInt64()> [ 8-log2(c)])))
(Const64 <fe.TypeUInt64()> [log2(c)]))
(Div16 <t> n (Const16 [c])) && isPowerOfTwo(c) ->
(Rsh16x64
(Add16 <t> n (Rsh16Ux64 <t> (Rsh16x64 <t> n (Const64 <config.fe.TypeUInt64()> [15])) (Const64 <config.fe.TypeUInt64()> [16-log2(c)])))
(Const64 <config.fe.TypeUInt64()> [log2(c)]))
(Add16 <t> n (Rsh16Ux64 <t> (Rsh16x64 <t> n (Const64 <fe.TypeUInt64()> [15])) (Const64 <fe.TypeUInt64()> [16-log2(c)])))
(Const64 <fe.TypeUInt64()> [log2(c)]))
(Div32 <t> n (Const32 [c])) && isPowerOfTwo(c) ->
(Rsh32x64
(Add32 <t> n (Rsh32Ux64 <t> (Rsh32x64 <t> n (Const64 <config.fe.TypeUInt64()> [31])) (Const64 <config.fe.TypeUInt64()> [32-log2(c)])))
(Const64 <config.fe.TypeUInt64()> [log2(c)]))
(Add32 <t> n (Rsh32Ux64 <t> (Rsh32x64 <t> n (Const64 <fe.TypeUInt64()> [31])) (Const64 <fe.TypeUInt64()> [32-log2(c)])))
(Const64 <fe.TypeUInt64()> [log2(c)]))
(Div64 <t> n (Const64 [c])) && isPowerOfTwo(c) ->
(Rsh64x64
(Add64 <t> n (Rsh64Ux64 <t> (Rsh64x64 <t> n (Const64 <config.fe.TypeUInt64()> [63])) (Const64 <config.fe.TypeUInt64()> [64-log2(c)])))
(Const64 <config.fe.TypeUInt64()> [log2(c)]))
(Add64 <t> n (Rsh64Ux64 <t> (Rsh64x64 <t> n (Const64 <fe.TypeUInt64()> [63])) (Const64 <fe.TypeUInt64()> [64-log2(c)])))
(Const64 <fe.TypeUInt64()> [log2(c)]))
// Signed divide, not a power of 2. Strength reduce to a multiply.
(Div8 <t> x (Const8 [c])) && smagicOK(8,c) ->
(Sub8 <t>
(Rsh32x64 <t>
(Mul32 <config.fe.TypeUInt32()>
(Const32 <config.fe.TypeUInt32()> [int64(smagic(8,c).m)])
(Mul32 <fe.TypeUInt32()>
(Const32 <fe.TypeUInt32()> [int64(smagic(8,c).m)])
(SignExt8to32 x))
(Const64 <config.fe.TypeUInt64()> [8+smagic(8,c).s]))
(Const64 <fe.TypeUInt64()> [8+smagic(8,c).s]))
(Rsh32x64 <t>
(SignExt8to32 x)
(Const64 <config.fe.TypeUInt64()> [31])))
(Const64 <fe.TypeUInt64()> [31])))
(Div16 <t> x (Const16 [c])) && smagicOK(16,c) ->
(Sub16 <t>
(Rsh32x64 <t>
(Mul32 <config.fe.TypeUInt32()>
(Const32 <config.fe.TypeUInt32()> [int64(smagic(16,c).m)])
(Mul32 <fe.TypeUInt32()>
(Const32 <fe.TypeUInt32()> [int64(smagic(16,c).m)])
(SignExt16to32 x))
(Const64 <config.fe.TypeUInt64()> [16+smagic(16,c).s]))
(Const64 <fe.TypeUInt64()> [16+smagic(16,c).s]))
(Rsh32x64 <t>
(SignExt16to32 x)
(Const64 <config.fe.TypeUInt64()> [31])))
(Const64 <fe.TypeUInt64()> [31])))
(Div32 <t> x (Const32 [c])) && smagicOK(32,c) && config.RegSize == 8 ->
(Sub32 <t>
(Rsh64x64 <t>
(Mul64 <config.fe.TypeUInt64()>
(Const64 <config.fe.TypeUInt64()> [int64(smagic(32,c).m)])
(Mul64 <fe.TypeUInt64()>
(Const64 <fe.TypeUInt64()> [int64(smagic(32,c).m)])
(SignExt32to64 x))
(Const64 <config.fe.TypeUInt64()> [32+smagic(32,c).s]))
(Const64 <fe.TypeUInt64()> [32+smagic(32,c).s]))
(Rsh64x64 <t>
(SignExt32to64 x)
(Const64 <config.fe.TypeUInt64()> [63])))
(Const64 <fe.TypeUInt64()> [63])))
(Div32 <t> x (Const32 [c])) && smagicOK(32,c) && config.RegSize == 4 && smagic(32,c).m&1 == 0 ->
(Sub32 <t>
(Rsh32x64 <t>
(Hmul32 <t>
(Const32 <config.fe.TypeUInt32()> [int64(int32(smagic(32,c).m/2))])
(Const32 <fe.TypeUInt32()> [int64(int32(smagic(32,c).m/2))])
x)
(Const64 <config.fe.TypeUInt64()> [smagic(32,c).s-1]))
(Const64 <fe.TypeUInt64()> [smagic(32,c).s-1]))
(Rsh32x64 <t>
x
(Const64 <config.fe.TypeUInt64()> [31])))
(Const64 <fe.TypeUInt64()> [31])))
(Div32 <t> x (Const32 [c])) && smagicOK(32,c) && config.RegSize == 4 && smagic(32,c).m&1 != 0 ->
(Sub32 <t>
(Rsh32x64 <t>
(Add32 <t>
(Hmul32 <t>
(Const32 <config.fe.TypeUInt32()> [int64(int32(smagic(32,c).m))])
(Const32 <fe.TypeUInt32()> [int64(int32(smagic(32,c).m))])
x)
x)
(Const64 <config.fe.TypeUInt64()> [smagic(32,c).s]))
(Const64 <fe.TypeUInt64()> [smagic(32,c).s]))
(Rsh32x64 <t>
x
(Const64 <config.fe.TypeUInt64()> [31])))
(Const64 <fe.TypeUInt64()> [31])))
(Div64 <t> x (Const64 [c])) && smagicOK(64,c) && smagic(64,c).m&1 == 0 ->
(Sub64 <t>
(Rsh64x64 <t>
(Hmul64 <t>
(Const64 <config.fe.TypeUInt64()> [int64(smagic(64,c).m/2)])
(Const64 <fe.TypeUInt64()> [int64(smagic(64,c).m/2)])
x)
(Const64 <config.fe.TypeUInt64()> [smagic(64,c).s-1]))
(Const64 <fe.TypeUInt64()> [smagic(64,c).s-1]))
(Rsh64x64 <t>
x
(Const64 <config.fe.TypeUInt64()> [63])))
(Const64 <fe.TypeUInt64()> [63])))
(Div64 <t> x (Const64 [c])) && smagicOK(64,c) && smagic(64,c).m&1 != 0 ->
(Sub64 <t>
(Rsh64x64 <t>
(Add64 <t>
(Hmul64 <t>
(Const64 <config.fe.TypeUInt64()> [int64(smagic(64,c).m)])
(Const64 <fe.TypeUInt64()> [int64(smagic(64,c).m)])
x)
x)
(Const64 <config.fe.TypeUInt64()> [smagic(64,c).s]))
(Const64 <fe.TypeUInt64()> [smagic(64,c).s]))
(Rsh64x64 <t>
x
(Const64 <config.fe.TypeUInt64()> [63])))
(Const64 <fe.TypeUInt64()> [63])))
// Unsigned mod by power of 2 constant.
(Mod8u <t> n (Const8 [c])) && isPowerOfTwo(c&0xff) -> (And8 n (Const8 <t> [(c&0xff)-1]))
......@@ -1423,13 +1423,13 @@
&& mem.Op == OpStaticCall
&& isSameSym(mem.Aux, "runtime.newobject")
&& c == config.ctxt.FixedFrameSize() + config.RegSize // offset of return value
&& warnRule(config.Debug_checknil() && v.Pos.Line() > 1, v, "removed nil check")
&& warnRule(fe.Debug_checknil() && v.Pos.Line() > 1, v, "removed nil check")
-> (Invalid)
(NilCheck (OffPtr (Load (OffPtr [c] (SP)) mem)) mem)
&& mem.Op == OpStaticCall
&& isSameSym(mem.Aux, "runtime.newobject")
&& c == config.ctxt.FixedFrameSize() + config.RegSize // offset of return value
&& warnRule(config.Debug_checknil() && v.Pos.Line() > 1, v, "removed nil check")
&& warnRule(fe.Debug_checknil() && v.Pos.Line() > 1, v, "removed nil check")
-> (Invalid)
// De-virtualize interface calls into static calls.
......
......@@ -156,11 +156,11 @@ func genRules(arch arch) {
fmt.Fprintln(w, "var _ = math.MinInt8 // in case not otherwise used")
// Main rewrite routine is a switch on v.Op.
fmt.Fprintf(w, "func rewriteValue%s(v *Value, config *Config) bool {\n", arch.name)
fmt.Fprintf(w, "func rewriteValue%s(v *Value) bool {\n", arch.name)
fmt.Fprintf(w, "switch v.Op {\n")
for _, op := range ops {
fmt.Fprintf(w, "case %s:\n", op)
fmt.Fprintf(w, "return rewriteValue%s_%s(v, config)\n", arch.name, op)
fmt.Fprintf(w, "return rewriteValue%s_%s(v)\n", arch.name, op)
}
fmt.Fprintf(w, "}\n")
fmt.Fprintf(w, "return false\n")
......@@ -169,47 +169,71 @@ func genRules(arch arch) {
// Generate a routine per op. Note that we don't make one giant routine
// because it is too big for some compilers.
for _, op := range ops {
fmt.Fprintf(w, "func rewriteValue%s_%s(v *Value, config *Config) bool {\n", arch.name, op)
fmt.Fprintln(w, "b := v.Block")
fmt.Fprintln(w, "_ = b")
buf := new(bytes.Buffer)
var canFail bool
for i, rule := range oprules[op] {
match, cond, result := rule.parse()
fmt.Fprintf(w, "// match: %s\n", match)
fmt.Fprintf(w, "// cond: %s\n", cond)
fmt.Fprintf(w, "// result: %s\n", result)
fmt.Fprintf(buf, "// match: %s\n", match)
fmt.Fprintf(buf, "// cond: %s\n", cond)
fmt.Fprintf(buf, "// result: %s\n", result)
canFail = false
fmt.Fprintf(w, "for {\n")
if genMatch(w, arch, match, rule.loc) {
fmt.Fprintf(buf, "for {\n")
if genMatch(buf, arch, match, rule.loc) {
canFail = true
}
if cond != "" {
fmt.Fprintf(w, "if !(%s) {\nbreak\n}\n", cond)
fmt.Fprintf(buf, "if !(%s) {\nbreak\n}\n", cond)
canFail = true
}
if !canFail && i != len(oprules[op])-1 {
log.Fatalf("unconditional rule %s is followed by other rules", match)
}
genResult(w, arch, result, rule.loc)
genResult(buf, arch, result, rule.loc)
if *genLog {
fmt.Fprintf(w, "logRule(\"%s\")\n", rule.loc)
fmt.Fprintf(buf, "logRule(\"%s\")\n", rule.loc)
}
fmt.Fprintf(w, "return true\n")
fmt.Fprintf(buf, "return true\n")
fmt.Fprintf(w, "}\n")
fmt.Fprintf(buf, "}\n")
}
if canFail {
fmt.Fprintf(w, "return false\n")
}
fmt.Fprintf(buf, "return false\n")
}
body := buf.String()
// Do a rough match to predict whether we need b, config, and/or fe.
// It's not precise--thus the blank assignments--but it's good enough
// to avoid generating needless code and doing pointless nil checks.
hasb := strings.Contains(body, "b.")
hasconfig := strings.Contains(body, "config.")
hasfe := strings.Contains(body, "fe.")
fmt.Fprintf(w, "func rewriteValue%s_%s(v *Value) bool {\n", arch.name, op)
if hasb || hasconfig || hasfe {
fmt.Fprintln(w, "b := v.Block")
fmt.Fprintln(w, "_ = b")
}
if hasconfig || hasfe {
fmt.Fprintln(w, "config := b.Func.Config")
fmt.Fprintln(w, "_ = config")
}
if hasfe {
fmt.Fprintln(w, "fe := config.fe")
fmt.Fprintln(w, "_ = fe")
}
fmt.Fprint(w, body)
fmt.Fprintf(w, "}\n")
}
// Generate block rewrite function. There are only a few block types
// so we can make this one function with a switch.
fmt.Fprintf(w, "func rewriteBlock%s(b *Block, config *Config) bool {\n", arch.name)
fmt.Fprintf(w, "func rewriteBlock%s(b *Block) bool {\n", arch.name)
fmt.Fprintln(w, "config := b.Func.Config")
fmt.Fprintln(w, "_ = config")
fmt.Fprintln(w, "fe := config.fe")
fmt.Fprintln(w, "_ = fe")
fmt.Fprintf(w, "switch b.Kind {\n")
ops = nil
for op := range blockrules {
......@@ -695,7 +719,7 @@ func typeName(typ string) string {
case "Flags", "Mem", "Void", "Int128":
return "Type" + typ
default:
return "config.fe.Type" + typ + "()"
return "fe.Type" + typ + "()"
}
}
......
......@@ -14,7 +14,7 @@ import (
"strings"
)
func applyRewrite(f *Func, rb func(*Block, *Config) bool, rv func(*Value, *Config) bool) {
func applyRewrite(f *Func, rb blockRewriter, rv valueRewriter) {
// repeat rewrites until we find no more rewrites
var curb *Block
var curv *Value
......@@ -27,7 +27,6 @@ func applyRewrite(f *Func, rb func(*Block, *Config) bool, rv func(*Value, *Confi
// TODO(khr): print source location also
}
}()
config := f.Config
for {
change := false
for _, b := range f.Blocks {
......@@ -37,7 +36,7 @@ func applyRewrite(f *Func, rb func(*Block, *Config) bool, rv func(*Value, *Confi
}
}
curb = b
if rb(b, config) {
if rb(b) {
change = true
}
curb = nil
......@@ -66,7 +65,7 @@ func applyRewrite(f *Func, rb func(*Block, *Config) bool, rv func(*Value, *Confi
// apply rewrite function
curv = v
if rv(v, config) {
if rv(v) {
change = true
}
curv = nil
......
This source diff could not be displayed because it is too large. You can view the blob instead.
This source diff could not be displayed because it is too large. You can view the blob instead.
This source diff could not be displayed because it is too large. You can view the blob instead.
This source diff could not be displayed because it is too large. You can view the blob instead.
This source diff could not be displayed because it is too large. You can view the blob instead.
This source diff could not be displayed because it is too large. You can view the blob instead.
This source diff could not be displayed because it is too large. You can view the blob instead.
This source diff could not be displayed because it is too large. You can view the blob instead.
......@@ -6,36 +6,34 @@ package ssa
import "math"
var _ = math.MinInt8 // in case not otherwise used
func rewriteValuedec(v *Value, config *Config) bool {
func rewriteValuedec(v *Value) bool {
switch v.Op {
case OpComplexImag:
return rewriteValuedec_OpComplexImag(v, config)
return rewriteValuedec_OpComplexImag(v)
case OpComplexReal:
return rewriteValuedec_OpComplexReal(v, config)
return rewriteValuedec_OpComplexReal(v)
case OpIData:
return rewriteValuedec_OpIData(v, config)
return rewriteValuedec_OpIData(v)
case OpITab:
return rewriteValuedec_OpITab(v, config)
return rewriteValuedec_OpITab(v)
case OpLoad:
return rewriteValuedec_OpLoad(v, config)
return rewriteValuedec_OpLoad(v)
case OpSliceCap:
return rewriteValuedec_OpSliceCap(v, config)
return rewriteValuedec_OpSliceCap(v)
case OpSliceLen:
return rewriteValuedec_OpSliceLen(v, config)
return rewriteValuedec_OpSliceLen(v)
case OpSlicePtr:
return rewriteValuedec_OpSlicePtr(v, config)
return rewriteValuedec_OpSlicePtr(v)
case OpStore:
return rewriteValuedec_OpStore(v, config)
return rewriteValuedec_OpStore(v)
case OpStringLen:
return rewriteValuedec_OpStringLen(v, config)
return rewriteValuedec_OpStringLen(v)
case OpStringPtr:
return rewriteValuedec_OpStringPtr(v, config)
return rewriteValuedec_OpStringPtr(v)
}
return false
}
func rewriteValuedec_OpComplexImag(v *Value, config *Config) bool {
b := v.Block
_ = b
func rewriteValuedec_OpComplexImag(v *Value) bool {
// match: (ComplexImag (ComplexMake _ imag ))
// cond:
// result: imag
......@@ -52,9 +50,7 @@ func rewriteValuedec_OpComplexImag(v *Value, config *Config) bool {
}
return false
}
func rewriteValuedec_OpComplexReal(v *Value, config *Config) bool {
b := v.Block
_ = b
func rewriteValuedec_OpComplexReal(v *Value) bool {
// match: (ComplexReal (ComplexMake real _ ))
// cond:
// result: real
......@@ -71,9 +67,7 @@ func rewriteValuedec_OpComplexReal(v *Value, config *Config) bool {
}
return false
}
func rewriteValuedec_OpIData(v *Value, config *Config) bool {
b := v.Block
_ = b
func rewriteValuedec_OpIData(v *Value) bool {
// match: (IData (IMake _ data))
// cond:
// result: data
......@@ -90,7 +84,7 @@ func rewriteValuedec_OpIData(v *Value, config *Config) bool {
}
return false
}
func rewriteValuedec_OpITab(v *Value, config *Config) bool {
func rewriteValuedec_OpITab(v *Value) bool {
b := v.Block
_ = b
// match: (ITab (IMake itab _))
......@@ -109,12 +103,16 @@ func rewriteValuedec_OpITab(v *Value, config *Config) bool {
}
return false
}
func rewriteValuedec_OpLoad(v *Value, config *Config) bool {
func rewriteValuedec_OpLoad(v *Value) bool {
b := v.Block
_ = b
config := b.Func.Config
_ = config
fe := config.fe
_ = fe
// match: (Load <t> ptr mem)
// cond: t.IsComplex() && t.Size() == 8
// result: (ComplexMake (Load <config.fe.TypeFloat32()> ptr mem) (Load <config.fe.TypeFloat32()> (OffPtr <config.fe.TypeFloat32().PtrTo()> [4] ptr) mem) )
// result: (ComplexMake (Load <fe.TypeFloat32()> ptr mem) (Load <fe.TypeFloat32()> (OffPtr <fe.TypeFloat32().PtrTo()> [4] ptr) mem) )
for {
t := v.Type
ptr := v.Args[0]
......@@ -123,12 +121,12 @@ func rewriteValuedec_OpLoad(v *Value, config *Config) bool {
break
}
v.reset(OpComplexMake)
v0 := b.NewValue0(v.Pos, OpLoad, config.fe.TypeFloat32())
v0 := b.NewValue0(v.Pos, OpLoad, fe.TypeFloat32())
v0.AddArg(ptr)
v0.AddArg(mem)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpLoad, config.fe.TypeFloat32())
v2 := b.NewValue0(v.Pos, OpOffPtr, config.fe.TypeFloat32().PtrTo())
v1 := b.NewValue0(v.Pos, OpLoad, fe.TypeFloat32())
v2 := b.NewValue0(v.Pos, OpOffPtr, fe.TypeFloat32().PtrTo())
v2.AuxInt = 4
v2.AddArg(ptr)
v1.AddArg(v2)
......@@ -138,7 +136,7 @@ func rewriteValuedec_OpLoad(v *Value, config *Config) bool {
}
// match: (Load <t> ptr mem)
// cond: t.IsComplex() && t.Size() == 16
// result: (ComplexMake (Load <config.fe.TypeFloat64()> ptr mem) (Load <config.fe.TypeFloat64()> (OffPtr <config.fe.TypeFloat64().PtrTo()> [8] ptr) mem) )
// result: (ComplexMake (Load <fe.TypeFloat64()> ptr mem) (Load <fe.TypeFloat64()> (OffPtr <fe.TypeFloat64().PtrTo()> [8] ptr) mem) )
for {
t := v.Type
ptr := v.Args[0]
......@@ -147,12 +145,12 @@ func rewriteValuedec_OpLoad(v *Value, config *Config) bool {
break
}
v.reset(OpComplexMake)
v0 := b.NewValue0(v.Pos, OpLoad, config.fe.TypeFloat64())
v0 := b.NewValue0(v.Pos, OpLoad, fe.TypeFloat64())
v0.AddArg(ptr)
v0.AddArg(mem)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpLoad, config.fe.TypeFloat64())
v2 := b.NewValue0(v.Pos, OpOffPtr, config.fe.TypeFloat64().PtrTo())
v1 := b.NewValue0(v.Pos, OpLoad, fe.TypeFloat64())
v2 := b.NewValue0(v.Pos, OpOffPtr, fe.TypeFloat64().PtrTo())
v2.AuxInt = 8
v2.AddArg(ptr)
v1.AddArg(v2)
......@@ -162,7 +160,7 @@ func rewriteValuedec_OpLoad(v *Value, config *Config) bool {
}
// match: (Load <t> ptr mem)
// cond: t.IsString()
// result: (StringMake (Load <config.fe.TypeBytePtr()> ptr mem) (Load <config.fe.TypeInt()> (OffPtr <config.fe.TypeInt().PtrTo()> [config.PtrSize] ptr) mem))
// result: (StringMake (Load <fe.TypeBytePtr()> ptr mem) (Load <fe.TypeInt()> (OffPtr <fe.TypeInt().PtrTo()> [config.PtrSize] ptr) mem))
for {
t := v.Type
ptr := v.Args[0]
......@@ -171,12 +169,12 @@ func rewriteValuedec_OpLoad(v *Value, config *Config) bool {
break
}
v.reset(OpStringMake)
v0 := b.NewValue0(v.Pos, OpLoad, config.fe.TypeBytePtr())
v0 := b.NewValue0(v.Pos, OpLoad, fe.TypeBytePtr())
v0.AddArg(ptr)
v0.AddArg(mem)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpLoad, config.fe.TypeInt())
v2 := b.NewValue0(v.Pos, OpOffPtr, config.fe.TypeInt().PtrTo())
v1 := b.NewValue0(v.Pos, OpLoad, fe.TypeInt())
v2 := b.NewValue0(v.Pos, OpOffPtr, fe.TypeInt().PtrTo())
v2.AuxInt = config.PtrSize
v2.AddArg(ptr)
v1.AddArg(v2)
......@@ -186,7 +184,7 @@ func rewriteValuedec_OpLoad(v *Value, config *Config) bool {
}
// match: (Load <t> ptr mem)
// cond: t.IsSlice()
// result: (SliceMake (Load <t.ElemType().PtrTo()> ptr mem) (Load <config.fe.TypeInt()> (OffPtr <config.fe.TypeInt().PtrTo()> [config.PtrSize] ptr) mem) (Load <config.fe.TypeInt()> (OffPtr <config.fe.TypeInt().PtrTo()> [2*config.PtrSize] ptr) mem))
// result: (SliceMake (Load <t.ElemType().PtrTo()> ptr mem) (Load <fe.TypeInt()> (OffPtr <fe.TypeInt().PtrTo()> [config.PtrSize] ptr) mem) (Load <fe.TypeInt()> (OffPtr <fe.TypeInt().PtrTo()> [2*config.PtrSize] ptr) mem))
for {
t := v.Type
ptr := v.Args[0]
......@@ -199,15 +197,15 @@ func rewriteValuedec_OpLoad(v *Value, config *Config) bool {
v0.AddArg(ptr)
v0.AddArg(mem)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpLoad, config.fe.TypeInt())
v2 := b.NewValue0(v.Pos, OpOffPtr, config.fe.TypeInt().PtrTo())
v1 := b.NewValue0(v.Pos, OpLoad, fe.TypeInt())
v2 := b.NewValue0(v.Pos, OpOffPtr, fe.TypeInt().PtrTo())
v2.AuxInt = config.PtrSize
v2.AddArg(ptr)
v1.AddArg(v2)
v1.AddArg(mem)
v.AddArg(v1)
v3 := b.NewValue0(v.Pos, OpLoad, config.fe.TypeInt())
v4 := b.NewValue0(v.Pos, OpOffPtr, config.fe.TypeInt().PtrTo())
v3 := b.NewValue0(v.Pos, OpLoad, fe.TypeInt())
v4 := b.NewValue0(v.Pos, OpOffPtr, fe.TypeInt().PtrTo())
v4.AuxInt = 2 * config.PtrSize
v4.AddArg(ptr)
v3.AddArg(v4)
......@@ -217,7 +215,7 @@ func rewriteValuedec_OpLoad(v *Value, config *Config) bool {
}
// match: (Load <t> ptr mem)
// cond: t.IsInterface()
// result: (IMake (Load <config.fe.TypeBytePtr()> ptr mem) (Load <config.fe.TypeBytePtr()> (OffPtr <config.fe.TypeBytePtr().PtrTo()> [config.PtrSize] ptr) mem))
// result: (IMake (Load <fe.TypeBytePtr()> ptr mem) (Load <fe.TypeBytePtr()> (OffPtr <fe.TypeBytePtr().PtrTo()> [config.PtrSize] ptr) mem))
for {
t := v.Type
ptr := v.Args[0]
......@@ -226,12 +224,12 @@ func rewriteValuedec_OpLoad(v *Value, config *Config) bool {
break
}
v.reset(OpIMake)
v0 := b.NewValue0(v.Pos, OpLoad, config.fe.TypeBytePtr())
v0 := b.NewValue0(v.Pos, OpLoad, fe.TypeBytePtr())
v0.AddArg(ptr)
v0.AddArg(mem)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpLoad, config.fe.TypeBytePtr())
v2 := b.NewValue0(v.Pos, OpOffPtr, config.fe.TypeBytePtr().PtrTo())
v1 := b.NewValue0(v.Pos, OpLoad, fe.TypeBytePtr())
v2 := b.NewValue0(v.Pos, OpOffPtr, fe.TypeBytePtr().PtrTo())
v2.AuxInt = config.PtrSize
v2.AddArg(ptr)
v1.AddArg(v2)
......@@ -241,9 +239,7 @@ func rewriteValuedec_OpLoad(v *Value, config *Config) bool {
}
return false
}
func rewriteValuedec_OpSliceCap(v *Value, config *Config) bool {
b := v.Block
_ = b
func rewriteValuedec_OpSliceCap(v *Value) bool {
// match: (SliceCap (SliceMake _ _ cap))
// cond:
// result: cap
......@@ -260,9 +256,7 @@ func rewriteValuedec_OpSliceCap(v *Value, config *Config) bool {
}
return false
}
func rewriteValuedec_OpSliceLen(v *Value, config *Config) bool {
b := v.Block
_ = b
func rewriteValuedec_OpSliceLen(v *Value) bool {
// match: (SliceLen (SliceMake _ len _))
// cond:
// result: len
......@@ -279,9 +273,7 @@ func rewriteValuedec_OpSliceLen(v *Value, config *Config) bool {
}
return false
}
func rewriteValuedec_OpSlicePtr(v *Value, config *Config) bool {
b := v.Block
_ = b
func rewriteValuedec_OpSlicePtr(v *Value) bool {
// match: (SlicePtr (SliceMake ptr _ _ ))
// cond:
// result: ptr
......@@ -298,12 +290,16 @@ func rewriteValuedec_OpSlicePtr(v *Value, config *Config) bool {
}
return false
}
func rewriteValuedec_OpStore(v *Value, config *Config) bool {
func rewriteValuedec_OpStore(v *Value) bool {
b := v.Block
_ = b
config := b.Func.Config
_ = config
fe := config.fe
_ = fe
// match: (Store {t} dst (ComplexMake real imag) mem)
// cond: t.(Type).Size() == 8
// result: (Store {config.fe.TypeFloat32()} (OffPtr <config.fe.TypeFloat32().PtrTo()> [4] dst) imag (Store {config.fe.TypeFloat32()} dst real mem))
// result: (Store {fe.TypeFloat32()} (OffPtr <fe.TypeFloat32().PtrTo()> [4] dst) imag (Store {fe.TypeFloat32()} dst real mem))
for {
t := v.Aux
dst := v.Args[0]
......@@ -318,14 +314,14 @@ func rewriteValuedec_OpStore(v *Value, config *Config) bool {
break
}
v.reset(OpStore)
v.Aux = config.fe.TypeFloat32()
v0 := b.NewValue0(v.Pos, OpOffPtr, config.fe.TypeFloat32().PtrTo())
v.Aux = fe.TypeFloat32()
v0 := b.NewValue0(v.Pos, OpOffPtr, fe.TypeFloat32().PtrTo())
v0.AuxInt = 4
v0.AddArg(dst)
v.AddArg(v0)
v.AddArg(imag)
v1 := b.NewValue0(v.Pos, OpStore, TypeMem)
v1.Aux = config.fe.TypeFloat32()
v1.Aux = fe.TypeFloat32()
v1.AddArg(dst)
v1.AddArg(real)
v1.AddArg(mem)
......@@ -334,7 +330,7 @@ func rewriteValuedec_OpStore(v *Value, config *Config) bool {
}
// match: (Store {t} dst (ComplexMake real imag) mem)
// cond: t.(Type).Size() == 16
// result: (Store {config.fe.TypeFloat64()} (OffPtr <config.fe.TypeFloat64().PtrTo()> [8] dst) imag (Store {config.fe.TypeFloat64()} dst real mem))
// result: (Store {fe.TypeFloat64()} (OffPtr <fe.TypeFloat64().PtrTo()> [8] dst) imag (Store {fe.TypeFloat64()} dst real mem))
for {
t := v.Aux
dst := v.Args[0]
......@@ -349,14 +345,14 @@ func rewriteValuedec_OpStore(v *Value, config *Config) bool {
break
}
v.reset(OpStore)
v.Aux = config.fe.TypeFloat64()
v0 := b.NewValue0(v.Pos, OpOffPtr, config.fe.TypeFloat64().PtrTo())
v.Aux = fe.TypeFloat64()
v0 := b.NewValue0(v.Pos, OpOffPtr, fe.TypeFloat64().PtrTo())
v0.AuxInt = 8
v0.AddArg(dst)
v.AddArg(v0)
v.AddArg(imag)
v1 := b.NewValue0(v.Pos, OpStore, TypeMem)
v1.Aux = config.fe.TypeFloat64()
v1.Aux = fe.TypeFloat64()
v1.AddArg(dst)
v1.AddArg(real)
v1.AddArg(mem)
......@@ -365,7 +361,7 @@ func rewriteValuedec_OpStore(v *Value, config *Config) bool {
}
// match: (Store dst (StringMake ptr len) mem)
// cond:
// result: (Store {config.fe.TypeInt()} (OffPtr <config.fe.TypeInt().PtrTo()> [config.PtrSize] dst) len (Store {config.fe.TypeBytePtr()} dst ptr mem))
// result: (Store {fe.TypeInt()} (OffPtr <fe.TypeInt().PtrTo()> [config.PtrSize] dst) len (Store {fe.TypeBytePtr()} dst ptr mem))
for {
dst := v.Args[0]
v_1 := v.Args[1]
......@@ -376,14 +372,14 @@ func rewriteValuedec_OpStore(v *Value, config *Config) bool {
len := v_1.Args[1]
mem := v.Args[2]
v.reset(OpStore)
v.Aux = config.fe.TypeInt()
v0 := b.NewValue0(v.Pos, OpOffPtr, config.fe.TypeInt().PtrTo())
v.Aux = fe.TypeInt()
v0 := b.NewValue0(v.Pos, OpOffPtr, fe.TypeInt().PtrTo())
v0.AuxInt = config.PtrSize
v0.AddArg(dst)
v.AddArg(v0)
v.AddArg(len)
v1 := b.NewValue0(v.Pos, OpStore, TypeMem)
v1.Aux = config.fe.TypeBytePtr()
v1.Aux = fe.TypeBytePtr()
v1.AddArg(dst)
v1.AddArg(ptr)
v1.AddArg(mem)
......@@ -392,7 +388,7 @@ func rewriteValuedec_OpStore(v *Value, config *Config) bool {
}
// match: (Store dst (SliceMake ptr len cap) mem)
// cond:
// result: (Store {config.fe.TypeInt()} (OffPtr <config.fe.TypeInt().PtrTo()> [2*config.PtrSize] dst) cap (Store {config.fe.TypeInt()} (OffPtr <config.fe.TypeInt().PtrTo()> [config.PtrSize] dst) len (Store {config.fe.TypeBytePtr()} dst ptr mem)))
// result: (Store {fe.TypeInt()} (OffPtr <fe.TypeInt().PtrTo()> [2*config.PtrSize] dst) cap (Store {fe.TypeInt()} (OffPtr <fe.TypeInt().PtrTo()> [config.PtrSize] dst) len (Store {fe.TypeBytePtr()} dst ptr mem)))
for {
dst := v.Args[0]
v_1 := v.Args[1]
......@@ -404,21 +400,21 @@ func rewriteValuedec_OpStore(v *Value, config *Config) bool {
cap := v_1.Args[2]
mem := v.Args[2]
v.reset(OpStore)
v.Aux = config.fe.TypeInt()
v0 := b.NewValue0(v.Pos, OpOffPtr, config.fe.TypeInt().PtrTo())
v.Aux = fe.TypeInt()
v0 := b.NewValue0(v.Pos, OpOffPtr, fe.TypeInt().PtrTo())
v0.AuxInt = 2 * config.PtrSize
v0.AddArg(dst)
v.AddArg(v0)
v.AddArg(cap)
v1 := b.NewValue0(v.Pos, OpStore, TypeMem)
v1.Aux = config.fe.TypeInt()
v2 := b.NewValue0(v.Pos, OpOffPtr, config.fe.TypeInt().PtrTo())
v1.Aux = fe.TypeInt()
v2 := b.NewValue0(v.Pos, OpOffPtr, fe.TypeInt().PtrTo())
v2.AuxInt = config.PtrSize
v2.AddArg(dst)
v1.AddArg(v2)
v1.AddArg(len)
v3 := b.NewValue0(v.Pos, OpStore, TypeMem)
v3.Aux = config.fe.TypeBytePtr()
v3.Aux = fe.TypeBytePtr()
v3.AddArg(dst)
v3.AddArg(ptr)
v3.AddArg(mem)
......@@ -428,7 +424,7 @@ func rewriteValuedec_OpStore(v *Value, config *Config) bool {
}
// match: (Store dst (IMake itab data) mem)
// cond:
// result: (Store {config.fe.TypeBytePtr()} (OffPtr <config.fe.TypeBytePtr().PtrTo()> [config.PtrSize] dst) data (Store {config.fe.TypeUintptr()} dst itab mem))
// result: (Store {fe.TypeBytePtr()} (OffPtr <fe.TypeBytePtr().PtrTo()> [config.PtrSize] dst) data (Store {fe.TypeUintptr()} dst itab mem))
for {
dst := v.Args[0]
v_1 := v.Args[1]
......@@ -439,14 +435,14 @@ func rewriteValuedec_OpStore(v *Value, config *Config) bool {
data := v_1.Args[1]
mem := v.Args[2]
v.reset(OpStore)
v.Aux = config.fe.TypeBytePtr()
v0 := b.NewValue0(v.Pos, OpOffPtr, config.fe.TypeBytePtr().PtrTo())
v.Aux = fe.TypeBytePtr()
v0 := b.NewValue0(v.Pos, OpOffPtr, fe.TypeBytePtr().PtrTo())
v0.AuxInt = config.PtrSize
v0.AddArg(dst)
v.AddArg(v0)
v.AddArg(data)
v1 := b.NewValue0(v.Pos, OpStore, TypeMem)
v1.Aux = config.fe.TypeUintptr()
v1.Aux = fe.TypeUintptr()
v1.AddArg(dst)
v1.AddArg(itab)
v1.AddArg(mem)
......@@ -455,9 +451,7 @@ func rewriteValuedec_OpStore(v *Value, config *Config) bool {
}
return false
}
func rewriteValuedec_OpStringLen(v *Value, config *Config) bool {
b := v.Block
_ = b
func rewriteValuedec_OpStringLen(v *Value) bool {
// match: (StringLen (StringMake _ len))
// cond:
// result: len
......@@ -474,9 +468,7 @@ func rewriteValuedec_OpStringLen(v *Value, config *Config) bool {
}
return false
}
func rewriteValuedec_OpStringPtr(v *Value, config *Config) bool {
b := v.Block
_ = b
func rewriteValuedec_OpStringPtr(v *Value) bool {
// match: (StringPtr (StringMake ptr _))
// cond:
// result: ptr
......@@ -493,7 +485,11 @@ func rewriteValuedec_OpStringPtr(v *Value, config *Config) bool {
}
return false
}
func rewriteBlockdec(b *Block, config *Config) bool {
func rewriteBlockdec(b *Block) bool {
config := b.Func.Config
_ = config
fe := config.fe
_ = fe
switch b.Kind {
}
return false
......
This source diff could not be displayed because it is too large. You can view the blob instead.
This source diff could not be displayed because it is too large. You can view the blob instead.
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment