Commit 8dc04cbe authored by Keith Randall

cmd/compile: enforce 32-bit restrictions on ops

Most 64-bit x86 ops can only take a signed 32-bit constant.
Clean up our rewrite rules to enforce this restriction.
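
The guard the new rules use is a simple range test; a minimal sketch of
the helper (the real one lives in the ssa package's rewrite support):

	// is32Bit reports whether x can be represented as a signed
	// 32-bit integer.
	func is32Bit(x int64) bool {
		return x == int64(int32(x))
	}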

Modify the assembler to fail if the offset does not fit
in the instruction.
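
The assembler-side check is the same test applied at encoding time. A
hypothetical sketch of that guard (names are illustrative, not the
actual asm6.go code):

	// Reject displacements that do not fit in the instruction's
	// signed 32-bit field instead of silently truncating them.
	if v := a.Offset; v != int64(int32(v)) {
		ctxt.Diag("offset too large in %s", p)
	}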

That last check triggers a few times on weird testing code.
Suppress those errors if the compiler itself generated errors.

Fixes #14862

Change-Id: I76559af035b38483b1e59621a8029fc66b3a5d1e
Reviewed-on: https://go-review.googlesource.com/20815
Reviewed-by: Josh Bleecher Snyder <josharian@gmail.com>
parent d246eedc
@@ -1309,6 +1309,10 @@ func funccompile(n *Node) {
breakpc = nil
Funcdepth = 0
dclcontext = PEXTERN
if nerrors != 0 {
// If we have compile errors, ignore any assembler/linker errors.
Ctxt.DiagFunc = func(string, ...interface{}) {}
}
flushdata()
obj.Flushplist(Ctxt) // convert from Prog list to machine code
}
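
The stub above works because the obj package routes diagnostics through
the pluggable DiagFunc hook; roughly (a sketch of the hook's shape, not
necessarily the exact obj code):

	// Diag counts the error and forwards it to DiagFunc, so a no-op
	// DiagFunc silences assembler diagnostics while the compiler
	// still fails with its own, earlier errors.
	func (ctxt *Link) Diag(format string, args ...interface{}) {
		ctxt.Errors++
		ctxt.DiagFunc(format, args...)
	}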
@@ -343,7 +343,8 @@
(Not x) -> (XORBconst [1] x)
(OffPtr [off] ptr) -> (ADDQconst [off] ptr)
(OffPtr [off] ptr) && is32Bit(off) -> (ADDQconst [off] ptr)
(OffPtr [off] ptr) -> (ADDQ (MOVQconst [off]) ptr)
(Const8 [val]) -> (MOVBconst [val])
(Const16 [val]) -> (MOVWconst [val])
@@ -565,25 +566,25 @@
(ADDQ x (ADDQconst [c] y)) -> (LEAQ1 [c] x y)
// fold ADDQ into LEAQ
(ADDQconst [c] (LEAQ [d] {s} x)) -> (LEAQ [c+d] {s} x)
(LEAQ [c] {s} (ADDQconst [d] x)) -> (LEAQ [c+d] {s} x)
(ADDQconst [c] (LEAQ [d] {s} x)) && is32Bit(c+d) -> (LEAQ [c+d] {s} x)
(LEAQ [c] {s} (ADDQconst [d] x)) && is32Bit(c+d) -> (LEAQ [c+d] {s} x)
(LEAQ [c] {s} (ADDQ x y)) && x.Op != OpSB && y.Op != OpSB -> (LEAQ1 [c] {s} x y)
(ADDQ x (LEAQ [c] {s} y)) && x.Op != OpSB && y.Op != OpSB -> (LEAQ1 [c] {s} x y)
(ADDQ (LEAQ [c] {s} x) y) && x.Op != OpSB && y.Op != OpSB -> (LEAQ1 [c] {s} x y)
// fold ADDQconst into LEAQx
(ADDQconst [c] (LEAQ1 [d] {s} x y)) -> (LEAQ1 [c+d] {s} x y)
(ADDQconst [c] (LEAQ2 [d] {s} x y)) -> (LEAQ2 [c+d] {s} x y)
(ADDQconst [c] (LEAQ4 [d] {s} x y)) -> (LEAQ4 [c+d] {s} x y)
(ADDQconst [c] (LEAQ8 [d] {s} x y)) -> (LEAQ8 [c+d] {s} x y)
(LEAQ1 [c] {s} (ADDQconst [d] x) y) && x.Op != OpSB -> (LEAQ1 [c+d] {s} x y)
(LEAQ1 [c] {s} x (ADDQconst [d] y)) && y.Op != OpSB -> (LEAQ1 [c+d] {s} x y)
(LEAQ2 [c] {s} (ADDQconst [d] x) y) && x.Op != OpSB -> (LEAQ2 [c+d] {s} x y)
(LEAQ2 [c] {s} x (ADDQconst [d] y)) && y.Op != OpSB -> (LEAQ2 [c+2*d] {s} x y)
(LEAQ4 [c] {s} (ADDQconst [d] x) y) && x.Op != OpSB -> (LEAQ4 [c+d] {s} x y)
(LEAQ4 [c] {s} x (ADDQconst [d] y)) && y.Op != OpSB -> (LEAQ4 [c+4*d] {s} x y)
(LEAQ8 [c] {s} (ADDQconst [d] x) y) && x.Op != OpSB -> (LEAQ8 [c+d] {s} x y)
(LEAQ8 [c] {s} x (ADDQconst [d] y)) && y.Op != OpSB -> (LEAQ8 [c+8*d] {s} x y)
(ADDQconst [c] (LEAQ1 [d] {s} x y)) && is32Bit(c+d) -> (LEAQ1 [c+d] {s} x y)
(ADDQconst [c] (LEAQ2 [d] {s} x y)) && is32Bit(c+d) -> (LEAQ2 [c+d] {s} x y)
(ADDQconst [c] (LEAQ4 [d] {s} x y)) && is32Bit(c+d) -> (LEAQ4 [c+d] {s} x y)
(ADDQconst [c] (LEAQ8 [d] {s} x y)) && is32Bit(c+d) -> (LEAQ8 [c+d] {s} x y)
(LEAQ1 [c] {s} (ADDQconst [d] x) y) && is32Bit(c+d) && x.Op != OpSB -> (LEAQ1 [c+d] {s} x y)
(LEAQ1 [c] {s} x (ADDQconst [d] y)) && is32Bit(c+d) && y.Op != OpSB -> (LEAQ1 [c+d] {s} x y)
(LEAQ2 [c] {s} (ADDQconst [d] x) y) && is32Bit(c+d) && x.Op != OpSB -> (LEAQ2 [c+d] {s} x y)
(LEAQ2 [c] {s} x (ADDQconst [d] y)) && is32Bit(c+2*d) && y.Op != OpSB -> (LEAQ2 [c+2*d] {s} x y)
(LEAQ4 [c] {s} (ADDQconst [d] x) y) && is32Bit(c+d) && x.Op != OpSB -> (LEAQ4 [c+d] {s} x y)
(LEAQ4 [c] {s} x (ADDQconst [d] y)) && is32Bit(c+4*d) && y.Op != OpSB -> (LEAQ4 [c+4*d] {s} x y)
(LEAQ8 [c] {s} (ADDQconst [d] x) y) && is32Bit(c+d) && x.Op != OpSB -> (LEAQ8 [c+d] {s} x y)
(LEAQ8 [c] {s} x (ADDQconst [d] y)) && is32Bit(c+8*d) && y.Op != OpSB -> (LEAQ8 [c+8*d] {s} x y)
// fold shifts into LEAQx
(LEAQ1 [c] {s} x (SHLQconst [1] y)) -> (LEAQ2 [c] {s} x y)
@@ -633,7 +634,7 @@
// Fold extensions and ANDs together.
(MOVBQZX (ANDBconst [c] x)) -> (ANDQconst [c & 0xff] x)
(MOVWQZX (ANDWconst [c] x)) -> (ANDQconst [c & 0xffff] x)
(MOVLQZX (ANDLconst [c] x)) -> (ANDQconst [c & 0xffffffff] x)
(MOVLQZX (ANDLconst [c] x)) && c & 0x80000000 == 0 -> (ANDQconst [c & 0x7fffffff] x)
(MOVBQSX (ANDBconst [c] x)) && c & 0x80 == 0 -> (ANDQconst [c & 0x7f] x)
(MOVWQSX (ANDWconst [c] x)) && c & 0x8000 == 0 -> (ANDQconst [c & 0x7fff] x)
(MOVLQSX (ANDLconst [c] x)) && c & 0x80000000 == 0 -> (ANDQconst [c & 0x7fffffff] x)
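
Note on the masking change above: ANDQ's 32-bit immediate is
sign-extended to 64 bits, so a mask with bit 31 set would not mean what
the old rule assumed. A small runnable illustration (independent of the
compiler sources):

	package main

	import "fmt"

	func main() {
		c := int64(0x80000000)
		// As a sign-extended 32-bit immediate, 0x80000000 becomes
		// 0xffffffff80000000, not the 32-bit mask the rule wanted.
		fmt.Printf("%#x\n", int64(int32(c))) // -0x80000000
	}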
@@ -651,21 +652,21 @@
// the ADDQconst get eliminated, we still have to compute the ADDQconst and we now
// have potentially two live values (ptr and (ADDQconst [off] ptr)) instead of one.
// Nevertheless, let's do it!
(MOVQload [off1] {sym} (ADDQconst [off2] ptr) mem) -> (MOVQload [addOff(off1, off2)] {sym} ptr mem)
(MOVLload [off1] {sym} (ADDQconst [off2] ptr) mem) -> (MOVLload [addOff(off1, off2)] {sym} ptr mem)
(MOVWload [off1] {sym} (ADDQconst [off2] ptr) mem) -> (MOVWload [addOff(off1, off2)] {sym} ptr mem)
(MOVBload [off1] {sym} (ADDQconst [off2] ptr) mem) -> (MOVBload [addOff(off1, off2)] {sym} ptr mem)
(MOVSSload [off1] {sym} (ADDQconst [off2] ptr) mem) -> (MOVSSload [addOff(off1, off2)] {sym} ptr mem)
(MOVSDload [off1] {sym} (ADDQconst [off2] ptr) mem) -> (MOVSDload [addOff(off1, off2)] {sym} ptr mem)
(MOVOload [off1] {sym} (ADDQconst [off2] ptr) mem) -> (MOVOload [addOff(off1, off2)] {sym} ptr mem)
(MOVQstore [off1] {sym} (ADDQconst [off2] ptr) val mem) -> (MOVQstore [addOff(off1, off2)] {sym} ptr val mem)
(MOVLstore [off1] {sym} (ADDQconst [off2] ptr) val mem) -> (MOVLstore [addOff(off1, off2)] {sym} ptr val mem)
(MOVWstore [off1] {sym} (ADDQconst [off2] ptr) val mem) -> (MOVWstore [addOff(off1, off2)] {sym} ptr val mem)
(MOVBstore [off1] {sym} (ADDQconst [off2] ptr) val mem) -> (MOVBstore [addOff(off1, off2)] {sym} ptr val mem)
(MOVSSstore [off1] {sym} (ADDQconst [off2] ptr) val mem) -> (MOVSSstore [addOff(off1, off2)] {sym} ptr val mem)
(MOVSDstore [off1] {sym} (ADDQconst [off2] ptr) val mem) -> (MOVSDstore [addOff(off1, off2)] {sym} ptr val mem)
(MOVOstore [off1] {sym} (ADDQconst [off2] ptr) val mem) -> (MOVOstore [addOff(off1, off2)] {sym} ptr val mem)
(MOVQload [off1] {sym} (ADDQconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVQload [off1+off2] {sym} ptr mem)
(MOVLload [off1] {sym} (ADDQconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVLload [off1+off2] {sym} ptr mem)
(MOVWload [off1] {sym} (ADDQconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVWload [off1+off2] {sym} ptr mem)
(MOVBload [off1] {sym} (ADDQconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVBload [off1+off2] {sym} ptr mem)
(MOVSSload [off1] {sym} (ADDQconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVSSload [off1+off2] {sym} ptr mem)
(MOVSDload [off1] {sym} (ADDQconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVSDload [off1+off2] {sym} ptr mem)
(MOVOload [off1] {sym} (ADDQconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVOload [off1+off2] {sym} ptr mem)
(MOVQstore [off1] {sym} (ADDQconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (MOVQstore [off1+off2] {sym} ptr val mem)
(MOVLstore [off1] {sym} (ADDQconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (MOVLstore [off1+off2] {sym} ptr val mem)
(MOVWstore [off1] {sym} (ADDQconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (MOVWstore [off1+off2] {sym} ptr val mem)
(MOVBstore [off1] {sym} (ADDQconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (MOVBstore [off1+off2] {sym} ptr val mem)
(MOVSSstore [off1] {sym} (ADDQconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (MOVSSstore [off1+off2] {sym} ptr val mem)
(MOVSDstore [off1] {sym} (ADDQconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (MOVSDstore [off1+off2] {sym} ptr val mem)
(MOVOstore [off1] {sym} (ADDQconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (MOVOstore [off1+off2] {sym} ptr val mem)
// Fold constants into stores.
(MOVQstore [off] {sym} ptr (MOVQconst [c]) mem) && validValAndOff(c,off) ->
@@ -689,49 +690,49 @@
// We need to fold LEAQ into the MOVx ops so that the live variable analysis knows
// what variables are being read/written by the ops.
(MOVQload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) && canMergeSym(sym1, sym2) ->
(MOVQload [addOff(off1,off2)] {mergeSym(sym1,sym2)} base mem)
(MOVLload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) && canMergeSym(sym1, sym2) ->
(MOVLload [addOff(off1,off2)] {mergeSym(sym1,sym2)} base mem)
(MOVWload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) && canMergeSym(sym1, sym2) ->
(MOVWload [addOff(off1,off2)] {mergeSym(sym1,sym2)} base mem)
(MOVBload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) && canMergeSym(sym1, sym2) ->
(MOVBload [addOff(off1,off2)] {mergeSym(sym1,sym2)} base mem)
(MOVSSload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) && canMergeSym(sym1, sym2) ->
(MOVSSload [addOff(off1,off2)] {mergeSym(sym1,sym2)} base mem)
(MOVSDload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) && canMergeSym(sym1, sym2) ->
(MOVSDload [addOff(off1,off2)] {mergeSym(sym1,sym2)} base mem)
(MOVOload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) && canMergeSym(sym1, sym2) ->
(MOVOload [addOff(off1,off2)] {mergeSym(sym1,sym2)} base mem)
(MOVBQZXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) && canMergeSym(sym1, sym2) ->
(MOVBQZXload [addOff(off1,off2)] {mergeSym(sym1,sym2)} base mem)
(MOVWQZXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) && canMergeSym(sym1, sym2) ->
(MOVWQZXload [addOff(off1,off2)] {mergeSym(sym1,sym2)} base mem)
(MOVLQZXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) && canMergeSym(sym1, sym2) ->
(MOVLQZXload [addOff(off1,off2)] {mergeSym(sym1,sym2)} base mem)
(MOVBQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) && canMergeSym(sym1, sym2) ->
(MOVBQSXload [addOff(off1,off2)] {mergeSym(sym1,sym2)} base mem)
(MOVWQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) && canMergeSym(sym1, sym2) ->
(MOVWQSXload [addOff(off1,off2)] {mergeSym(sym1,sym2)} base mem)
(MOVLQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) && canMergeSym(sym1, sym2) ->
(MOVLQSXload [addOff(off1,off2)] {mergeSym(sym1,sym2)} base mem)
(MOVQstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) && canMergeSym(sym1, sym2) ->
(MOVQstore [addOff(off1,off2)] {mergeSym(sym1,sym2)} base val mem)
(MOVLstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) && canMergeSym(sym1, sym2) ->
(MOVLstore [addOff(off1,off2)] {mergeSym(sym1,sym2)} base val mem)
(MOVWstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) && canMergeSym(sym1, sym2) ->
(MOVWstore [addOff(off1,off2)] {mergeSym(sym1,sym2)} base val mem)
(MOVBstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) && canMergeSym(sym1, sym2) ->
(MOVBstore [addOff(off1,off2)] {mergeSym(sym1,sym2)} base val mem)
(MOVSSstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) && canMergeSym(sym1, sym2) ->
(MOVSSstore [addOff(off1,off2)] {mergeSym(sym1,sym2)} base val mem)
(MOVSDstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) && canMergeSym(sym1, sym2) ->
(MOVSDstore [addOff(off1,off2)] {mergeSym(sym1,sym2)} base val mem)
(MOVOstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) && canMergeSym(sym1, sym2) ->
(MOVOstore [addOff(off1,off2)] {mergeSym(sym1,sym2)} base val mem)
(MOVQload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
(MOVQload [off1+off2] {mergeSym(sym1,sym2)} base mem)
(MOVLload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
(MOVLload [off1+off2] {mergeSym(sym1,sym2)} base mem)
(MOVWload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
(MOVWload [off1+off2] {mergeSym(sym1,sym2)} base mem)
(MOVBload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
(MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem)
(MOVSSload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
(MOVSSload [off1+off2] {mergeSym(sym1,sym2)} base mem)
(MOVSDload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
(MOVSDload [off1+off2] {mergeSym(sym1,sym2)} base mem)
(MOVOload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
(MOVOload [off1+off2] {mergeSym(sym1,sym2)} base mem)
(MOVBQZXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
(MOVBQZXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
(MOVWQZXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
(MOVWQZXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
(MOVLQZXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
(MOVLQZXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
(MOVBQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
(MOVBQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
(MOVWQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
(MOVWQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
(MOVLQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
(MOVLQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
(MOVQstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
(MOVQstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
(MOVLstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
(MOVLstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
(MOVWstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
(MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
(MOVBstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
(MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
(MOVSSstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
(MOVSSstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
(MOVSDstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
(MOVSDstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
(MOVOstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
(MOVOstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
(MOVQstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem) && canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) ->
(MOVQstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
@@ -743,31 +744,31 @@
(MOVBstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
// generating indexed loads and stores
(MOVBload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem) && canMergeSym(sym1, sym2) ->
(MOVBloadidx1 [addOff(off1, off2)] {mergeSym(sym1,sym2)} ptr idx mem)
(MOVWload [off1] {sym1} (LEAQ2 [off2] {sym2} ptr idx) mem) && canMergeSym(sym1, sym2) ->
(MOVWloadidx2 [addOff(off1, off2)] {mergeSym(sym1,sym2)} ptr idx mem)
(MOVLload [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) mem) && canMergeSym(sym1, sym2) ->
(MOVLloadidx4 [addOff(off1, off2)] {mergeSym(sym1,sym2)} ptr idx mem)
(MOVQload [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) mem) && canMergeSym(sym1, sym2) ->
(MOVQloadidx8 [addOff(off1, off2)] {mergeSym(sym1,sym2)} ptr idx mem)
(MOVSSload [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) mem) && canMergeSym(sym1, sym2) ->
(MOVSSloadidx4 [addOff(off1, off2)] {mergeSym(sym1,sym2)} ptr idx mem)
(MOVSDload [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) mem) && canMergeSym(sym1, sym2) ->
(MOVSDloadidx8 [addOff(off1, off2)] {mergeSym(sym1,sym2)} ptr idx mem)
(MOVBstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem) && canMergeSym(sym1, sym2) ->
(MOVBstoreidx1 [addOff(off1, off2)] {mergeSym(sym1,sym2)} ptr idx val mem)
(MOVWstore [off1] {sym1} (LEAQ2 [off2] {sym2} ptr idx) val mem) && canMergeSym(sym1, sym2) ->
(MOVWstoreidx2 [addOff(off1, off2)] {mergeSym(sym1,sym2)} ptr idx val mem)
(MOVLstore [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) val mem) && canMergeSym(sym1, sym2) ->
(MOVLstoreidx4 [addOff(off1, off2)] {mergeSym(sym1,sym2)} ptr idx val mem)
(MOVQstore [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) val mem) && canMergeSym(sym1, sym2) ->
(MOVQstoreidx8 [addOff(off1, off2)] {mergeSym(sym1,sym2)} ptr idx val mem)
(MOVSSstore [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) val mem) && canMergeSym(sym1, sym2) ->
(MOVSSstoreidx4 [addOff(off1, off2)] {mergeSym(sym1,sym2)} ptr idx val mem)
(MOVSDstore [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) val mem) && canMergeSym(sym1, sym2) ->
(MOVSDstoreidx8 [addOff(off1, off2)] {mergeSym(sym1,sym2)} ptr idx val mem)
(MOVBload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
(MOVBloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
(MOVWload [off1] {sym1} (LEAQ2 [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
(MOVWloadidx2 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
(MOVLload [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
(MOVLloadidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
(MOVQload [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
(MOVQloadidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
(MOVSSload [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
(MOVSSloadidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
(MOVSDload [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
(MOVSDloadidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
(MOVBstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
(MOVBstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
(MOVWstore [off1] {sym1} (LEAQ2 [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
(MOVWstoreidx2 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
(MOVLstore [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
(MOVLstoreidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
(MOVQstore [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
(MOVQstoreidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
(MOVSSstore [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
(MOVSSstoreidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
(MOVSDstore [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
(MOVSDstoreidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
(MOVBload [off] {sym} (ADDQ ptr idx) mem) && ptr.Op != OpSB -> (MOVBloadidx1 [off] {sym} ptr idx mem)
(MOVBstore [off] {sym} (ADDQ ptr idx) val mem) && ptr.Op != OpSB -> (MOVBstoreidx1 [off] {sym} ptr idx val mem)
@@ -830,34 +831,34 @@
(MOVQstoreconstidx8 [ValAndOff(x).add(8*c)] {sym} ptr idx mem)
// fold LEAQs together
(LEAQ [off1] {sym1} (LEAQ [off2] {sym2} x)) && canMergeSym(sym1, sym2) ->
(LEAQ [addOff(off1,off2)] {mergeSym(sym1,sym2)} x)
(LEAQ [off1] {sym1} (LEAQ [off2] {sym2} x)) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
(LEAQ [off1+off2] {mergeSym(sym1,sym2)} x)
// LEAQ into LEAQ1
(LEAQ1 [off1] {sym1} (LEAQ [off2] {sym2} x) y) && canMergeSym(sym1, sym2) && x.Op != OpSB ->
(LEAQ1 [addOff(off1,off2)] {mergeSym(sym1,sym2)} x y)
(LEAQ1 [off1] {sym1} x (LEAQ [off2] {sym2} y)) && canMergeSym(sym1, sym2) && y.Op != OpSB ->
(LEAQ1 [addOff(off1,off2)] {mergeSym(sym1,sym2)} x y)
(LEAQ1 [off1] {sym1} (LEAQ [off2] {sym2} x) y) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB ->
(LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y)
(LEAQ1 [off1] {sym1} x (LEAQ [off2] {sym2} y)) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) && y.Op != OpSB ->
(LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y)
// LEAQ1 into LEAQ
(LEAQ [off1] {sym1} (LEAQ1 [off2] {sym2} x y)) && canMergeSym(sym1, sym2) ->
(LEAQ1 [addOff(off1,off2)] {mergeSym(sym1,sym2)} x y)
(LEAQ [off1] {sym1} (LEAQ1 [off2] {sym2} x y)) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
(LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y)
// LEAQ into LEAQ[248]
(LEAQ2 [off1] {sym1} (LEAQ [off2] {sym2} x) y) && canMergeSym(sym1, sym2) && x.Op != OpSB ->
(LEAQ2 [addOff(off1,off2)] {mergeSym(sym1,sym2)} x y)
(LEAQ4 [off1] {sym1} (LEAQ [off2] {sym2} x) y) && canMergeSym(sym1, sym2) && x.Op != OpSB ->
(LEAQ4 [addOff(off1,off2)] {mergeSym(sym1,sym2)} x y)
(LEAQ8 [off1] {sym1} (LEAQ [off2] {sym2} x) y) && canMergeSym(sym1, sym2) && x.Op != OpSB ->
(LEAQ8 [addOff(off1,off2)] {mergeSym(sym1,sym2)} x y)
(LEAQ2 [off1] {sym1} (LEAQ [off2] {sym2} x) y) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB ->
(LEAQ2 [off1+off2] {mergeSym(sym1,sym2)} x y)
(LEAQ4 [off1] {sym1} (LEAQ [off2] {sym2} x) y) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB ->
(LEAQ4 [off1+off2] {mergeSym(sym1,sym2)} x y)
(LEAQ8 [off1] {sym1} (LEAQ [off2] {sym2} x) y) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB ->
(LEAQ8 [off1+off2] {mergeSym(sym1,sym2)} x y)
// LEAQ[248] into LEAQ
(LEAQ [off1] {sym1} (LEAQ2 [off2] {sym2} x y)) && canMergeSym(sym1, sym2) ->
(LEAQ2 [addOff(off1,off2)] {mergeSym(sym1,sym2)} x y)
(LEAQ [off1] {sym1} (LEAQ4 [off2] {sym2} x y)) && canMergeSym(sym1, sym2) ->
(LEAQ4 [addOff(off1,off2)] {mergeSym(sym1,sym2)} x y)
(LEAQ [off1] {sym1} (LEAQ8 [off2] {sym2} x y)) && canMergeSym(sym1, sym2) ->
(LEAQ8 [addOff(off1,off2)] {mergeSym(sym1,sym2)} x y)
(LEAQ [off1] {sym1} (LEAQ2 [off2] {sym2} x y)) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
(LEAQ2 [off1+off2] {mergeSym(sym1,sym2)} x y)
(LEAQ [off1] {sym1} (LEAQ4 [off2] {sym2} x y)) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
(LEAQ4 [off1+off2] {mergeSym(sym1,sym2)} x y)
(LEAQ [off1] {sym1} (LEAQ8 [off2] {sym2} x y)) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
(LEAQ8 [off1+off2] {mergeSym(sym1,sym2)} x y)
// lower Zero instructions with word sizes
(Zero [0] _ mem) -> mem
@@ -1119,7 +1120,7 @@
(ADDLconst [c] (MOVLconst [d])) -> (MOVLconst [c+d])
(ADDWconst [c] (MOVWconst [d])) -> (MOVWconst [c+d])
(ADDBconst [c] (MOVBconst [d])) -> (MOVBconst [c+d])
(ADDQconst [c] (ADDQconst [d] x)) -> (ADDQconst [c+d] x)
(ADDQconst [c] (ADDQconst [d] x)) && is32Bit(c+d) -> (ADDQconst [c+d] x)
(ADDLconst [c] (ADDLconst [d] x)) -> (ADDLconst [c+d] x)
(ADDWconst [c] (ADDWconst [d] x)) -> (ADDWconst [c+d] x)
(ADDBconst [c] (ADDBconst [d] x)) -> (ADDBconst [c+d] x)
@@ -1127,7 +1128,7 @@
(SUBLconst [c] (MOVLconst [d])) -> (MOVLconst [d-c])
(SUBWconst [c] (MOVWconst [d])) -> (MOVWconst [d-c])
(SUBBconst [c] (MOVBconst [d])) -> (MOVBconst [d-c])
(SUBQconst [c] (SUBQconst [d] x)) -> (ADDQconst [-c-d] x)
(SUBQconst [c] (SUBQconst [d] x)) && is32Bit(-c-d) -> (ADDQconst [-c-d] x)
(SUBLconst [c] (SUBLconst [d] x)) -> (ADDLconst [-c-d] x)
(SUBWconst [c] (SUBWconst [d] x)) -> (ADDWconst [-c-d] x)
(SUBBconst [c] (SUBBconst [d] x)) -> (ADDBconst [-c-d] x)
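
Why these guards are needed even though c and d each fit in 32 bits:
the folded constant can overflow. A runnable check (using the same
is32Bit test as the rules):

	package main

	import (
		"fmt"
		"math"
	)

	func is32Bit(x int64) bool { return x == int64(int32(x)) }

	func main() {
		c, d := int64(math.MinInt32), int64(math.MinInt32)
		fmt.Println(is32Bit(c), is32Bit(d)) // true true
		// -c-d == 1<<32, so the SUBQconst fold must be skipped.
		fmt.Println(is32Bit(-c - d)) // false
	}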
@@ -95,16 +95,6 @@ func typeSize(t Type) int64 {
return t.Size()
}
// addOff adds two int64 offsets. Fails if wraparound happens.
func addOff(x, y int64) int64 {
z := x + y
// x and y have same sign and z has a different sign => overflow
if x^y >= 0 && x^z < 0 {
panic(fmt.Sprintf("offset overflow %d %d", x, y))
}
return z
}
// mergeSym merges two symbolic offsets. There is no real merging of
// offsets, we just pick the non-nil one.
func mergeSym(x, y interface{}) interface{} {
@@ -1198,7 +1198,7 @@ func rewriteValueAMD64_OpAMD64ADDQconst(v *Value, config *Config) bool {
return true
}
// match: (ADDQconst [c] (LEAQ [d] {s} x))
// cond:
// cond: is32Bit(c+d)
// result: (LEAQ [c+d] {s} x)
for {
c := v.AuxInt
@@ -1208,6 +1208,9 @@ func rewriteValueAMD64_OpAMD64ADDQconst(v *Value, config *Config) bool {
d := v.Args[0].AuxInt
s := v.Args[0].Aux
x := v.Args[0].Args[0]
if !(is32Bit(c + d)) {
break
}
v.reset(OpAMD64LEAQ)
v.AuxInt = c + d
v.Aux = s
@@ -1215,7 +1218,7 @@ func rewriteValueAMD64_OpAMD64ADDQconst(v *Value, config *Config) bool {
return true
}
// match: (ADDQconst [c] (LEAQ1 [d] {s} x y))
// cond:
// cond: is32Bit(c+d)
// result: (LEAQ1 [c+d] {s} x y)
for {
c := v.AuxInt
@@ -1226,6 +1229,9 @@ func rewriteValueAMD64_OpAMD64ADDQconst(v *Value, config *Config) bool {
s := v.Args[0].Aux
x := v.Args[0].Args[0]
y := v.Args[0].Args[1]
if !(is32Bit(c + d)) {
break
}
v.reset(OpAMD64LEAQ1)
v.AuxInt = c + d
v.Aux = s
@@ -1234,7 +1240,7 @@ func rewriteValueAMD64_OpAMD64ADDQconst(v *Value, config *Config) bool {
return true
}
// match: (ADDQconst [c] (LEAQ2 [d] {s} x y))
// cond:
// cond: is32Bit(c+d)
// result: (LEAQ2 [c+d] {s} x y)
for {
c := v.AuxInt
@@ -1245,6 +1251,9 @@ func rewriteValueAMD64_OpAMD64ADDQconst(v *Value, config *Config) bool {
s := v.Args[0].Aux
x := v.Args[0].Args[0]
y := v.Args[0].Args[1]
if !(is32Bit(c + d)) {
break
}
v.reset(OpAMD64LEAQ2)
v.AuxInt = c + d
v.Aux = s
@@ -1253,7 +1262,7 @@ func rewriteValueAMD64_OpAMD64ADDQconst(v *Value, config *Config) bool {
return true
}
// match: (ADDQconst [c] (LEAQ4 [d] {s} x y))
// cond:
// cond: is32Bit(c+d)
// result: (LEAQ4 [c+d] {s} x y)
for {
c := v.AuxInt
@@ -1264,6 +1273,9 @@ func rewriteValueAMD64_OpAMD64ADDQconst(v *Value, config *Config) bool {
s := v.Args[0].Aux
x := v.Args[0].Args[0]
y := v.Args[0].Args[1]
if !(is32Bit(c + d)) {
break
}
v.reset(OpAMD64LEAQ4)
v.AuxInt = c + d
v.Aux = s
@@ -1272,7 +1284,7 @@ func rewriteValueAMD64_OpAMD64ADDQconst(v *Value, config *Config) bool {
return true
}
// match: (ADDQconst [c] (LEAQ8 [d] {s} x y))
// cond:
// cond: is32Bit(c+d)
// result: (LEAQ8 [c+d] {s} x y)
for {
c := v.AuxInt
@@ -1283,6 +1295,9 @@ func rewriteValueAMD64_OpAMD64ADDQconst(v *Value, config *Config) bool {
s := v.Args[0].Aux
x := v.Args[0].Args[0]
y := v.Args[0].Args[1]
if !(is32Bit(c + d)) {
break
}
v.reset(OpAMD64LEAQ8)
v.AuxInt = c + d
v.Aux = s
@@ -1317,7 +1332,7 @@ func rewriteValueAMD64_OpAMD64ADDQconst(v *Value, config *Config) bool {
return true
}
// match: (ADDQconst [c] (ADDQconst [d] x))
// cond:
// cond: is32Bit(c+d)
// result: (ADDQconst [c+d] x)
for {
c := v.AuxInt
@@ -1326,6 +1341,9 @@ func rewriteValueAMD64_OpAMD64ADDQconst(v *Value, config *Config) bool {
}
d := v.Args[0].AuxInt
x := v.Args[0].Args[0]
if !(is32Bit(c + d)) {
break
}
v.reset(OpAMD64ADDQconst)
v.AuxInt = c + d
v.AddArg(x)
@@ -3996,7 +4014,7 @@ func rewriteValueAMD64_OpAMD64LEAQ(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (LEAQ [c] {s} (ADDQconst [d] x))
// cond:
// cond: is32Bit(c+d)
// result: (LEAQ [c+d] {s} x)
for {
c := v.AuxInt
@@ -4006,6 +4024,9 @@ func rewriteValueAMD64_OpAMD64LEAQ(v *Value, config *Config) bool {
}
d := v.Args[0].AuxInt
x := v.Args[0].Args[0]
if !(is32Bit(c + d)) {
break
}
v.reset(OpAMD64LEAQ)
v.AuxInt = c + d
v.Aux = s
@@ -4034,8 +4055,8 @@ func rewriteValueAMD64_OpAMD64LEAQ(v *Value, config *Config) bool {
return true
}
// match: (LEAQ [off1] {sym1} (LEAQ [off2] {sym2} x))
// cond: canMergeSym(sym1, sym2)
// result: (LEAQ [addOff(off1,off2)] {mergeSym(sym1,sym2)} x)
// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
// result: (LEAQ [off1+off2] {mergeSym(sym1,sym2)} x)
for {
off1 := v.AuxInt
sym1 := v.Aux
@@ -4045,18 +4066,18 @@ func rewriteValueAMD64_OpAMD64LEAQ(v *Value, config *Config) bool {
off2 := v.Args[0].AuxInt
sym2 := v.Args[0].Aux
x := v.Args[0].Args[0]
if !(canMergeSym(sym1, sym2)) {
if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64LEAQ)
v.AuxInt = addOff(off1, off2)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(x)
return true
}
// match: (LEAQ [off1] {sym1} (LEAQ1 [off2] {sym2} x y))
// cond: canMergeSym(sym1, sym2)
// result: (LEAQ1 [addOff(off1,off2)] {mergeSym(sym1,sym2)} x y)
// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
// result: (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y)
for {
off1 := v.AuxInt
sym1 := v.Aux
@@ -4067,19 +4088,19 @@ func rewriteValueAMD64_OpAMD64LEAQ(v *Value, config *Config) bool {
sym2 := v.Args[0].Aux
x := v.Args[0].Args[0]
y := v.Args[0].Args[1]
if !(canMergeSym(sym1, sym2)) {
if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64LEAQ1)
v.AuxInt = addOff(off1, off2)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(x)
v.AddArg(y)
return true
}
// match: (LEAQ [off1] {sym1} (LEAQ2 [off2] {sym2} x y))
// cond: canMergeSym(sym1, sym2)
// result: (LEAQ2 [addOff(off1,off2)] {mergeSym(sym1,sym2)} x y)
// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
// result: (LEAQ2 [off1+off2] {mergeSym(sym1,sym2)} x y)
for {
off1 := v.AuxInt
sym1 := v.Aux
@@ -4090,19 +4111,19 @@ func rewriteValueAMD64_OpAMD64LEAQ(v *Value, config *Config) bool {
sym2 := v.Args[0].Aux
x := v.Args[0].Args[0]
y := v.Args[0].Args[1]
if !(canMergeSym(sym1, sym2)) {
if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64LEAQ2)
v.AuxInt = addOff(off1, off2)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(x)
v.AddArg(y)
return true
}
// match: (LEAQ [off1] {sym1} (LEAQ4 [off2] {sym2} x y))
// cond: canMergeSym(sym1, sym2)
// result: (LEAQ4 [addOff(off1,off2)] {mergeSym(sym1,sym2)} x y)
// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
// result: (LEAQ4 [off1+off2] {mergeSym(sym1,sym2)} x y)
for {
off1 := v.AuxInt
sym1 := v.Aux
@@ -4113,19 +4134,19 @@ func rewriteValueAMD64_OpAMD64LEAQ(v *Value, config *Config) bool {
sym2 := v.Args[0].Aux
x := v.Args[0].Args[0]
y := v.Args[0].Args[1]
if !(canMergeSym(sym1, sym2)) {
if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64LEAQ4)
v.AuxInt = addOff(off1, off2)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(x)
v.AddArg(y)
return true
}
// match: (LEAQ [off1] {sym1} (LEAQ8 [off2] {sym2} x y))
// cond: canMergeSym(sym1, sym2)
// result: (LEAQ8 [addOff(off1,off2)] {mergeSym(sym1,sym2)} x y)
// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
// result: (LEAQ8 [off1+off2] {mergeSym(sym1,sym2)} x y)
for {
off1 := v.AuxInt
sym1 := v.Aux
@@ -4136,11 +4157,11 @@ func rewriteValueAMD64_OpAMD64LEAQ(v *Value, config *Config) bool {
sym2 := v.Args[0].Aux
x := v.Args[0].Args[0]
y := v.Args[0].Args[1]
if !(canMergeSym(sym1, sym2)) {
if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64LEAQ8)
v.AuxInt = addOff(off1, off2)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(x)
v.AddArg(y)
@@ -4152,7 +4173,7 @@ func rewriteValueAMD64_OpAMD64LEAQ1(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (LEAQ1 [c] {s} (ADDQconst [d] x) y)
// cond: x.Op != OpSB
// cond: is32Bit(c+d) && x.Op != OpSB
// result: (LEAQ1 [c+d] {s} x y)
for {
c := v.AuxInt
@@ -4163,7 +4184,7 @@ func rewriteValueAMD64_OpAMD64LEAQ1(v *Value, config *Config) bool {
d := v.Args[0].AuxInt
x := v.Args[0].Args[0]
y := v.Args[1]
if !(x.Op != OpSB) {
if !(is32Bit(c+d) && x.Op != OpSB) {
break
}
v.reset(OpAMD64LEAQ1)
@@ -4174,7 +4195,7 @@ func rewriteValueAMD64_OpAMD64LEAQ1(v *Value, config *Config) bool {
return true
}
// match: (LEAQ1 [c] {s} x (ADDQconst [d] y))
// cond: y.Op != OpSB
// cond: is32Bit(c+d) && y.Op != OpSB
// result: (LEAQ1 [c+d] {s} x y)
for {
c := v.AuxInt
@@ -4185,7 +4206,7 @@ func rewriteValueAMD64_OpAMD64LEAQ1(v *Value, config *Config) bool {
}
d := v.Args[1].AuxInt
y := v.Args[1].Args[0]
if !(y.Op != OpSB) {
if !(is32Bit(c+d) && y.Op != OpSB) {
break
}
v.reset(OpAMD64LEAQ1)
@@ -4322,8 +4343,8 @@ func rewriteValueAMD64_OpAMD64LEAQ1(v *Value, config *Config) bool {
return true
}
// match: (LEAQ1 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
// cond: canMergeSym(sym1, sym2) && x.Op != OpSB
// result: (LEAQ1 [addOff(off1,off2)] {mergeSym(sym1,sym2)} x y)
// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB
// result: (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y)
for {
off1 := v.AuxInt
sym1 := v.Aux
@@ -4334,19 +4355,19 @@ func rewriteValueAMD64_OpAMD64LEAQ1(v *Value, config *Config) bool {
sym2 := v.Args[0].Aux
x := v.Args[0].Args[0]
y := v.Args[1]
if !(canMergeSym(sym1, sym2) && x.Op != OpSB) {
if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
break
}
v.reset(OpAMD64LEAQ1)
v.AuxInt = addOff(off1, off2)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(x)
v.AddArg(y)
return true
}
// match: (LEAQ1 [off1] {sym1} x (LEAQ [off2] {sym2} y))
// cond: canMergeSym(sym1, sym2) && y.Op != OpSB
// result: (LEAQ1 [addOff(off1,off2)] {mergeSym(sym1,sym2)} x y)
// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && y.Op != OpSB
// result: (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y)
for {
off1 := v.AuxInt
sym1 := v.Aux
@@ -4357,11 +4378,11 @@ func rewriteValueAMD64_OpAMD64LEAQ1(v *Value, config *Config) bool {
off2 := v.Args[1].AuxInt
sym2 := v.Args[1].Aux
y := v.Args[1].Args[0]
if !(canMergeSym(sym1, sym2) && y.Op != OpSB) {
if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && y.Op != OpSB) {
break
}
v.reset(OpAMD64LEAQ1)
v.AuxInt = addOff(off1, off2)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(x)
v.AddArg(y)
@@ -4373,7 +4394,7 @@ func rewriteValueAMD64_OpAMD64LEAQ2(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (LEAQ2 [c] {s} (ADDQconst [d] x) y)
// cond: x.Op != OpSB
// cond: is32Bit(c+d) && x.Op != OpSB
// result: (LEAQ2 [c+d] {s} x y)
for {
c := v.AuxInt
@@ -4384,7 +4405,7 @@ func rewriteValueAMD64_OpAMD64LEAQ2(v *Value, config *Config) bool {
d := v.Args[0].AuxInt
x := v.Args[0].Args[0]
y := v.Args[1]
if !(x.Op != OpSB) {
if !(is32Bit(c+d) && x.Op != OpSB) {
break
}
v.reset(OpAMD64LEAQ2)
@@ -4395,7 +4416,7 @@ func rewriteValueAMD64_OpAMD64LEAQ2(v *Value, config *Config) bool {
return true
}
// match: (LEAQ2 [c] {s} x (ADDQconst [d] y))
// cond: y.Op != OpSB
// cond: is32Bit(c+2*d) && y.Op != OpSB
// result: (LEAQ2 [c+2*d] {s} x y)
for {
c := v.AuxInt
@@ -4406,7 +4427,7 @@ func rewriteValueAMD64_OpAMD64LEAQ2(v *Value, config *Config) bool {
}
d := v.Args[1].AuxInt
y := v.Args[1].Args[0]
if !(y.Op != OpSB) {
if !(is32Bit(c+2*d) && y.Op != OpSB) {
break
}
v.reset(OpAMD64LEAQ2)
@@ -4459,8 +4480,8 @@ func rewriteValueAMD64_OpAMD64LEAQ2(v *Value, config *Config) bool {
return true
}
// match: (LEAQ2 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
// cond: canMergeSym(sym1, sym2) && x.Op != OpSB
// result: (LEAQ2 [addOff(off1,off2)] {mergeSym(sym1,sym2)} x y)
// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB
// result: (LEAQ2 [off1+off2] {mergeSym(sym1,sym2)} x y)
for {
off1 := v.AuxInt
sym1 := v.Aux
@@ -4471,11 +4492,11 @@ func rewriteValueAMD64_OpAMD64LEAQ2(v *Value, config *Config) bool {
sym2 := v.Args[0].Aux
x := v.Args[0].Args[0]
y := v.Args[1]
if !(canMergeSym(sym1, sym2) && x.Op != OpSB) {
if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
break
}
v.reset(OpAMD64LEAQ2)
v.AuxInt = addOff(off1, off2)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(x)
v.AddArg(y)
@@ -4487,7 +4508,7 @@ func rewriteValueAMD64_OpAMD64LEAQ4(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (LEAQ4 [c] {s} (ADDQconst [d] x) y)
// cond: x.Op != OpSB
// cond: is32Bit(c+d) && x.Op != OpSB
// result: (LEAQ4 [c+d] {s} x y)
for {
c := v.AuxInt
@@ -4498,7 +4519,7 @@ func rewriteValueAMD64_OpAMD64LEAQ4(v *Value, config *Config) bool {
d := v.Args[0].AuxInt
x := v.Args[0].Args[0]
y := v.Args[1]
if !(x.Op != OpSB) {
if !(is32Bit(c+d) && x.Op != OpSB) {
break
}
v.reset(OpAMD64LEAQ4)
@@ -4509,7 +4530,7 @@ func rewriteValueAMD64_OpAMD64LEAQ4(v *Value, config *Config) bool {
return true
}
// match: (LEAQ4 [c] {s} x (ADDQconst [d] y))
// cond: y.Op != OpSB
// cond: is32Bit(c+4*d) && y.Op != OpSB
// result: (LEAQ4 [c+4*d] {s} x y)
for {
c := v.AuxInt
@@ -4520,7 +4541,7 @@ func rewriteValueAMD64_OpAMD64LEAQ4(v *Value, config *Config) bool {
}
d := v.Args[1].AuxInt
y := v.Args[1].Args[0]
if !(y.Op != OpSB) {
if !(is32Bit(c+4*d) && y.Op != OpSB) {
break
}
v.reset(OpAMD64LEAQ4)
@@ -4552,8 +4573,8 @@ func rewriteValueAMD64_OpAMD64LEAQ4(v *Value, config *Config) bool {
return true
}
// match: (LEAQ4 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
// cond: canMergeSym(sym1, sym2) && x.Op != OpSB
// result: (LEAQ4 [addOff(off1,off2)] {mergeSym(sym1,sym2)} x y)
// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB
// result: (LEAQ4 [off1+off2] {mergeSym(sym1,sym2)} x y)
for {
off1 := v.AuxInt
sym1 := v.Aux
@@ -4564,11 +4585,11 @@ func rewriteValueAMD64_OpAMD64LEAQ4(v *Value, config *Config) bool {
sym2 := v.Args[0].Aux
x := v.Args[0].Args[0]
y := v.Args[1]
if !(canMergeSym(sym1, sym2) && x.Op != OpSB) {
if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
break
}
v.reset(OpAMD64LEAQ4)
v.AuxInt = addOff(off1, off2)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(x)
v.AddArg(y)
@@ -4580,7 +4601,7 @@ func rewriteValueAMD64_OpAMD64LEAQ8(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (LEAQ8 [c] {s} (ADDQconst [d] x) y)
// cond: x.Op != OpSB
// cond: is32Bit(c+d) && x.Op != OpSB
// result: (LEAQ8 [c+d] {s} x y)
for {
c := v.AuxInt
@@ -4591,7 +4612,7 @@ func rewriteValueAMD64_OpAMD64LEAQ8(v *Value, config *Config) bool {
d := v.Args[0].AuxInt
x := v.Args[0].Args[0]
y := v.Args[1]
if !(x.Op != OpSB) {
if !(is32Bit(c+d) && x.Op != OpSB) {
break
}
v.reset(OpAMD64LEAQ8)
@@ -4602,7 +4623,7 @@ func rewriteValueAMD64_OpAMD64LEAQ8(v *Value, config *Config) bool {
return true
}
// match: (LEAQ8 [c] {s} x (ADDQconst [d] y))
// cond: y.Op != OpSB
// cond: is32Bit(c+8*d) && y.Op != OpSB
// result: (LEAQ8 [c+8*d] {s} x y)
for {
c := v.AuxInt
@@ -4613,7 +4634,7 @@ func rewriteValueAMD64_OpAMD64LEAQ8(v *Value, config *Config) bool {
}
d := v.Args[1].AuxInt
y := v.Args[1].Args[0]
if !(y.Op != OpSB) {
if !(is32Bit(c+8*d) && y.Op != OpSB) {
break
}
v.reset(OpAMD64LEAQ8)
@@ -4624,8 +4645,8 @@ func rewriteValueAMD64_OpAMD64LEAQ8(v *Value, config *Config) bool {
return true
}
// match: (LEAQ8 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
// cond: canMergeSym(sym1, sym2) && x.Op != OpSB
// result: (LEAQ8 [addOff(off1,off2)] {mergeSym(sym1,sym2)} x y)
// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB
// result: (LEAQ8 [off1+off2] {mergeSym(sym1,sym2)} x y)
for {
off1 := v.AuxInt
sym1 := v.Aux
@@ -4636,11 +4657,11 @@ func rewriteValueAMD64_OpAMD64LEAQ8(v *Value, config *Config) bool {
sym2 := v.Args[0].Aux
x := v.Args[0].Args[0]
y := v.Args[1]
if !(canMergeSym(sym1, sym2) && x.Op != OpSB) {
if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
break
}
v.reset(OpAMD64LEAQ8)
v.AuxInt = addOff(off1, off2)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(x)
v.AddArg(y)
@@ -5625,8 +5646,8 @@ func rewriteValueAMD64_OpAMD64MOVBQSXload(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (MOVBQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
// cond: canMergeSym(sym1, sym2)
// result: (MOVBQSXload [addOff(off1,off2)] {mergeSym(sym1,sym2)} base mem)
// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
// result: (MOVBQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
for {
off1 := v.AuxInt
sym1 := v.Aux
@@ -5637,11 +5658,11 @@ func rewriteValueAMD64_OpAMD64MOVBQSXload(v *Value, config *Config) bool {
sym2 := v.Args[0].Aux
base := v.Args[0].Args[0]
mem := v.Args[1]
if !(canMergeSym(sym1, sym2)) {
if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64MOVBQSXload)
v.AuxInt = addOff(off1, off2)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(base)
v.AddArg(mem)
@@ -5696,8 +5717,8 @@ func rewriteValueAMD64_OpAMD64MOVBQZXload(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (MOVBQZXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
// cond: canMergeSym(sym1, sym2)
// result: (MOVBQZXload [addOff(off1,off2)] {mergeSym(sym1,sym2)} base mem)
// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
// result: (MOVBQZXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
for {
off1 := v.AuxInt
sym1 := v.Aux
@@ -5708,11 +5729,11 @@ func rewriteValueAMD64_OpAMD64MOVBQZXload(v *Value, config *Config) bool {
sym2 := v.Args[0].Aux
base := v.Args[0].Args[0]
mem := v.Args[1]
if !(canMergeSym(sym1, sym2)) {
if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64MOVBQZXload)
v.AuxInt = addOff(off1, off2)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(base)
v.AddArg(mem)
@@ -5746,8 +5767,8 @@ func rewriteValueAMD64_OpAMD64MOVBload(v *Value, config *Config) bool {
return true
}
// match: (MOVBload [off1] {sym} (ADDQconst [off2] ptr) mem)
// cond:
// result: (MOVBload [addOff(off1, off2)] {sym} ptr mem)
// cond: is32Bit(off1+off2)
// result: (MOVBload [off1+off2] {sym} ptr mem)
for {
off1 := v.AuxInt
sym := v.Aux
@@ -5757,16 +5778,19 @@ func rewriteValueAMD64_OpAMD64MOVBload(v *Value, config *Config) bool {
off2 := v.Args[0].AuxInt
ptr := v.Args[0].Args[0]
mem := v.Args[1]
if !(is32Bit(off1 + off2)) {
break
}
v.reset(OpAMD64MOVBload)
v.AuxInt = addOff(off1, off2)
v.AuxInt = off1 + off2
v.Aux = sym
v.AddArg(ptr)
v.AddArg(mem)
return true
}
// match: (MOVBload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
// cond: canMergeSym(sym1, sym2)
// result: (MOVBload [addOff(off1,off2)] {mergeSym(sym1,sym2)} base mem)
// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
// result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem)
for {
off1 := v.AuxInt
sym1 := v.Aux
@@ -5777,19 +5801,19 @@ func rewriteValueAMD64_OpAMD64MOVBload(v *Value, config *Config) bool {
sym2 := v.Args[0].Aux
base := v.Args[0].Args[0]
mem := v.Args[1]
if !(canMergeSym(sym1, sym2)) {
if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64MOVBload)
v.AuxInt = addOff(off1, off2)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(base)
v.AddArg(mem)
return true
}
// match: (MOVBload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem)
// cond: canMergeSym(sym1, sym2)
// result: (MOVBloadidx1 [addOff(off1, off2)] {mergeSym(sym1,sym2)} ptr idx mem)
// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
// result: (MOVBloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
for {
off1 := v.AuxInt
sym1 := v.Aux
@@ -5801,11 +5825,11 @@ func rewriteValueAMD64_OpAMD64MOVBload(v *Value, config *Config) bool {
ptr := v.Args[0].Args[0]
idx := v.Args[0].Args[1]
mem := v.Args[1]
if !(canMergeSym(sym1, sym2)) {
if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64MOVBloadidx1)
v.AuxInt = addOff(off1, off2)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(ptr)
v.AddArg(idx)
@@ -5928,8 +5952,8 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value, config *Config) bool {
return true
}
// match: (MOVBstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
// cond:
// result: (MOVBstore [addOff(off1, off2)] {sym} ptr val mem)
// cond: is32Bit(off1+off2)
// result: (MOVBstore [off1+off2] {sym} ptr val mem)
for {
off1 := v.AuxInt
sym := v.Aux
@@ -5940,8 +5964,11 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value, config *Config) bool {
ptr := v.Args[0].Args[0]
val := v.Args[1]
mem := v.Args[2]
if !(is32Bit(off1 + off2)) {
break
}
v.reset(OpAMD64MOVBstore)
v.AuxInt = addOff(off1, off2)
v.AuxInt = off1 + off2
v.Aux = sym
v.AddArg(ptr)
v.AddArg(val)
@@ -5971,8 +5998,8 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value, config *Config) bool {
return true
}
// match: (MOVBstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
// cond: canMergeSym(sym1, sym2)
// result: (MOVBstore [addOff(off1,off2)] {mergeSym(sym1,sym2)} base val mem)
// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
// result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
for {
off1 := v.AuxInt
sym1 := v.Aux
@@ -5984,11 +6011,11 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value, config *Config) bool {
base := v.Args[0].Args[0]
val := v.Args[1]
mem := v.Args[2]
if !(canMergeSym(sym1, sym2)) {
if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64MOVBstore)
v.AuxInt = addOff(off1, off2)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(base)
v.AddArg(val)
@@ -5996,8 +6023,8 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value, config *Config) bool {
return true
}
// match: (MOVBstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem)
// cond: canMergeSym(sym1, sym2)
// result: (MOVBstoreidx1 [addOff(off1, off2)] {mergeSym(sym1,sym2)} ptr idx val mem)
// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
// result: (MOVBstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
for {
off1 := v.AuxInt
sym1 := v.Aux
@@ -6010,11 +6037,11 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value, config *Config) bool {
idx := v.Args[0].Args[1]
val := v.Args[1]
mem := v.Args[2]
if !(canMergeSym(sym1, sym2)) {
if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64MOVBstoreidx1)
v.AuxInt = addOff(off1, off2)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(ptr)
v.AddArg(idx)
@@ -6292,8 +6319,8 @@ func rewriteValueAMD64_OpAMD64MOVLQSXload(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (MOVLQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
// cond: canMergeSym(sym1, sym2)
// result: (MOVLQSXload [addOff(off1,off2)] {mergeSym(sym1,sym2)} base mem)
// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
// result: (MOVLQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
for {
off1 := v.AuxInt
sym1 := v.Aux
@@ -6304,11 +6331,11 @@ func rewriteValueAMD64_OpAMD64MOVLQSXload(v *Value, config *Config) bool {
sym2 := v.Args[0].Aux
base := v.Args[0].Args[0]
mem := v.Args[1]
if !(canMergeSym(sym1, sym2)) {
if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64MOVLQSXload)
v.AuxInt = addOff(off1, off2)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(base)
v.AddArg(mem)
@@ -6344,16 +6371,19 @@ func rewriteValueAMD64_OpAMD64MOVLQZX(v *Value, config *Config) bool {
return true
}
// match: (MOVLQZX (ANDLconst [c] x))
// cond:
// result: (ANDQconst [c & 0xffffffff] x)
// cond: c & 0x80000000 == 0
// result: (ANDQconst [c & 0x7fffffff] x)
for {
if v.Args[0].Op != OpAMD64ANDLconst {
break
}
c := v.Args[0].AuxInt
x := v.Args[0].Args[0]
if !(c&0x80000000 == 0) {
break
}
v.reset(OpAMD64ANDQconst)
v.AuxInt = c & 0xffffffff
v.AuxInt = c & 0x7fffffff
v.AddArg(x)
return true
}
@@ -6363,8 +6393,8 @@ func rewriteValueAMD64_OpAMD64MOVLQZXload(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (MOVLQZXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
// cond: canMergeSym(sym1, sym2)
// result: (MOVLQZXload [addOff(off1,off2)] {mergeSym(sym1,sym2)} base mem)
// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
// result: (MOVLQZXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
for {
off1 := v.AuxInt
sym1 := v.Aux
@@ -6375,11 +6405,11 @@ func rewriteValueAMD64_OpAMD64MOVLQZXload(v *Value, config *Config) bool {
sym2 := v.Args[0].Aux
base := v.Args[0].Args[0]
mem := v.Args[1]
if !(canMergeSym(sym1, sym2)) {
if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64MOVLQZXload)
v.AuxInt = addOff(off1, off2)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(base)
v.AddArg(mem)
@@ -6413,8 +6443,8 @@ func rewriteValueAMD64_OpAMD64MOVLload(v *Value, config *Config) bool {
return true
}
// match: (MOVLload [off1] {sym} (ADDQconst [off2] ptr) mem)
// cond:
// result: (MOVLload [addOff(off1, off2)] {sym} ptr mem)
// cond: is32Bit(off1+off2)
// result: (MOVLload [off1+off2] {sym} ptr mem)
for {
off1 := v.AuxInt
sym := v.Aux
@@ -6424,16 +6454,19 @@ func rewriteValueAMD64_OpAMD64MOVLload(v *Value, config *Config) bool {
off2 := v.Args[0].AuxInt
ptr := v.Args[0].Args[0]
mem := v.Args[1]
if !(is32Bit(off1 + off2)) {
break
}
v.reset(OpAMD64MOVLload)
v.AuxInt = addOff(off1, off2)
v.AuxInt = off1 + off2
v.Aux = sym
v.AddArg(ptr)
v.AddArg(mem)
return true
}
// match: (MOVLload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
// cond: canMergeSym(sym1, sym2)
// result: (MOVLload [addOff(off1,off2)] {mergeSym(sym1,sym2)} base mem)
// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
// result: (MOVLload [off1+off2] {mergeSym(sym1,sym2)} base mem)
for {
off1 := v.AuxInt
sym1 := v.Aux
@@ -6444,19 +6477,19 @@ func rewriteValueAMD64_OpAMD64MOVLload(v *Value, config *Config) bool {
sym2 := v.Args[0].Aux
base := v.Args[0].Args[0]
mem := v.Args[1]
if !(canMergeSym(sym1, sym2)) {
if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64MOVLload)
v.AuxInt = addOff(off1, off2)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(base)
v.AddArg(mem)
return true
}
// match: (MOVLload [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) mem)
// cond: canMergeSym(sym1, sym2)
// result: (MOVLloadidx4 [addOff(off1, off2)] {mergeSym(sym1,sym2)} ptr idx mem)
// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
// result: (MOVLloadidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
for {
off1 := v.AuxInt
sym1 := v.Aux
@@ -6468,11 +6501,11 @@ func rewriteValueAMD64_OpAMD64MOVLload(v *Value, config *Config) bool {
ptr := v.Args[0].Args[0]
idx := v.Args[0].Args[1]
mem := v.Args[1]
if !(canMergeSym(sym1, sym2)) {
if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64MOVLloadidx4)
v.AuxInt = addOff(off1, off2)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(ptr)
v.AddArg(idx)
@@ -6572,8 +6605,8 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value, config *Config) bool {
return true
}
// match: (MOVLstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
// cond:
// result: (MOVLstore [addOff(off1, off2)] {sym} ptr val mem)
// cond: is32Bit(off1+off2)
// result: (MOVLstore [off1+off2] {sym} ptr val mem)
for {
off1 := v.AuxInt
sym := v.Aux
@@ -6584,8 +6617,11 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value, config *Config) bool {
ptr := v.Args[0].Args[0]
val := v.Args[1]
mem := v.Args[2]
if !(is32Bit(off1 + off2)) {
break
}
v.reset(OpAMD64MOVLstore)
v.AuxInt = addOff(off1, off2)
v.AuxInt = off1 + off2
v.Aux = sym
v.AddArg(ptr)
v.AddArg(val)
@@ -6615,8 +6651,8 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value, config *Config) bool {
return true
}
// match: (MOVLstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
// cond: canMergeSym(sym1, sym2)
// result: (MOVLstore [addOff(off1,off2)] {mergeSym(sym1,sym2)} base val mem)
// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
// result: (MOVLstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
for {
off1 := v.AuxInt
sym1 := v.Aux
@@ -6628,11 +6664,11 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value, config *Config) bool {
base := v.Args[0].Args[0]
val := v.Args[1]
mem := v.Args[2]
if !(canMergeSym(sym1, sym2)) {
if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64MOVLstore)
v.AuxInt = addOff(off1, off2)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(base)
v.AddArg(val)
@@ -6640,8 +6676,8 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value, config *Config) bool {
return true
}
// match: (MOVLstore [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) val mem)
// cond: canMergeSym(sym1, sym2)
// result: (MOVLstoreidx4 [addOff(off1, off2)] {mergeSym(sym1,sym2)} ptr idx val mem)
// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
// result: (MOVLstoreidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
for {
off1 := v.AuxInt
sym1 := v.Aux
@@ -6654,11 +6690,11 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value, config *Config) bool {
idx := v.Args[0].Args[1]
val := v.Args[1]
mem := v.Args[2]
if !(canMergeSym(sym1, sym2)) {
if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64MOVLstoreidx4)
v.AuxInt = addOff(off1, off2)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(ptr)
v.AddArg(idx)
@@ -6845,8 +6881,8 @@ func rewriteValueAMD64_OpAMD64MOVOload(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (MOVOload [off1] {sym} (ADDQconst [off2] ptr) mem)
// cond:
// result: (MOVOload [addOff(off1, off2)] {sym} ptr mem)
// cond: is32Bit(off1+off2)
// result: (MOVOload [off1+off2] {sym} ptr mem)
for {
off1 := v.AuxInt
sym := v.Aux
@@ -6856,16 +6892,19 @@ func rewriteValueAMD64_OpAMD64MOVOload(v *Value, config *Config) bool {
off2 := v.Args[0].AuxInt
ptr := v.Args[0].Args[0]
mem := v.Args[1]
if !(is32Bit(off1 + off2)) {
break
}
v.reset(OpAMD64MOVOload)
v.AuxInt = addOff(off1, off2)
v.AuxInt = off1 + off2
v.Aux = sym
v.AddArg(ptr)
v.AddArg(mem)
return true
}
// match: (MOVOload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
// cond: canMergeSym(sym1, sym2)
// result: (MOVOload [addOff(off1,off2)] {mergeSym(sym1,sym2)} base mem)
// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
// result: (MOVOload [off1+off2] {mergeSym(sym1,sym2)} base mem)
for {
off1 := v.AuxInt
sym1 := v.Aux
@@ -6876,11 +6915,11 @@ func rewriteValueAMD64_OpAMD64MOVOload(v *Value, config *Config) bool {
sym2 := v.Args[0].Aux
base := v.Args[0].Args[0]
mem := v.Args[1]
if !(canMergeSym(sym1, sym2)) {
if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64MOVOload)
v.AuxInt = addOff(off1, off2)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(base)
v.AddArg(mem)
@@ -6892,8 +6931,8 @@ func rewriteValueAMD64_OpAMD64MOVOstore(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (MOVOstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
// cond:
// result: (MOVOstore [addOff(off1, off2)] {sym} ptr val mem)
// cond: is32Bit(off1+off2)
// result: (MOVOstore [off1+off2] {sym} ptr val mem)
for {
off1 := v.AuxInt
sym := v.Aux
@@ -6904,8 +6943,11 @@ func rewriteValueAMD64_OpAMD64MOVOstore(v *Value, config *Config) bool {
ptr := v.Args[0].Args[0]
val := v.Args[1]
mem := v.Args[2]
if !(is32Bit(off1 + off2)) {
break
}
v.reset(OpAMD64MOVOstore)
v.AuxInt = addOff(off1, off2)
v.AuxInt = off1 + off2
v.Aux = sym
v.AddArg(ptr)
v.AddArg(val)
@@ -6913,8 +6955,8 @@ func rewriteValueAMD64_OpAMD64MOVOstore(v *Value, config *Config) bool {
return true
}
// match: (MOVOstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
// cond: canMergeSym(sym1, sym2)
// result: (MOVOstore [addOff(off1,off2)] {mergeSym(sym1,sym2)} base val mem)
// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
// result: (MOVOstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
for {
off1 := v.AuxInt
sym1 := v.Aux
@@ -6926,11 +6968,11 @@ func rewriteValueAMD64_OpAMD64MOVOstore(v *Value, config *Config) bool {
base := v.Args[0].Args[0]
val := v.Args[1]
mem := v.Args[2]
if !(canMergeSym(sym1, sym2)) {
if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64MOVOstore)
v.AuxInt = addOff(off1, off2)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(base)
v.AddArg(val)
@@ -6965,8 +7007,8 @@ func rewriteValueAMD64_OpAMD64MOVQload(v *Value, config *Config) bool {
return true
}
// match: (MOVQload [off1] {sym} (ADDQconst [off2] ptr) mem)
// cond:
// result: (MOVQload [addOff(off1, off2)] {sym} ptr mem)
// cond: is32Bit(off1+off2)
// result: (MOVQload [off1+off2] {sym} ptr mem)
for {
off1 := v.AuxInt
sym := v.Aux
@@ -6976,16 +7018,19 @@ func rewriteValueAMD64_OpAMD64MOVQload(v *Value, config *Config) bool {
off2 := v.Args[0].AuxInt
ptr := v.Args[0].Args[0]
mem := v.Args[1]
if !(is32Bit(off1 + off2)) {
break
}
v.reset(OpAMD64MOVQload)
v.AuxInt = addOff(off1, off2)
v.AuxInt = off1 + off2
v.Aux = sym
v.AddArg(ptr)
v.AddArg(mem)
return true
}
// match: (MOVQload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
// cond: canMergeSym(sym1, sym2)
// result: (MOVQload [addOff(off1,off2)] {mergeSym(sym1,sym2)} base mem)
// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
// result: (MOVQload [off1+off2] {mergeSym(sym1,sym2)} base mem)
for {
off1 := v.AuxInt
sym1 := v.Aux
@@ -6996,19 +7041,19 @@ func rewriteValueAMD64_OpAMD64MOVQload(v *Value, config *Config) bool {
sym2 := v.Args[0].Aux
base := v.Args[0].Args[0]
mem := v.Args[1]
if !(canMergeSym(sym1, sym2)) {
if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64MOVQload)
v.AuxInt = addOff(off1, off2)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(base)
v.AddArg(mem)
return true
}
// match: (MOVQload [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) mem)
// cond: canMergeSym(sym1, sym2)
// result: (MOVQloadidx8 [addOff(off1, off2)] {mergeSym(sym1,sym2)} ptr idx mem)
// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
// result: (MOVQloadidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
for {
off1 := v.AuxInt
sym1 := v.Aux
......@@ -7020,11 +7065,11 @@ func rewriteValueAMD64_OpAMD64MOVQload(v *Value, config *Config) bool {
ptr := v.Args[0].Args[0]
idx := v.Args[0].Args[1]
mem := v.Args[1]
if !(canMergeSym(sym1, sym2)) {
if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64MOVQloadidx8)
v.AuxInt = addOff(off1, off2)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(ptr)
v.AddArg(idx)
......@@ -7084,8 +7129,8 @@ func rewriteValueAMD64_OpAMD64MOVQstore(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (MOVQstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
// cond:
// result: (MOVQstore [addOff(off1, off2)] {sym} ptr val mem)
// cond: is32Bit(off1+off2)
// result: (MOVQstore [off1+off2] {sym} ptr val mem)
for {
off1 := v.AuxInt
sym := v.Aux
......@@ -7096,8 +7141,11 @@ func rewriteValueAMD64_OpAMD64MOVQstore(v *Value, config *Config) bool {
ptr := v.Args[0].Args[0]
val := v.Args[1]
mem := v.Args[2]
if !(is32Bit(off1 + off2)) {
break
}
v.reset(OpAMD64MOVQstore)
v.AuxInt = addOff(off1, off2)
v.AuxInt = off1 + off2
v.Aux = sym
v.AddArg(ptr)
v.AddArg(val)
......@@ -7127,8 +7175,8 @@ func rewriteValueAMD64_OpAMD64MOVQstore(v *Value, config *Config) bool {
return true
}
// match: (MOVQstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
// cond: canMergeSym(sym1, sym2)
// result: (MOVQstore [addOff(off1,off2)] {mergeSym(sym1,sym2)} base val mem)
// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
// result: (MOVQstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
for {
off1 := v.AuxInt
sym1 := v.Aux
......@@ -7140,11 +7188,11 @@ func rewriteValueAMD64_OpAMD64MOVQstore(v *Value, config *Config) bool {
base := v.Args[0].Args[0]
val := v.Args[1]
mem := v.Args[2]
if !(canMergeSym(sym1, sym2)) {
if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64MOVQstore)
v.AuxInt = addOff(off1, off2)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(base)
v.AddArg(val)
......@@ -7152,8 +7200,8 @@ func rewriteValueAMD64_OpAMD64MOVQstore(v *Value, config *Config) bool {
return true
}
// match: (MOVQstore [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) val mem)
// cond: canMergeSym(sym1, sym2)
// result: (MOVQstoreidx8 [addOff(off1, off2)] {mergeSym(sym1,sym2)} ptr idx val mem)
// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
// result: (MOVQstoreidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
for {
off1 := v.AuxInt
sym1 := v.Aux
......@@ -7166,11 +7214,11 @@ func rewriteValueAMD64_OpAMD64MOVQstore(v *Value, config *Config) bool {
idx := v.Args[0].Args[1]
val := v.Args[1]
mem := v.Args[2]
if !(canMergeSym(sym1, sym2)) {
if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64MOVQstoreidx8)
v.AuxInt = addOff(off1, off2)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(ptr)
v.AddArg(idx)
......@@ -7357,8 +7405,8 @@ func rewriteValueAMD64_OpAMD64MOVSDload(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (MOVSDload [off1] {sym} (ADDQconst [off2] ptr) mem)
// cond:
// result: (MOVSDload [addOff(off1, off2)] {sym} ptr mem)
// cond: is32Bit(off1+off2)
// result: (MOVSDload [off1+off2] {sym} ptr mem)
for {
off1 := v.AuxInt
sym := v.Aux
......@@ -7368,16 +7416,19 @@ func rewriteValueAMD64_OpAMD64MOVSDload(v *Value, config *Config) bool {
off2 := v.Args[0].AuxInt
ptr := v.Args[0].Args[0]
mem := v.Args[1]
if !(is32Bit(off1 + off2)) {
break
}
v.reset(OpAMD64MOVSDload)
v.AuxInt = addOff(off1, off2)
v.AuxInt = off1 + off2
v.Aux = sym
v.AddArg(ptr)
v.AddArg(mem)
return true
}
// match: (MOVSDload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
// cond: canMergeSym(sym1, sym2)
// result: (MOVSDload [addOff(off1,off2)] {mergeSym(sym1,sym2)} base mem)
// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
// result: (MOVSDload [off1+off2] {mergeSym(sym1,sym2)} base mem)
for {
off1 := v.AuxInt
sym1 := v.Aux
......@@ -7388,19 +7439,19 @@ func rewriteValueAMD64_OpAMD64MOVSDload(v *Value, config *Config) bool {
sym2 := v.Args[0].Aux
base := v.Args[0].Args[0]
mem := v.Args[1]
if !(canMergeSym(sym1, sym2)) {
if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64MOVSDload)
v.AuxInt = addOff(off1, off2)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(base)
v.AddArg(mem)
return true
}
// match: (MOVSDload [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) mem)
// cond: canMergeSym(sym1, sym2)
// result: (MOVSDloadidx8 [addOff(off1, off2)] {mergeSym(sym1,sym2)} ptr idx mem)
// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
// result: (MOVSDloadidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
for {
off1 := v.AuxInt
sym1 := v.Aux
......@@ -7412,11 +7463,11 @@ func rewriteValueAMD64_OpAMD64MOVSDload(v *Value, config *Config) bool {
ptr := v.Args[0].Args[0]
idx := v.Args[0].Args[1]
mem := v.Args[1]
if !(canMergeSym(sym1, sym2)) {
if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64MOVSDloadidx8)
v.AuxInt = addOff(off1, off2)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(ptr)
v.AddArg(idx)
......@@ -7476,8 +7527,8 @@ func rewriteValueAMD64_OpAMD64MOVSDstore(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (MOVSDstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
// cond:
// result: (MOVSDstore [addOff(off1, off2)] {sym} ptr val mem)
// cond: is32Bit(off1+off2)
// result: (MOVSDstore [off1+off2] {sym} ptr val mem)
for {
off1 := v.AuxInt
sym := v.Aux
......@@ -7488,8 +7539,11 @@ func rewriteValueAMD64_OpAMD64MOVSDstore(v *Value, config *Config) bool {
ptr := v.Args[0].Args[0]
val := v.Args[1]
mem := v.Args[2]
if !(is32Bit(off1 + off2)) {
break
}
v.reset(OpAMD64MOVSDstore)
v.AuxInt = addOff(off1, off2)
v.AuxInt = off1 + off2
v.Aux = sym
v.AddArg(ptr)
v.AddArg(val)
......@@ -7497,8 +7551,8 @@ func rewriteValueAMD64_OpAMD64MOVSDstore(v *Value, config *Config) bool {
return true
}
// match: (MOVSDstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
// cond: canMergeSym(sym1, sym2)
// result: (MOVSDstore [addOff(off1,off2)] {mergeSym(sym1,sym2)} base val mem)
// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
// result: (MOVSDstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
for {
off1 := v.AuxInt
sym1 := v.Aux
......@@ -7510,11 +7564,11 @@ func rewriteValueAMD64_OpAMD64MOVSDstore(v *Value, config *Config) bool {
base := v.Args[0].Args[0]
val := v.Args[1]
mem := v.Args[2]
if !(canMergeSym(sym1, sym2)) {
if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64MOVSDstore)
v.AuxInt = addOff(off1, off2)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(base)
v.AddArg(val)
......@@ -7522,8 +7576,8 @@ func rewriteValueAMD64_OpAMD64MOVSDstore(v *Value, config *Config) bool {
return true
}
// match: (MOVSDstore [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) val mem)
// cond: canMergeSym(sym1, sym2)
// result: (MOVSDstoreidx8 [addOff(off1, off2)] {mergeSym(sym1,sym2)} ptr idx val mem)
// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
// result: (MOVSDstoreidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
for {
off1 := v.AuxInt
sym1 := v.Aux
......@@ -7536,11 +7590,11 @@ func rewriteValueAMD64_OpAMD64MOVSDstore(v *Value, config *Config) bool {
idx := v.Args[0].Args[1]
val := v.Args[1]
mem := v.Args[2]
if !(canMergeSym(sym1, sym2)) {
if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64MOVSDstoreidx8)
v.AuxInt = addOff(off1, off2)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(ptr)
v.AddArg(idx)
......@@ -7605,8 +7659,8 @@ func rewriteValueAMD64_OpAMD64MOVSSload(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (MOVSSload [off1] {sym} (ADDQconst [off2] ptr) mem)
// cond:
// result: (MOVSSload [addOff(off1, off2)] {sym} ptr mem)
// cond: is32Bit(off1+off2)
// result: (MOVSSload [off1+off2] {sym} ptr mem)
for {
off1 := v.AuxInt
sym := v.Aux
......@@ -7616,16 +7670,19 @@ func rewriteValueAMD64_OpAMD64MOVSSload(v *Value, config *Config) bool {
off2 := v.Args[0].AuxInt
ptr := v.Args[0].Args[0]
mem := v.Args[1]
if !(is32Bit(off1 + off2)) {
break
}
v.reset(OpAMD64MOVSSload)
v.AuxInt = addOff(off1, off2)
v.AuxInt = off1 + off2
v.Aux = sym
v.AddArg(ptr)
v.AddArg(mem)
return true
}
// match: (MOVSSload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
// cond: canMergeSym(sym1, sym2)
// result: (MOVSSload [addOff(off1,off2)] {mergeSym(sym1,sym2)} base mem)
// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
// result: (MOVSSload [off1+off2] {mergeSym(sym1,sym2)} base mem)
for {
off1 := v.AuxInt
sym1 := v.Aux
......@@ -7636,19 +7693,19 @@ func rewriteValueAMD64_OpAMD64MOVSSload(v *Value, config *Config) bool {
sym2 := v.Args[0].Aux
base := v.Args[0].Args[0]
mem := v.Args[1]
if !(canMergeSym(sym1, sym2)) {
if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64MOVSSload)
v.AuxInt = addOff(off1, off2)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(base)
v.AddArg(mem)
return true
}
// match: (MOVSSload [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) mem)
// cond: canMergeSym(sym1, sym2)
// result: (MOVSSloadidx4 [addOff(off1, off2)] {mergeSym(sym1,sym2)} ptr idx mem)
// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
// result: (MOVSSloadidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
for {
off1 := v.AuxInt
sym1 := v.Aux
......@@ -7660,11 +7717,11 @@ func rewriteValueAMD64_OpAMD64MOVSSload(v *Value, config *Config) bool {
ptr := v.Args[0].Args[0]
idx := v.Args[0].Args[1]
mem := v.Args[1]
if !(canMergeSym(sym1, sym2)) {
if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64MOVSSloadidx4)
v.AuxInt = addOff(off1, off2)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(ptr)
v.AddArg(idx)
......@@ -7724,8 +7781,8 @@ func rewriteValueAMD64_OpAMD64MOVSSstore(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (MOVSSstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
// cond:
// result: (MOVSSstore [addOff(off1, off2)] {sym} ptr val mem)
// cond: is32Bit(off1+off2)
// result: (MOVSSstore [off1+off2] {sym} ptr val mem)
for {
off1 := v.AuxInt
sym := v.Aux
......@@ -7736,8 +7793,11 @@ func rewriteValueAMD64_OpAMD64MOVSSstore(v *Value, config *Config) bool {
ptr := v.Args[0].Args[0]
val := v.Args[1]
mem := v.Args[2]
if !(is32Bit(off1 + off2)) {
break
}
v.reset(OpAMD64MOVSSstore)
v.AuxInt = addOff(off1, off2)
v.AuxInt = off1 + off2
v.Aux = sym
v.AddArg(ptr)
v.AddArg(val)
......@@ -7745,8 +7805,8 @@ func rewriteValueAMD64_OpAMD64MOVSSstore(v *Value, config *Config) bool {
return true
}
// match: (MOVSSstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
// cond: canMergeSym(sym1, sym2)
// result: (MOVSSstore [addOff(off1,off2)] {mergeSym(sym1,sym2)} base val mem)
// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
// result: (MOVSSstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
for {
off1 := v.AuxInt
sym1 := v.Aux
......@@ -7758,11 +7818,11 @@ func rewriteValueAMD64_OpAMD64MOVSSstore(v *Value, config *Config) bool {
base := v.Args[0].Args[0]
val := v.Args[1]
mem := v.Args[2]
if !(canMergeSym(sym1, sym2)) {
if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64MOVSSstore)
v.AuxInt = addOff(off1, off2)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(base)
v.AddArg(val)
......@@ -7770,8 +7830,8 @@ func rewriteValueAMD64_OpAMD64MOVSSstore(v *Value, config *Config) bool {
return true
}
// match: (MOVSSstore [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) val mem)
// cond: canMergeSym(sym1, sym2)
// result: (MOVSSstoreidx4 [addOff(off1, off2)] {mergeSym(sym1,sym2)} ptr idx val mem)
// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
// result: (MOVSSstoreidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
for {
off1 := v.AuxInt
sym1 := v.Aux
......@@ -7784,11 +7844,11 @@ func rewriteValueAMD64_OpAMD64MOVSSstore(v *Value, config *Config) bool {
idx := v.Args[0].Args[1]
val := v.Args[1]
mem := v.Args[2]
if !(canMergeSym(sym1, sym2)) {
if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64MOVSSstoreidx4)
v.AuxInt = addOff(off1, off2)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(ptr)
v.AddArg(idx)
......@@ -7899,8 +7959,8 @@ func rewriteValueAMD64_OpAMD64MOVWQSXload(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (MOVWQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
// cond: canMergeSym(sym1, sym2)
// result: (MOVWQSXload [addOff(off1,off2)] {mergeSym(sym1,sym2)} base mem)
// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
// result: (MOVWQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
for {
off1 := v.AuxInt
sym1 := v.Aux
......@@ -7911,11 +7971,11 @@ func rewriteValueAMD64_OpAMD64MOVWQSXload(v *Value, config *Config) bool {
sym2 := v.Args[0].Aux
base := v.Args[0].Args[0]
mem := v.Args[1]
if !(canMergeSym(sym1, sym2)) {
if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64MOVWQSXload)
v.AuxInt = addOff(off1, off2)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(base)
v.AddArg(mem)
......@@ -7970,8 +8030,8 @@ func rewriteValueAMD64_OpAMD64MOVWQZXload(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (MOVWQZXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
// cond: canMergeSym(sym1, sym2)
// result: (MOVWQZXload [addOff(off1,off2)] {mergeSym(sym1,sym2)} base mem)
// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
// result: (MOVWQZXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
for {
off1 := v.AuxInt
sym1 := v.Aux
......@@ -7982,11 +8042,11 @@ func rewriteValueAMD64_OpAMD64MOVWQZXload(v *Value, config *Config) bool {
sym2 := v.Args[0].Aux
base := v.Args[0].Args[0]
mem := v.Args[1]
if !(canMergeSym(sym1, sym2)) {
if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64MOVWQZXload)
v.AuxInt = addOff(off1, off2)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(base)
v.AddArg(mem)
......@@ -8020,8 +8080,8 @@ func rewriteValueAMD64_OpAMD64MOVWload(v *Value, config *Config) bool {
return true
}
// match: (MOVWload [off1] {sym} (ADDQconst [off2] ptr) mem)
// cond:
// result: (MOVWload [addOff(off1, off2)] {sym} ptr mem)
// cond: is32Bit(off1+off2)
// result: (MOVWload [off1+off2] {sym} ptr mem)
for {
off1 := v.AuxInt
sym := v.Aux
......@@ -8031,16 +8091,19 @@ func rewriteValueAMD64_OpAMD64MOVWload(v *Value, config *Config) bool {
off2 := v.Args[0].AuxInt
ptr := v.Args[0].Args[0]
mem := v.Args[1]
if !(is32Bit(off1 + off2)) {
break
}
v.reset(OpAMD64MOVWload)
v.AuxInt = addOff(off1, off2)
v.AuxInt = off1 + off2
v.Aux = sym
v.AddArg(ptr)
v.AddArg(mem)
return true
}
// match: (MOVWload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
// cond: canMergeSym(sym1, sym2)
// result: (MOVWload [addOff(off1,off2)] {mergeSym(sym1,sym2)} base mem)
// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
// result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} base mem)
for {
off1 := v.AuxInt
sym1 := v.Aux
......@@ -8051,19 +8114,19 @@ func rewriteValueAMD64_OpAMD64MOVWload(v *Value, config *Config) bool {
sym2 := v.Args[0].Aux
base := v.Args[0].Args[0]
mem := v.Args[1]
if !(canMergeSym(sym1, sym2)) {
if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64MOVWload)
v.AuxInt = addOff(off1, off2)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(base)
v.AddArg(mem)
return true
}
// match: (MOVWload [off1] {sym1} (LEAQ2 [off2] {sym2} ptr idx) mem)
// cond: canMergeSym(sym1, sym2)
// result: (MOVWloadidx2 [addOff(off1, off2)] {mergeSym(sym1,sym2)} ptr idx mem)
// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
// result: (MOVWloadidx2 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
for {
off1 := v.AuxInt
sym1 := v.Aux
......@@ -8075,11 +8138,11 @@ func rewriteValueAMD64_OpAMD64MOVWload(v *Value, config *Config) bool {
ptr := v.Args[0].Args[0]
idx := v.Args[0].Args[1]
mem := v.Args[1]
if !(canMergeSym(sym1, sym2)) {
if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64MOVWloadidx2)
v.AuxInt = addOff(off1, off2)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(ptr)
v.AddArg(idx)
......@@ -8179,8 +8242,8 @@ func rewriteValueAMD64_OpAMD64MOVWstore(v *Value, config *Config) bool {
return true
}
// match: (MOVWstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
// cond:
// result: (MOVWstore [addOff(off1, off2)] {sym} ptr val mem)
// cond: is32Bit(off1+off2)
// result: (MOVWstore [off1+off2] {sym} ptr val mem)
for {
off1 := v.AuxInt
sym := v.Aux
......@@ -8191,8 +8254,11 @@ func rewriteValueAMD64_OpAMD64MOVWstore(v *Value, config *Config) bool {
ptr := v.Args[0].Args[0]
val := v.Args[1]
mem := v.Args[2]
if !(is32Bit(off1 + off2)) {
break
}
v.reset(OpAMD64MOVWstore)
v.AuxInt = addOff(off1, off2)
v.AuxInt = off1 + off2
v.Aux = sym
v.AddArg(ptr)
v.AddArg(val)
......@@ -8222,8 +8288,8 @@ func rewriteValueAMD64_OpAMD64MOVWstore(v *Value, config *Config) bool {
return true
}
// match: (MOVWstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
// cond: canMergeSym(sym1, sym2)
// result: (MOVWstore [addOff(off1,off2)] {mergeSym(sym1,sym2)} base val mem)
// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
// result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
for {
off1 := v.AuxInt
sym1 := v.Aux
......@@ -8235,11 +8301,11 @@ func rewriteValueAMD64_OpAMD64MOVWstore(v *Value, config *Config) bool {
base := v.Args[0].Args[0]
val := v.Args[1]
mem := v.Args[2]
if !(canMergeSym(sym1, sym2)) {
if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64MOVWstore)
v.AuxInt = addOff(off1, off2)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(base)
v.AddArg(val)
......@@ -8247,8 +8313,8 @@ func rewriteValueAMD64_OpAMD64MOVWstore(v *Value, config *Config) bool {
return true
}
// match: (MOVWstore [off1] {sym1} (LEAQ2 [off2] {sym2} ptr idx) val mem)
// cond: canMergeSym(sym1, sym2)
// result: (MOVWstoreidx2 [addOff(off1, off2)] {mergeSym(sym1,sym2)} ptr idx val mem)
// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
// result: (MOVWstoreidx2 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
for {
off1 := v.AuxInt
sym1 := v.Aux
......@@ -8261,11 +8327,11 @@ func rewriteValueAMD64_OpAMD64MOVWstore(v *Value, config *Config) bool {
idx := v.Args[0].Args[1]
val := v.Args[1]
mem := v.Args[2]
if !(canMergeSym(sym1, sym2)) {
if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64MOVWstoreidx2)
v.AuxInt = addOff(off1, off2)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(ptr)
v.AddArg(idx)
......@@ -10418,16 +10484,32 @@ func rewriteValueAMD64_OpOffPtr(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (OffPtr [off] ptr)
// cond:
// cond: is32Bit(off)
// result: (ADDQconst [off] ptr)
for {
off := v.AuxInt
ptr := v.Args[0]
if !(is32Bit(off)) {
break
}
v.reset(OpAMD64ADDQconst)
v.AuxInt = off
v.AddArg(ptr)
return true
}
// match: (OffPtr [off] ptr)
// cond:
// result: (ADDQ (MOVQconst [off]) ptr)
for {
off := v.AuxInt
ptr := v.Args[0]
v.reset(OpAMD64ADDQ)
v0 := b.NewValue0(v.Line, OpAMD64MOVQconst, config.fe.TypeUInt64())
v0.AuxInt = off
v.AddArg(v0)
v.AddArg(ptr)
return true
}
return false
}
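// Aside: the two OffPtr rules above act as an if/else. A hedged sketch
// of the decision they encode (illustrative names, not the compiler's
// API; the real rules operate on SSA values):
//
//	// lowerOffPtr picks an addressing strategy for ptr+off.
//	func lowerOffPtr(off int64) string {
//		if off == int64(int32(off)) { // same test as is32Bit
//			// A 32-bit offset rides along as an ADDQconst immediate.
//			return "ADDQconst [off] ptr"
//		}
//		// A wider offset must first be materialized into a register
//		// by MOVQconst, then added with a full 64-bit ADDQ.
//		return "ADDQ (MOVQconst [off]) ptr"
//	}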
func rewriteValueAMD64_OpOr16(v *Value, config *Config) bool {
......@@ -13257,7 +13339,7 @@ func rewriteValueAMD64_OpAMD64SUBQconst(v *Value, config *Config) bool {
return true
}
// match: (SUBQconst [c] (SUBQconst [d] x))
// cond:
// cond: is32Bit(-c-d)
// result: (ADDQconst [-c-d] x)
for {
c := v.AuxInt
......@@ -13266,6 +13348,9 @@ func rewriteValueAMD64_OpAMD64SUBQconst(v *Value, config *Config) bool {
}
d := v.Args[0].AuxInt
x := v.Args[0].Args[0]
if !(is32Bit(-c - d)) {
break
}
v.reset(OpAMD64ADDQconst)
v.AuxInt = -c - d
v.AddArg(x)
......
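// Aside: the is32Bit(-c-d) guard on the SUBQconst fold is not
// redundant even though c and d each fit in 32 bits on their own. A
// minimal standalone demonstration (not compiler code):
//
//	package main
//
//	import (
//		"fmt"
//		"math"
//	)
//
//	func is32Bit(n int64) bool { return n == int64(int32(n)) }
//
//	func main() {
//		c, d := int64(math.MinInt32), int64(math.MinInt32)
//		// Both constants fit in 32 bits, but the folded sum does not:
//		fmt.Println(is32Bit(c), is32Bit(d)) // true true
//		fmt.Println(is32Bit(-c - d))        // false: -c-d == 1<<32
//	}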
......@@ -2750,6 +2750,16 @@ func asmandsz(ctxt *obj.Link, p *obj.Prog, a *obj.Addr, r int, rex int, m64 int)
var rel obj.Reloc
rex &= 0x40 | Rxr
switch {
case int64(int32(a.Offset)) == a.Offset:
// Offset fits in sign-extended 32 bits.
case int64(uint32(a.Offset)) == a.Offset && ctxt.Rexflag&Rxw == 0:
// Offset fits in zero-extended 32 bits in a 32-bit instruction.
// This is allowed for assembly that wants to use 32-bit hex
// constants, e.g. LEAL 0x99999999(AX), AX.
default:
ctxt.Diag("offset too large in %s", p)
}
v := int32(a.Offset)
rel.Siz = 0
......
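// Aside: the two accepting cases in the switch above test different
// things. A standalone sketch of both fit tests (illustrative names,
// not the assembler's API):
//
//	// fitsSigned32 mirrors the first case: the offset is usable as a
//	// displacement if sign-extending its low 32 bits reproduces it.
//	func fitsSigned32(off int64) bool { return int64(int32(off)) == off }
//
//	// fitsUnsigned32 mirrors the second case, accepted only when the
//	// REX.W bit is clear (a 32-bit instruction), so hex constants
//	// such as 0x99999999 keep working in 32-bit assembly.
//	func fitsUnsigned32(off int64) bool { return int64(uint32(off)) == off }
//
// For example, off = -4 passes fitsSigned32 but not fitsUnsigned32,
// while off = 0x99999999 passes only fitsUnsigned32; an offset that
// fails both now triggers ctxt.Diag("offset too large in %s", p)
// instead of being silently truncated.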