Commit 9278a04a authored by Keith Randall

[dev.ssa] cmd/compile: more combining of ops into instructions

Mostly indexed loads.  A few more LEA cases.

Change-Id: Idc1d447ed0dd6e906cd48e70307a95e77f61cf5f
Reviewed-on: https://go-review.googlesource.com/19172
Reviewed-by: Todd Neal <todd@tneal.org>
Run-TryBot: Keith Randall <khr@golang.org>
parent 7de8cfdf
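As a rough illustration of what the commit enables (not part of the commit itself), a plain slice index like the one below can now be selected as a single indexed load such as MOVLloadidx4, rather than a separate shift/add to form the address followed by a plain MOVL. Whether the pattern fires depends on the surrounding SSA; this is only a sketch.

```go
package main

import "fmt"

// loadIdx returns a[i]. With the new rules, the load can use
// base+index*4 addressing (MOVLloadidx4) directly.
func loadIdx(a []int32, i int) int32 {
	return a[i]
}

func main() {
	fmt.Println(loadIdx([]int32{10, 20, 30}, 2)) // 30
}
```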
@@ -4061,7 +4061,7 @@ func (s *genState) genValue(v *ssa.Value) {
p.From.Index = regnum(v.Args[1])
p.To.Type = obj.TYPE_REG
p.To.Reg = regnum(v)
case ssa.OpAMD64MOVSSloadidx4:
case ssa.OpAMD64MOVLloadidx4, ssa.OpAMD64MOVSSloadidx4:
p := Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
p.From.Reg = regnum(v.Args[0])
@@ -4070,6 +4070,24 @@ func (s *genState) genValue(v *ssa.Value) {
p.From.Index = regnum(v.Args[1])
p.To.Type = obj.TYPE_REG
p.To.Reg = regnum(v)
case ssa.OpAMD64MOVWloadidx2:
p := Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
p.From.Reg = regnum(v.Args[0])
addAux(&p.From, v)
p.From.Scale = 2
p.From.Index = regnum(v.Args[1])
p.To.Type = obj.TYPE_REG
p.To.Reg = regnum(v)
case ssa.OpAMD64MOVBloadidx1:
p := Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
p.From.Reg = regnum(v.Args[0])
addAux(&p.From, v)
p.From.Scale = 1
p.From.Index = regnum(v.Args[1])
p.To.Type = obj.TYPE_REG
p.To.Reg = regnum(v)
case ssa.OpAMD64MOVQstore, ssa.OpAMD64MOVSSstore, ssa.OpAMD64MOVSDstore, ssa.OpAMD64MOVLstore, ssa.OpAMD64MOVWstore, ssa.OpAMD64MOVBstore, ssa.OpAMD64MOVOstore:
p := Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
......
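For background on the genValue cases above (not part of the diff): the From.Reg, From.Index, From.Scale, and addAux fields together describe an off(base)(index*scale) memory operand. A simplified, stand-alone sketch with stand-in types, not the real cmd/internal/obj API:

```go
package main

import "fmt"

// memOperand is a simplified stand-in for the memory-operand fields
// set in the MOVxloadidxN cases above.
type memOperand struct {
	Reg    string // base register
	Index  string // index register
	Scale  int    // 1, 2, 4, or 8
	Offset int64  // displacement (auxint/aux)
}

func (m memOperand) String() string {
	return fmt.Sprintf("%d(%s)(%s*%d)", m.Offset, m.Reg, m.Index, m.Scale)
}

func main() {
	// Roughly the operand a MOVLloadidx4 case builds: MOVL 8(AX)(CX*4), DX
	src := memOperand{Reg: "AX", Index: "CX", Scale: 4, Offset: 8}
	fmt.Printf("MOVL %s, DX\n", src)
}
```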
@@ -546,9 +546,34 @@
(MULQconst [9] x) -> (LEAQ8 x x)
(MULQconst [c] x) && isPowerOfTwo(c) -> (SHLQconst [log2(c)] x)
// fold add/shift into leaq
// combine add/shift into LEAQ
(ADDQ x (SHLQconst [3] y)) -> (LEAQ8 x y)
(ADDQconst [c] (LEAQ8 [d] x y)) -> (LEAQ8 [addOff(c, d)] x y)
(ADDQ x (SHLQconst [2] y)) -> (LEAQ4 x y)
(ADDQ x (SHLQconst [1] y)) -> (LEAQ2 x y)
(ADDQ x (ADDQ y y)) -> (LEAQ2 x y)
(ADDQ x (ADDQ x y)) -> (LEAQ2 y x)
(ADDQ x (ADDQ y x)) -> (LEAQ2 y x)
// fold ADDQ into LEAQ
(ADDQconst [c] (LEAQ [d] {s} x)) -> (LEAQ [c+d] {s} x)
(LEAQ [c] {s} (ADDQconst [d] x)) -> (LEAQ [c+d] {s} x)
(LEAQ [c] {s} (ADDQ x y)) && x.Op != OpSB && y.Op != OpSB -> (LEAQ1 [c] {s} x y)
(ADDQ x (LEAQ [c] {s} y)) && x.Op != OpSB && y.Op != OpSB -> (LEAQ1 [c] {s} x y)
(ADDQ (LEAQ [c] {s} x) y) && x.Op != OpSB && y.Op != OpSB -> (LEAQ1 [c] {s} x y)
// fold ADDQconst into leaqX
(ADDQconst [c] (LEAQ1 [d] {s} x y)) -> (LEAQ1 [c+d] {s} x y)
(ADDQconst [c] (LEAQ2 [d] {s} x y)) -> (LEAQ2 [c+d] {s} x y)
(ADDQconst [c] (LEAQ4 [d] {s} x y)) -> (LEAQ4 [c+d] {s} x y)
(ADDQconst [c] (LEAQ8 [d] {s} x y)) -> (LEAQ8 [c+d] {s} x y)
(LEAQ1 [c] {s} (ADDQconst [d] x) y) && x.Op != OpSB -> (LEAQ1 [c+d] {s} x y)
(LEAQ1 [c] {s} x (ADDQconst [d] y)) && y.Op != OpSB -> (LEAQ1 [c+d] {s} x y)
(LEAQ2 [c] {s} (ADDQconst [d] x) y) && x.Op != OpSB -> (LEAQ2 [c+d] {s} x y)
(LEAQ2 [c] {s} x (ADDQconst [d] y)) && y.Op != OpSB -> (LEAQ2 [c+2*d] {s} x y)
(LEAQ4 [c] {s} (ADDQconst [d] x) y) && x.Op != OpSB -> (LEAQ4 [c+d] {s} x y)
(LEAQ4 [c] {s} x (ADDQconst [d] y)) && y.Op != OpSB -> (LEAQ4 [c+4*d] {s} x y)
(LEAQ8 [c] {s} (ADDQconst [d] x) y) && x.Op != OpSB -> (LEAQ8 [c+d] {s} x y)
(LEAQ8 [c] {s} x (ADDQconst [d] y)) && y.Op != OpSB -> (LEAQ8 [c+8*d] {s} x y)
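The rules above fold adds and small shifts into the scaled LEAQ forms. As a rough illustration (not part of the commit), an address computation like &p[i] over 8-byte elements is base + 8*i, which these rules let the compiler express as a single LEAQ8 instead of a shift followed by an add; the exact selection depends on the surrounding SSA.

```go
package main

import "fmt"

// addrOf returns &p[i], i.e. base + 8*i, a natural LEAQ8 candidate.
func addrOf(p *[16]int64, i int) *int64 {
	return &p[i]
}

func main() {
	var buf [16]int64
	fmt.Println(addrOf(&buf, 3))
}
```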
// reverse ordering of compare instruction
(SETL (InvertFlags x)) -> (SETG x)
@@ -672,17 +697,13 @@
(MOVBstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem) && canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) ->
(MOVBstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
// indexed loads and stores
(MOVQloadidx8 [off1] {sym} (ADDQconst [off2] ptr) idx mem) -> (MOVQloadidx8 [addOff(off1, off2)] {sym} ptr idx mem)
(MOVQstoreidx8 [off1] {sym} (ADDQconst [off2] ptr) idx val mem) -> (MOVQstoreidx8 [addOff(off1, off2)] {sym} ptr idx val mem)
(MOVSSloadidx4 [off1] {sym} (ADDQconst [off2] {sym} ptr) idx mem) -> (MOVSSloadidx4 [addOff(off1, off2)] {sym} ptr idx mem)
(MOVSSstoreidx4 [off1] {sym} (ADDQconst [off2] {sym} ptr) idx val mem) -> (MOVSSstoreidx4 [addOff(off1, off2)] {sym} ptr idx val mem)
(MOVSDloadidx8 [off1] {sym} (ADDQconst [off2] {sym} ptr) idx mem) -> (MOVSDloadidx8 [addOff(off1, off2)] {sym} ptr idx mem)
(MOVSDstoreidx8 [off1] {sym} (ADDQconst [off2] {sym} ptr) idx val mem) -> (MOVSDstoreidx8 [addOff(off1, off2)] {sym} ptr idx val mem)
(MOVLstoreidx4 [off1] {sym} (ADDQconst [off2] ptr) idx val mem) -> (MOVLstoreidx4 [addOff(off1, off2)] {sym} ptr idx val mem)
(MOVWstoreidx2 [off1] {sym} (ADDQconst [off2] ptr) idx val mem) -> (MOVWstoreidx2 [addOff(off1, off2)] {sym} ptr idx val mem)
(MOVBstoreidx1 [off1] {sym} (ADDQconst [off2] ptr) idx val mem) -> (MOVBstoreidx1 [addOff(off1, off2)] {sym} ptr idx val mem)
// generating indexed loads and stores
(MOVBload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem) && canMergeSym(sym1, sym2) ->
(MOVBloadidx1 [addOff(off1, off2)] {mergeSym(sym1,sym2)} ptr idx mem)
(MOVWload [off1] {sym1} (LEAQ2 [off2] {sym2} ptr idx) mem) && canMergeSym(sym1, sym2) ->
(MOVWloadidx2 [addOff(off1, off2)] {mergeSym(sym1,sym2)} ptr idx mem)
(MOVLload [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) mem) && canMergeSym(sym1, sym2) ->
(MOVLloadidx4 [addOff(off1, off2)] {mergeSym(sym1,sym2)} ptr idx mem)
(MOVQload [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) mem) && canMergeSym(sym1, sym2) ->
(MOVQloadidx8 [addOff(off1, off2)] {mergeSym(sym1,sym2)} ptr idx mem)
(MOVSSload [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) mem) && canMergeSym(sym1, sym2) ->
@@ -703,7 +724,37 @@
(MOVSDstore [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) val mem) && canMergeSym(sym1, sym2) ->
(MOVSDstoreidx8 [addOff(off1, off2)] {mergeSym(sym1,sym2)} ptr idx val mem)
(MOVBstore [off] {sym} (ADDQ ptr idx) val mem) -> (MOVBstoreidx1 [off] {sym} ptr idx val mem)
(MOVBload [off] {sym} (ADDQ ptr idx) mem) && ptr.Op != OpSB -> (MOVBloadidx1 [off] {sym} ptr idx mem)
(MOVBstore [off] {sym} (ADDQ ptr idx) val mem) && ptr.Op != OpSB -> (MOVBstoreidx1 [off] {sym} ptr idx val mem)
// combine ADDQ into indexed loads and stores
(MOVBloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) -> (MOVBloadidx1 [c+d] {sym} ptr idx mem)
(MOVWloadidx2 [c] {sym} (ADDQconst [d] ptr) idx mem) -> (MOVWloadidx2 [c+d] {sym} ptr idx mem)
(MOVLloadidx4 [c] {sym} (ADDQconst [d] ptr) idx mem) -> (MOVLloadidx4 [c+d] {sym} ptr idx mem)
(MOVQloadidx8 [c] {sym} (ADDQconst [d] ptr) idx mem) -> (MOVQloadidx8 [c+d] {sym} ptr idx mem)
(MOVSSloadidx4 [c] {sym} (ADDQconst [d] ptr) idx mem) -> (MOVSSloadidx4 [c+d] {sym} ptr idx mem)
(MOVSDloadidx8 [c] {sym} (ADDQconst [d] ptr) idx mem) -> (MOVSDloadidx8 [c+d] {sym} ptr idx mem)
(MOVBstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) -> (MOVBstoreidx1 [c+d] {sym} ptr idx val mem)
(MOVWstoreidx2 [c] {sym} (ADDQconst [d] ptr) idx val mem) -> (MOVWstoreidx2 [c+d] {sym} ptr idx val mem)
(MOVLstoreidx4 [c] {sym} (ADDQconst [d] ptr) idx val mem) -> (MOVLstoreidx4 [c+d] {sym} ptr idx val mem)
(MOVQstoreidx8 [c] {sym} (ADDQconst [d] ptr) idx val mem) -> (MOVQstoreidx8 [c+d] {sym} ptr idx val mem)
(MOVSSstoreidx4 [c] {sym} (ADDQconst [d] ptr) idx val mem) -> (MOVSSstoreidx4 [c+d] {sym} ptr idx val mem)
(MOVSDstoreidx8 [c] {sym} (ADDQconst [d] ptr) idx val mem) -> (MOVSDstoreidx8 [c+d] {sym} ptr idx val mem)
(MOVBloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) -> (MOVBloadidx1 [c+d] {sym} ptr idx mem)
(MOVWloadidx2 [c] {sym} ptr (ADDQconst [d] idx) mem) -> (MOVWloadidx2 [c+2*d] {sym} ptr idx mem)
(MOVLloadidx4 [c] {sym} ptr (ADDQconst [d] idx) mem) -> (MOVLloadidx4 [c+4*d] {sym} ptr idx mem)
(MOVQloadidx8 [c] {sym} ptr (ADDQconst [d] idx) mem) -> (MOVQloadidx8 [c+8*d] {sym} ptr idx mem)
(MOVSSloadidx4 [c] {sym} ptr (ADDQconst [d] idx) mem) -> (MOVSSloadidx4 [c+4*d] {sym} ptr idx mem)
(MOVSDloadidx8 [c] {sym} ptr (ADDQconst [d] idx) mem) -> (MOVSDloadidx8 [c+8*d] {sym} ptr idx mem)
(MOVBstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) -> (MOVBstoreidx1 [c+d] {sym} ptr idx val mem)
(MOVWstoreidx2 [c] {sym} ptr (ADDQconst [d] idx) val mem) -> (MOVWstoreidx2 [c+2*d] {sym} ptr idx val mem)
(MOVLstoreidx4 [c] {sym} ptr (ADDQconst [d] idx) val mem) -> (MOVLstoreidx4 [c+4*d] {sym} ptr idx val mem)
(MOVQstoreidx8 [c] {sym} ptr (ADDQconst [d] idx) val mem) -> (MOVQstoreidx8 [c+8*d] {sym} ptr idx val mem)
(MOVSSstoreidx4 [c] {sym} ptr (ADDQconst [d] idx) val mem) -> (MOVSSstoreidx4 [c+4*d] {sym} ptr idx val mem)
(MOVSDstoreidx8 [c] {sym} ptr (ADDQconst [d] idx) val mem) -> (MOVSDstoreidx8 [c+8*d] {sym} ptr idx val mem)
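The scaled displacements in the rules above (c+2*d, c+4*d, c+8*d) come from the effective-address formula: if the index operand is (ADDQconst [d] idx), then base + scale*(idx+d) + c equals base + scale*idx + (c + scale*d), so the constant folds into the displacement. A small self-contained check of that algebra (illustrative only, not compiler code):

```go
package main

import "fmt"

// ea computes the effective address of a scaled indexed operand:
// base + scale*index + displacement.
func ea(base, index, scale, disp int64) int64 {
	return base + scale*index + disp
}

func main() {
	base, idx, c, d := int64(0x1000), int64(7), int64(16), int64(3)
	before := ea(base, idx+d, 4, c)  // index still carries the +d
	after := ea(base, idx, 4, c+4*d) // +d folded into the displacement
	fmt.Println(before == after)     // true
}
```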
// fold LEAQs together
(LEAQ [off1] {sym1} (LEAQ [off2] {sym2} x)) && canMergeSym(sym1, sym2) ->
......
@@ -362,31 +362,37 @@ func init() {
{name: "LEAQ2", reg: gp21sb, aux: "SymOff"}, // arg0 + 2*arg1 + auxint + aux
{name: "LEAQ4", reg: gp21sb, aux: "SymOff"}, // arg0 + 4*arg1 + auxint + aux
{name: "LEAQ8", reg: gp21sb, aux: "SymOff"}, // arg0 + 8*arg1 + auxint + aux
// Note: LEAQ{1,2,4,8} must not have OpSB as either argument.
// auxint+aux == add auxint and the offset of the symbol in aux (if any) to the effective address
{name: "MOVBload", reg: gpload, asm: "MOVB", aux: "SymOff", typ: "UInt8"}, // load byte from arg0+auxint+aux. arg1=mem
{name: "MOVBQSXload", reg: gpload, asm: "MOVBQSX", aux: "SymOff"}, // ditto, extend to int64
{name: "MOVBQZXload", reg: gpload, asm: "MOVBQZX", aux: "SymOff"}, // ditto, extend to uint64
{name: "MOVWload", reg: gpload, asm: "MOVW", aux: "SymOff", typ: "UInt16"}, // load 2 bytes from arg0+auxint+aux. arg1=mem
{name: "MOVWQSXload", reg: gpload, asm: "MOVWQSX", aux: "SymOff"}, // ditto, extend to int64
{name: "MOVWQZXload", reg: gpload, asm: "MOVWQZX", aux: "SymOff"}, // ditto, extend to uint64
{name: "MOVLload", reg: gpload, asm: "MOVL", aux: "SymOff", typ: "UInt32"}, // load 4 bytes from arg0+auxint+aux. arg1=mem
{name: "MOVLQSXload", reg: gpload, asm: "MOVLQSX", aux: "SymOff"}, // ditto, extend to int64
{name: "MOVLQZXload", reg: gpload, asm: "MOVLQZX", aux: "SymOff"}, // ditto, extend to uint64
{name: "MOVQload", reg: gpload, asm: "MOVQ", aux: "SymOff", typ: "UInt64"}, // load 8 bytes from arg0+auxint+aux. arg1=mem
{name: "MOVQloadidx8", reg: gploadidx, asm: "MOVQ", aux: "SymOff"}, // load 8 bytes from arg0+8*arg1+auxint+aux. arg2=mem
{name: "MOVBstore", reg: gpstore, asm: "MOVB", aux: "SymOff", typ: "Mem"}, // store byte in arg1 to arg0+auxint+aux. arg2=mem
{name: "MOVWstore", reg: gpstore, asm: "MOVW", aux: "SymOff", typ: "Mem"}, // store 2 bytes in arg1 to arg0+auxint+aux. arg2=mem
{name: "MOVLstore", reg: gpstore, asm: "MOVL", aux: "SymOff", typ: "Mem"}, // store 4 bytes in arg1 to arg0+auxint+aux. arg2=mem
{name: "MOVQstore", reg: gpstore, asm: "MOVQ", aux: "SymOff", typ: "Mem"}, // store 8 bytes in arg1 to arg0+auxint+aux. arg2=mem
{name: "MOVBload", reg: gpload, asm: "MOVB", aux: "SymOff", typ: "UInt8"}, // load byte from arg0+auxint+aux. arg1=mem
{name: "MOVBQSXload", reg: gpload, asm: "MOVBQSX", aux: "SymOff"}, // ditto, extend to int64
{name: "MOVBQZXload", reg: gpload, asm: "MOVBQZX", aux: "SymOff"}, // ditto, extend to uint64
{name: "MOVWload", reg: gpload, asm: "MOVW", aux: "SymOff", typ: "UInt16"}, // load 2 bytes from arg0+auxint+aux. arg1=mem
{name: "MOVWQSXload", reg: gpload, asm: "MOVWQSX", aux: "SymOff"}, // ditto, extend to int64
{name: "MOVWQZXload", reg: gpload, asm: "MOVWQZX", aux: "SymOff"}, // ditto, extend to uint64
{name: "MOVLload", reg: gpload, asm: "MOVL", aux: "SymOff", typ: "UInt32"}, // load 4 bytes from arg0+auxint+aux. arg1=mem
{name: "MOVLQSXload", reg: gpload, asm: "MOVLQSX", aux: "SymOff"}, // ditto, extend to int64
{name: "MOVLQZXload", reg: gpload, asm: "MOVLQZX", aux: "SymOff"}, // ditto, extend to uint64
{name: "MOVQload", reg: gpload, asm: "MOVQ", aux: "SymOff", typ: "UInt64"}, // load 8 bytes from arg0+auxint+aux. arg1=mem
{name: "MOVBstore", reg: gpstore, asm: "MOVB", aux: "SymOff", typ: "Mem"}, // store byte in arg1 to arg0+auxint+aux. arg2=mem
{name: "MOVWstore", reg: gpstore, asm: "MOVW", aux: "SymOff", typ: "Mem"}, // store 2 bytes in arg1 to arg0+auxint+aux. arg2=mem
{name: "MOVLstore", reg: gpstore, asm: "MOVL", aux: "SymOff", typ: "Mem"}, // store 4 bytes in arg1 to arg0+auxint+aux. arg2=mem
{name: "MOVQstore", reg: gpstore, asm: "MOVQ", aux: "SymOff", typ: "Mem"}, // store 8 bytes in arg1 to arg0+auxint+aux. arg2=mem
{name: "MOVOload", reg: fpload, asm: "MOVUPS", aux: "SymOff", typ: "Int128"}, // load 16 bytes from arg0+auxint+aux. arg1=mem
{name: "MOVOstore", reg: fpstore, asm: "MOVUPS", aux: "SymOff", typ: "Mem"}, // store 16 bytes in arg1 to arg0+auxint+aux. arg2=mem
// indexed loads/stores
{name: "MOVBloadidx1", reg: gploadidx, asm: "MOVB", aux: "SymOff"}, // load a byte from arg0+arg1+auxint+aux. arg2=mem
{name: "MOVWloadidx2", reg: gploadidx, asm: "MOVW", aux: "SymOff"}, // load 2 bytes from arg0+2*arg1+auxint+aux. arg2=mem
{name: "MOVLloadidx4", reg: gploadidx, asm: "MOVL", aux: "SymOff"}, // load 4 bytes from arg0+4*arg1+auxint+aux. arg2=mem
{name: "MOVQloadidx8", reg: gploadidx, asm: "MOVQ", aux: "SymOff"}, // load 8 bytes from arg0+8*arg1+auxint+aux. arg2=mem
// TODO: sign-extending indexed loads
{name: "MOVBstoreidx1", reg: gpstoreidx, asm: "MOVB", aux: "SymOff"}, // store byte in arg2 to arg0+arg1+auxint+aux. arg3=mem
{name: "MOVWstoreidx2", reg: gpstoreidx, asm: "MOVW", aux: "SymOff"}, // store 2 bytes in arg2 to arg0+2*arg1+auxint+aux. arg3=mem
{name: "MOVLstoreidx4", reg: gpstoreidx, asm: "MOVL", aux: "SymOff"}, // store 4 bytes in arg2 to arg0+4*arg1+auxint+aux. arg3=mem
{name: "MOVQstoreidx8", reg: gpstoreidx, asm: "MOVQ", aux: "SymOff"}, // store 8 bytes in arg2 to arg0+8*arg1+auxint+aux. arg3=mem
{name: "MOVOload", reg: fpload, asm: "MOVUPS", aux: "SymOff", typ: "Int128"}, // load 16 bytes from arg0+auxint+aux. arg1=mem
{name: "MOVOstore", reg: fpstore, asm: "MOVUPS", aux: "SymOff", typ: "Mem"}, // store 16 bytes in arg1 to arg0+auxint+aux. arg2=mem
// TODO: add size-mismatched indexed loads, like MOVBstoreidx4.
// For storeconst ops, the AuxInt field encodes both
// the value to store and an address offset of the store.
......
@@ -260,17 +260,20 @@ const (
OpAMD64MOVLQSXload
OpAMD64MOVLQZXload
OpAMD64MOVQload
OpAMD64MOVQloadidx8
OpAMD64MOVBstore
OpAMD64MOVWstore
OpAMD64MOVLstore
OpAMD64MOVQstore
OpAMD64MOVOload
OpAMD64MOVOstore
OpAMD64MOVBloadidx1
OpAMD64MOVWloadidx2
OpAMD64MOVLloadidx4
OpAMD64MOVQloadidx8
OpAMD64MOVBstoreidx1
OpAMD64MOVWstoreidx2
OpAMD64MOVLstoreidx4
OpAMD64MOVQstoreidx8
OpAMD64MOVOload
OpAMD64MOVOstore
OpAMD64MOVBstoreconst
OpAMD64MOVWstoreconst
OpAMD64MOVLstoreconst
@@ -3129,20 +3132,6 @@ var opcodeTable = [...]opInfo{
},
},
},
{
name: "MOVQloadidx8",
auxType: auxSymOff,
asm: x86.AMOVQ,
reg: regInfo{
inputs: []inputInfo{
{1, 65535}, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
{0, 4295032831}, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15 .SB
},
outputs: []regMask{
65519, // .AX .CX .DX .BX .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
},
},
},
{
name: "MOVBstore",
auxType: auxSymOff,
@@ -3188,45 +3177,89 @@ var opcodeTable = [...]opInfo{
},
},
{
name: "MOVBstoreidx1",
name: "MOVOload",
auxType: auxSymOff,
asm: x86.AMOVUPS,
reg: regInfo{
inputs: []inputInfo{
{0, 4295032831}, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15 .SB
},
outputs: []regMask{
4294901760, // .X0 .X1 .X2 .X3 .X4 .X5 .X6 .X7 .X8 .X9 .X10 .X11 .X12 .X13 .X14 .X15
},
},
},
{
name: "MOVOstore",
auxType: auxSymOff,
asm: x86.AMOVUPS,
reg: regInfo{
inputs: []inputInfo{
{1, 4294901760}, // .X0 .X1 .X2 .X3 .X4 .X5 .X6 .X7 .X8 .X9 .X10 .X11 .X12 .X13 .X14 .X15
{0, 4295032831}, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15 .SB
},
},
},
{
name: "MOVBloadidx1",
auxType: auxSymOff,
asm: x86.AMOVB,
reg: regInfo{
inputs: []inputInfo{
{1, 65535}, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
{2, 65535}, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
{0, 4295032831}, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15 .SB
},
outputs: []regMask{
65519, // .AX .CX .DX .BX .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
},
},
},
{
name: "MOVWstoreidx2",
name: "MOVWloadidx2",
auxType: auxSymOff,
asm: x86.AMOVW,
reg: regInfo{
inputs: []inputInfo{
{1, 65535}, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
{2, 65535}, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
{0, 4295032831}, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15 .SB
},
outputs: []regMask{
65519, // .AX .CX .DX .BX .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
},
},
},
{
name: "MOVLstoreidx4",
name: "MOVLloadidx4",
auxType: auxSymOff,
asm: x86.AMOVL,
reg: regInfo{
inputs: []inputInfo{
{1, 65535}, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
{2, 65535}, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
{0, 4295032831}, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15 .SB
},
outputs: []regMask{
65519, // .AX .CX .DX .BX .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
},
},
},
{
name: "MOVQstoreidx8",
name: "MOVQloadidx8",
auxType: auxSymOff,
asm: x86.AMOVQ,
reg: regInfo{
inputs: []inputInfo{
{1, 65535}, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
{0, 4295032831}, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15 .SB
},
outputs: []regMask{
65519, // .AX .CX .DX .BX .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
},
},
},
{
name: "MOVBstoreidx1",
auxType: auxSymOff,
asm: x86.AMOVB,
reg: regInfo{
inputs: []inputInfo{
{1, 65535}, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
@@ -3236,25 +3269,37 @@ var opcodeTable = [...]opInfo{
},
},
{
name: "MOVOload",
name: "MOVWstoreidx2",
auxType: auxSymOff,
asm: x86.AMOVUPS,
asm: x86.AMOVW,
reg: regInfo{
inputs: []inputInfo{
{1, 65535}, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
{2, 65535}, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
{0, 4295032831}, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15 .SB
},
outputs: []regMask{
4294901760, // .X0 .X1 .X2 .X3 .X4 .X5 .X6 .X7 .X8 .X9 .X10 .X11 .X12 .X13 .X14 .X15
},
},
{
name: "MOVLstoreidx4",
auxType: auxSymOff,
asm: x86.AMOVL,
reg: regInfo{
inputs: []inputInfo{
{1, 65535}, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
{2, 65535}, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
{0, 4295032831}, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15 .SB
},
},
},
{
name: "MOVOstore",
name: "MOVQstoreidx8",
auxType: auxSymOff,
asm: x86.AMOVUPS,
asm: x86.AMOVQ,
reg: regInfo{
inputs: []inputInfo{
{1, 4294901760}, // .X0 .X1 .X2 .X3 .X4 .X5 .X6 .X7 .X8 .X9 .X10 .X11 .X12 .X13 .X14 .X15
{1, 65535}, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
{2, 65535}, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
{0, 4295032831}, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15 .SB
},
},
......
@@ -117,7 +117,7 @@ func (v *Value) LongString() string {
if v.Aux != nil {
s += fmt.Sprintf(" {%s}", v.Aux)
}
s += fmt.Sprintf(" [%s]", v.AuxInt)
s += fmt.Sprintf(" [%d]", v.AuxInt)
case auxSymValAndOff:
if v.Aux != nil {
s += fmt.Sprintf(" {%s}", v.Aux)
......
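Regarding the value.go hunk above: Value.AuxInt is an int64, so the old %s verb printed a formatting error instead of the number; %d is the correct verb. A minimal stand-alone reproduction (example value assumed):

```go
package main

import "fmt"

func main() {
	var auxInt int64 = 8
	fmt.Printf("[%s]\n", auxInt) // prints [%!s(int64=8)]: wrong verb for an integer
	fmt.Printf("[%d]\n", auxInt) // prints [8]
}
```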