Commit 38cd7988 authored by Cherry Zhang's avatar Cherry Zhang

cmd/compile: simplify div/mod on ARM

On ARM, DIV, DIVU, MOD, MODU are pseudo instructions that make
runtime calls to _div/_udiv/_mod/_umod, which themselves are wrappers
of udiv. The udiv function does the real thing.

Instead of generating these pseudo instructions, call udiv
directly. This removes one layer of wrappers (which has an awkward
way of passing arguments), and also allows combining DIV and MOD
if both results are needed.

Change-Id: I118afc3986db3a1daabb5c1e6e57430888c91817
Reviewed-on: https://go-review.googlesource.com/29390
Reviewed-by: David Chase <drchase@google.com>
parent f9648100
...@@ -196,21 +196,11 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ...@@ -196,21 +196,11 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
} else { } else {
p.To.Name = obj.NAME_AUTO p.To.Name = obj.NAME_AUTO
} }
case ssa.OpARMDIV, case ssa.OpARMUDIVrtcall:
ssa.OpARMDIVU, p := gc.Prog(obj.ACALL)
ssa.OpARMMOD, p.To.Type = obj.TYPE_MEM
ssa.OpARMMODU: p.To.Name = obj.NAME_EXTERN
// Note: for software division the assembler rewrite these p.To.Sym = obj.Linklookup(gc.Ctxt, "udiv", 0)
// instructions to sequence of instructions:
// - it puts numerator in R11 and denominator in g.m.divmod
// and call (say) _udiv
// - _udiv saves R0-R3 on stack and call udiv, restores R0-R3
// before return
// - udiv does the actual work
//TODO: set approperiate regmasks and call udiv directly?
// need to be careful for negative case
// Or, as soft div is already expensive, we don't care?
fallthrough
case ssa.OpARMADD, case ssa.OpARMADD,
ssa.OpARMADC, ssa.OpARMADC,
ssa.OpARMSUB, ssa.OpARMSUB,
......
...@@ -230,12 +230,8 @@ func NewConfig(arch string, fe Frontend, ctxt *obj.Link, optimize bool) *Config ...@@ -230,12 +230,8 @@ func NewConfig(arch string, fe Frontend, ctxt *obj.Link, optimize bool) *Config
if c.nacl { if c.nacl {
c.noDuffDevice = true // Don't use Duff's device on NaCl c.noDuffDevice = true // Don't use Duff's device on NaCl
// ARM assembler rewrites DIV/MOD to runtime calls, which // runtime call clobber R12 on nacl
// clobber R12 on nacl opcodeTable[OpARMUDIVrtcall].reg.clobbers |= 1 << 12 // R12
opcodeTable[OpARMDIV].reg.clobbers |= 1 << 12 // R12
opcodeTable[OpARMDIVU].reg.clobbers |= 1 << 12 // R12
opcodeTable[OpARMMOD].reg.clobbers |= 1 << 12 // R12
opcodeTable[OpARMMODU].reg.clobbers |= 1 << 12 // R12
} }
// Assign IDs to preallocated values/blocks. // Assign IDs to preallocated values/blocks.
......
...@@ -37,21 +37,31 @@ ...@@ -37,21 +37,31 @@
(Mul32uhilo x y) -> (MULLU x y) (Mul32uhilo x y) -> (MULLU x y)
(Div32 x y) -> (DIV x y) (Div32 x y) ->
(Div32u x y) -> (DIVU x y) (SUB (XOR <config.fe.TypeUInt32()> // negate the result if one operand is negative
(Div16 x y) -> (DIV (SignExt16to32 x) (SignExt16to32 y)) (Select0 <config.fe.TypeUInt32()> (UDIVrtcall
(Div16u x y) -> (DIVU (ZeroExt16to32 x) (ZeroExt16to32 y)) (SUB <config.fe.TypeUInt32()> (XOR x <config.fe.TypeUInt32()> (Signmask x)) (Signmask x)) // negate x if negative
(Div8 x y) -> (DIV (SignExt8to32 x) (SignExt8to32 y)) (SUB <config.fe.TypeUInt32()> (XOR y <config.fe.TypeUInt32()> (Signmask y)) (Signmask y)))) // negate y if negative
(Div8u x y) -> (DIVU (ZeroExt8to32 x) (ZeroExt8to32 y)) (Signmask (XOR <config.fe.TypeUInt32()> x y))) (Signmask (XOR <config.fe.TypeUInt32()> x y)))
(Div32u x y) -> (Select0 <config.fe.TypeUInt32()> (UDIVrtcall x y))
(Div16 x y) -> (Div32 (SignExt16to32 x) (SignExt16to32 y))
(Div16u x y) -> (Div32u (ZeroExt16to32 x) (ZeroExt16to32 y))
(Div8 x y) -> (Div32 (SignExt8to32 x) (SignExt8to32 y))
(Div8u x y) -> (Div32u (ZeroExt8to32 x) (ZeroExt8to32 y))
(Div32F x y) -> (DIVF x y) (Div32F x y) -> (DIVF x y)
(Div64F x y) -> (DIVD x y) (Div64F x y) -> (DIVD x y)
(Mod32 x y) -> (MOD x y) (Mod32 x y) ->
(Mod32u x y) -> (MODU x y) (SUB (XOR <config.fe.TypeUInt32()> // negate the result if x is negative
(Mod16 x y) -> (MOD (SignExt16to32 x) (SignExt16to32 y)) (Select1 <config.fe.TypeUInt32()> (UDIVrtcall
(Mod16u x y) -> (MODU (ZeroExt16to32 x) (ZeroExt16to32 y)) (SUB <config.fe.TypeUInt32()> (XOR <config.fe.TypeUInt32()> x (Signmask x)) (Signmask x)) // negate x if negative
(Mod8 x y) -> (MOD (SignExt8to32 x) (SignExt8to32 y)) (SUB <config.fe.TypeUInt32()> (XOR <config.fe.TypeUInt32()> y (Signmask y)) (Signmask y)))) // negate y if negative
(Mod8u x y) -> (MODU (ZeroExt8to32 x) (ZeroExt8to32 y)) (Signmask x)) (Signmask x))
(Mod32u x y) -> (Select1 <config.fe.TypeUInt32()> (UDIVrtcall x y))
(Mod16 x y) -> (Mod32 (SignExt16to32 x) (SignExt16to32 y))
(Mod16u x y) -> (Mod32u (ZeroExt16to32 x) (ZeroExt16to32 y))
(Mod8 x y) -> (Mod32 (SignExt8to32 x) (SignExt8to32 y))
(Mod8u x y) -> (Mod32u (ZeroExt8to32 x) (ZeroExt8to32 y))
(And32 x y) -> (AND x y) (And32 x y) -> (AND x y)
(And16 x y) -> (AND x y) (And16 x y) -> (AND x y)
...@@ -586,8 +596,10 @@ ...@@ -586,8 +596,10 @@
(MULA (MOVWconst [c]) x a) && c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c) -> (ADD (SLLconst <x.Type> [log2(c/9)] (ADDshiftLL <x.Type> x x [3])) a) (MULA (MOVWconst [c]) x a) && c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c) -> (ADD (SLLconst <x.Type> [log2(c/9)] (ADDshiftLL <x.Type> x x [3])) a)
// div by constant // div by constant
(DIVU x (MOVWconst [1])) -> x (Select0 (UDIVrtcall x (MOVWconst [1]))) -> x
(DIVU x (MOVWconst [c])) && isPowerOfTwo(c) -> (SRLconst [log2(c)] x) (Select1 (UDIVrtcall _ (MOVWconst [1]))) -> (MOVWconst [0])
(Select0 (UDIVrtcall x (MOVWconst [c]))) && isPowerOfTwo(c) -> (SRLconst [log2(c)] x)
(Select1 (UDIVrtcall x (MOVWconst [c]))) && isPowerOfTwo(c) -> (ANDconst [c-1] x)
// constant comparisons // constant comparisons
(CMPconst (MOVWconst [x]) [y]) && int32(x)==int32(y) -> (FlagEQ) (CMPconst (MOVWconst [x]) [y]) && int32(x)==int32(y) -> (FlagEQ)
...@@ -805,8 +817,8 @@ ...@@ -805,8 +817,8 @@
(SRAconst [c] (MOVWconst [d])) -> (MOVWconst [int64(int32(d)>>uint64(c))]) (SRAconst [c] (MOVWconst [d])) -> (MOVWconst [int64(int32(d)>>uint64(c))])
(MUL (MOVWconst [c]) (MOVWconst [d])) -> (MOVWconst [int64(int32(c*d))]) (MUL (MOVWconst [c]) (MOVWconst [d])) -> (MOVWconst [int64(int32(c*d))])
(MULA (MOVWconst [c]) (MOVWconst [d]) a) -> (ADDconst [int64(int32(c*d))] a) (MULA (MOVWconst [c]) (MOVWconst [d]) a) -> (ADDconst [int64(int32(c*d))] a)
(DIV (MOVWconst [c]) (MOVWconst [d])) -> (MOVWconst [int64(int32(c)/int32(d))]) (Select0 (UDIVrtcall (MOVWconst [c]) (MOVWconst [d]))) -> (MOVWconst [int64(uint32(c)/uint32(d))])
(DIVU (MOVWconst [c]) (MOVWconst [d])) -> (MOVWconst [int64(uint32(c)/uint32(d))]) (Select1 (UDIVrtcall (MOVWconst [c]) (MOVWconst [d]))) -> (MOVWconst [int64(uint32(c)%uint32(d))])
(ANDconst [c] (MOVWconst [d])) -> (MOVWconst [c&d]) (ANDconst [c] (MOVWconst [d])) -> (MOVWconst [c&d])
(ANDconst [c] (ANDconst [d] x)) -> (ANDconst [c&d] x) (ANDconst [c] (ANDconst [d] x)) -> (ANDconst [c&d] x)
(ORconst [c] (MOVWconst [d])) -> (MOVWconst [c|d]) (ORconst [c] (MOVWconst [d])) -> (MOVWconst [c|d])
......
...@@ -138,10 +138,21 @@ func init() { ...@@ -138,10 +138,21 @@ func init() {
{name: "MUL", argLength: 2, reg: gp21, asm: "MUL", commutative: true}, // arg0 * arg1 {name: "MUL", argLength: 2, reg: gp21, asm: "MUL", commutative: true}, // arg0 * arg1
{name: "HMUL", argLength: 2, reg: gp21, asm: "MULL", commutative: true}, // (arg0 * arg1) >> 32, signed {name: "HMUL", argLength: 2, reg: gp21, asm: "MULL", commutative: true}, // (arg0 * arg1) >> 32, signed
{name: "HMULU", argLength: 2, reg: gp21, asm: "MULLU", commutative: true}, // (arg0 * arg1) >> 32, unsigned {name: "HMULU", argLength: 2, reg: gp21, asm: "MULLU", commutative: true}, // (arg0 * arg1) >> 32, unsigned
{name: "DIV", argLength: 2, reg: gp21, asm: "DIV", clobberFlags: true}, // arg0 / arg1, signed, soft div clobbers flags
{name: "DIVU", argLength: 2, reg: gp21, asm: "DIVU", clobberFlags: true}, // arg0 / arg1, unsighed // udiv runtime call for soft division
{name: "MOD", argLength: 2, reg: gp21, asm: "MOD", clobberFlags: true}, // arg0 % arg1, signed // output0 = arg0/arg1, output1 = arg0%arg1
{name: "MODU", argLength: 2, reg: gp21, asm: "MODU", clobberFlags: true}, // arg0 % arg1, unsigned // see ../../../../../runtime/vlop_arm.s
{
name: "UDIVrtcall",
argLength: 2,
reg: regInfo{
inputs: []regMask{buildReg("R1"), buildReg("R0")},
outputs: []regMask{buildReg("R0"), buildReg("R1")},
clobbers: buildReg("R2 R3"), // also clobbers R12 on NaCl (modified in ../config.go)
},
clobberFlags: true,
typ: "(UInt32,UInt32)",
},
{name: "ADDS", argLength: 2, reg: gp21carry, asm: "ADD", commutative: true}, // arg0 + arg1, set carry flag {name: "ADDS", argLength: 2, reg: gp21carry, asm: "ADD", commutative: true}, // arg0 + arg1, set carry flag
{name: "ADDSconst", argLength: 1, reg: gp11carry, asm: "ADD", aux: "Int32"}, // arg0 + auxInt, set carry flag {name: "ADDSconst", argLength: 1, reg: gp11carry, asm: "ADD", aux: "Int32"}, // arg0 + auxInt, set carry flag
......
...@@ -623,10 +623,7 @@ const ( ...@@ -623,10 +623,7 @@ const (
OpARMMUL OpARMMUL
OpARMHMUL OpARMHMUL
OpARMHMULU OpARMHMULU
OpARMDIV OpARMUDIVrtcall
OpARMDIVU
OpARMMOD
OpARMMODU
OpARMADDS OpARMADDS
OpARMADDSconst OpARMADDSconst
OpARMADC OpARMADC
...@@ -7367,62 +7364,18 @@ var opcodeTable = [...]opInfo{ ...@@ -7367,62 +7364,18 @@ var opcodeTable = [...]opInfo{
}, },
}, },
{ {
name: "DIV", name: "UDIVrtcall",
argLen: 2, argLen: 2,
clobberFlags: true, clobberFlags: true,
asm: arm.ADIV,
reg: regInfo{ reg: regInfo{
inputs: []inputInfo{ inputs: []inputInfo{
{0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 {0, 2}, // R1
{1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 {1, 1}, // R0
},
outputs: []outputInfo{
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
},
},
{
name: "DIVU",
argLen: 2,
clobberFlags: true,
asm: arm.ADIVU,
reg: regInfo{
inputs: []inputInfo{
{0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
{1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
},
outputs: []outputInfo{
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
},
},
{
name: "MOD",
argLen: 2,
clobberFlags: true,
asm: arm.AMOD,
reg: regInfo{
inputs: []inputInfo{
{0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
{1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
},
outputs: []outputInfo{
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
},
},
{
name: "MODU",
argLen: 2,
clobberFlags: true,
asm: arm.AMODU,
reg: regInfo{
inputs: []inputInfo{
{0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
{1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
}, },
clobbers: 12, // R2 R3
outputs: []outputInfo{ outputs: []outputInfo{
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 {0, 1}, // R0
{1, 2}, // R1
}, },
}, },
}, },
......
This diff is collapsed.
...@@ -107,6 +107,7 @@ TEXT runtime·_sfloatpanic(SB),NOSPLIT,$-4 ...@@ -107,6 +107,7 @@ TEXT runtime·_sfloatpanic(SB),NOSPLIT,$-4
B runtime·sigpanic(SB) B runtime·sigpanic(SB)
// func udiv(n, d uint32) (q, r uint32) // func udiv(n, d uint32) (q, r uint32)
// compiler knowns the register usage of this function
// Reference: // Reference:
// Sloss, Andrew et. al; ARM System Developer's Guide: Designing and Optimizing System Software // Sloss, Andrew et. al; ARM System Developer's Guide: Designing and Optimizing System Software
// Morgan Kaufmann; 1 edition (April 8, 2004), ISBN 978-1558608740 // Morgan Kaufmann; 1 edition (April 8, 2004), ISBN 978-1558608740
...@@ -117,7 +118,7 @@ TEXT runtime·_sfloatpanic(SB),NOSPLIT,$-4 ...@@ -117,7 +118,7 @@ TEXT runtime·_sfloatpanic(SB),NOSPLIT,$-4
#define Ra R11 #define Ra R11
// Be careful: Ra == R11 will be used by the linker for synthesized instructions. // Be careful: Ra == R11 will be used by the linker for synthesized instructions.
TEXT udiv<>(SB),NOSPLIT,$-4 TEXT udiv(SB),NOSPLIT,$-4
CLZ Rq, Rs // find normalizing shift CLZ Rq, Rs // find normalizing shift
MOVW.S Rq<<Rs, Ra MOVW.S Rq<<Rs, Ra
MOVW $fast_udiv_tab<>-64(SB), RM MOVW $fast_udiv_tab<>-64(SB), RM
...@@ -227,7 +228,7 @@ TEXT _divu(SB), NOSPLIT, $16-0 ...@@ -227,7 +228,7 @@ TEXT _divu(SB), NOSPLIT, $16-0
MOVW RTMP, Rr /* numerator */ MOVW RTMP, Rr /* numerator */
MOVW g_m(g), Rq MOVW g_m(g), Rq
MOVW m_divmod(Rq), Rq /* denominator */ MOVW m_divmod(Rq), Rq /* denominator */
BL udiv<>(SB) BL udiv(SB)
MOVW Rq, RTMP MOVW Rq, RTMP
MOVW 4(R13), Rq MOVW 4(R13), Rq
MOVW 8(R13), Rr MOVW 8(R13), Rr
...@@ -245,7 +246,7 @@ TEXT _modu(SB), NOSPLIT, $16-0 ...@@ -245,7 +246,7 @@ TEXT _modu(SB), NOSPLIT, $16-0
MOVW RTMP, Rr /* numerator */ MOVW RTMP, Rr /* numerator */
MOVW g_m(g), Rq MOVW g_m(g), Rq
MOVW m_divmod(Rq), Rq /* denominator */ MOVW m_divmod(Rq), Rq /* denominator */
BL udiv<>(SB) BL udiv(SB)
MOVW Rr, RTMP MOVW Rr, RTMP
MOVW 4(R13), Rq MOVW 4(R13), Rq
MOVW 8(R13), Rr MOVW 8(R13), Rr
...@@ -269,7 +270,7 @@ TEXT _div(SB),NOSPLIT,$16-0 ...@@ -269,7 +270,7 @@ TEXT _div(SB),NOSPLIT,$16-0
BGE d2 BGE d2
RSB $0, Rq, Rq RSB $0, Rq, Rq
d0: d0:
BL udiv<>(SB) /* none/both neg */ BL udiv(SB) /* none/both neg */
MOVW Rq, RTMP MOVW Rq, RTMP
B out1 B out1
d1: d1:
...@@ -277,8 +278,8 @@ d1: ...@@ -277,8 +278,8 @@ d1:
BGE d0 BGE d0
RSB $0, Rq, Rq RSB $0, Rq, Rq
d2: d2:
BL udiv<>(SB) /* one neg */ BL udiv(SB) /* one neg */
RSB $0, Rq, RTMP RSB $0, Rq, RTMP
out1: out1:
MOVW 4(R13), Rq MOVW 4(R13), Rq
MOVW 8(R13), Rr MOVW 8(R13), Rr
...@@ -300,11 +301,11 @@ TEXT _mod(SB),NOSPLIT,$16-0 ...@@ -300,11 +301,11 @@ TEXT _mod(SB),NOSPLIT,$16-0
CMP $0, Rr CMP $0, Rr
BGE m1 BGE m1
RSB $0, Rr, Rr RSB $0, Rr, Rr
BL udiv<>(SB) /* neg numerator */ BL udiv(SB) /* neg numerator */
RSB $0, Rr, RTMP RSB $0, Rr, RTMP
B out B out
m1: m1:
BL udiv<>(SB) /* pos numerator */ BL udiv(SB) /* pos numerator */
MOVW Rr, RTMP MOVW Rr, RTMP
out: out:
MOVW 4(R13), Rq MOVW 4(R13), Rq
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment