Commit 3306d119 authored by Keith Randall

runtime: unify fastrand1 and fastrand2

C and Go calling conventions are now compatible, so we
don't need two versions of this function.

LGTM=bradfitz
R=golang-codereviews, bradfitz
CC=golang-codereviews
https://golang.org/cl/139080043
parent 1d8fa7fa
@@ -111,7 +111,7 @@ func f32hash(p unsafe.Pointer, s, h uintptr) uintptr {
 	case f == 0:
 		return c1 * (c0 ^ h) // +0, -0
 	case f != f:
-		return c1 * (c0 ^ h ^ uintptr(fastrand2())) // any kind of NaN
+		return c1 * (c0 ^ h ^ uintptr(fastrand1())) // any kind of NaN
 	default:
 		return memhash(p, 4, h)
 	}
@@ -123,7 +123,7 @@ func f64hash(p unsafe.Pointer, s, h uintptr) uintptr {
 	case f == 0:
 		return c1 * (c0 ^ h) // +0, -0
 	case f != f:
-		return c1 * (c0 ^ h ^ uintptr(fastrand2())) // any kind of NaN
+		return c1 * (c0 ^ h ^ uintptr(fastrand1())) // any kind of NaN
 	default:
 		return memhash(p, 8, h)
 	}
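Why the random term: NaN is the one float value that compares unequal to itself, so f != f matches every NaN bit pattern. Folding a fresh fastrand1() value into the hash spreads NaN keys across buckets instead of chaining them all in one slot; since no key ever compares equal to a NaN, they can never be looked up again anyway. A small, self-contained demonstration of the visible consequence in ordinary Go (not runtime code):

	package main

	import (
		"fmt"
		"math"
	)

	func main() {
		m := map[float64]int{}
		m[math.NaN()] = 1
		m[math.NaN()] = 2 // NaN != NaN, so this is a second, distinct entry
		fmt.Println(len(m))        // 2
		fmt.Println(m[math.NaN()]) // 0: lookups by NaN never match any entry
	}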
@@ -2283,7 +2283,7 @@ TEXT runtime·duffcopy(SB), NOSPLIT, $0-0
 TEXT runtime·timenow(SB), NOSPLIT, $0-0
 	JMP	time·now(SB)
 
-TEXT runtime·fastrand2(SB), NOSPLIT, $0-4
+TEXT runtime·fastrand1(SB), NOSPLIT, $0-4
 	get_tls(CX)
 	MOVL	g(CX), AX
 	MOVL	g_m(AX), AX
@@ -2335,7 +2335,7 @@ TEXT runtime·duffcopy(SB), NOSPLIT, $0-0
 TEXT runtime·timenow(SB), NOSPLIT, $0-0
 	JMP	time·now(SB)
 
-TEXT runtime·fastrand2(SB), NOSPLIT, $0-4
+TEXT runtime·fastrand1(SB), NOSPLIT, $0-4
 	get_tls(CX)
 	MOVQ	g(CX), AX
 	MOVQ	g_m(AX), AX
@@ -1208,7 +1208,7 @@ eqret:
 TEXT runtime·timenow(SB), NOSPLIT, $0-0
 	JMP	time·now(SB)
 
-TEXT runtime·fastrand2(SB), NOSPLIT, $0-4
+TEXT runtime·fastrand1(SB), NOSPLIT, $0-4
 	get_tls(CX)
 	MOVL	g(CX), AX
 	MOVL	g_m(AX), AX
@@ -1259,7 +1259,7 @@ TEXT runtime·duffcopy(SB), NOSPLIT, $0-0
 	MOVW.P	R0, 4(R2)
 	RET
 
-TEXT runtime·fastrand2(SB), NOSPLIT, $-4-4
+TEXT runtime·fastrand1(SB), NOSPLIT, $-4-4
 	MOVW	g_m(g), R1
 	MOVW	m_fastrand(R1), R0
 	ADD.S	R0, R0
@@ -219,7 +219,7 @@ func makemap(t *maptype, hint int64) *hmap {
 	h.count = 0
 	h.B = B
 	h.flags = 0
-	h.hash0 = fastrand2()
+	h.hash0 = fastrand1()
 	h.buckets = buckets
 	h.oldbuckets = nil
 	h.nevacuate = 0
@@ -568,7 +568,7 @@ func mapiterinit(t *maptype, h *hmap, it *hiter) {
 	// iterator state
 	it.bucket = 0
-	it.offset = uint8(fastrand2() & (bucketCnt - 1))
+	it.offset = uint8(fastrand1() & (bucketCnt - 1))
 	it.done = false
 	it.bptr = nil
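Both call sites above feed the runtime's deliberate map randomization: hash0 gives every map its own hash seed, and it.offset starts each iteration at a random slot within a bucket. The observable effect is that range order over a map typically varies between runs and even between loops over the same map, as this ordinary Go program (not runtime code) shows:

	package main

	import "fmt"

	func main() {
		m := map[string]int{"a": 1, "b": 2, "c": 3, "d": 4}
		for round := 0; round < 3; round++ {
			for k := range m {
				fmt.Print(k, " ") // order is randomized per iterator
			}
			fmt.Println()
		}
	}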
@@ -387,7 +387,7 @@ func profilealloc(mp *m, x unsafe.Pointer, size uintptr) {
 	if rate > 0x3fffffff { // make 2*rate not overflow
 		rate = 0x3fffffff
 	}
-	next := int32(fastrand2()) % (2 * int32(rate))
+	next := int32(fastrand1()) % (2 * int32(rate))
 	// Subtract the "remainder" of the current allocation.
 	// Otherwise objects that are close in size to sampling rate
 	// will be under-sampled, because we consistently discard this remainder.
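Here next is drawn roughly uniformly from [0, 2*rate), so the expected gap between heap-profile samples is rate bytes, keeping the sampling unbiased on average. A quick empirical check of that mean, using math/rand as a stand-in for the runtime generator (names and the rate value are illustrative, not runtime API):

	package main

	import (
		"fmt"
		"math/rand"
	)

	func main() {
		const rate = 512 * 1024 // e.g. the default runtime.MemProfileRate
		const n = 1000000
		var sum int64
		for i := 0; i < n; i++ {
			sum += int64(rand.Int31n(2 * rate)) // uniform in [0, 2*rate)
		}
		fmt.Printf("mean sample distance ≈ %d, target %d\n", sum/n, rate)
	}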
@@ -265,19 +265,6 @@ runtime·check(void)
 		runtime·throw("FixedStack is not power-of-2");
 }
 
-uint32
-runtime·fastrand1(void)
-{
-	uint32 x;
-
-	x = g->m->fastrand;
-	x += x;
-	if(x & 0x80000000L)
-		x ^= 0x88888eefUL;
-	g->m->fastrand = x;
-	return x;
-}
-
 static Mutex	ticksLock;
 static int64	ticks;
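The deleted C body is the whole algorithm: one step of a 32-bit shift-and-xor generator over the per-m fastrand word, which the per-arch assembly now implements under the fastrand1 name. A Go transliteration of the removed code, for reference (the seed and driver loop are illustrative only):

	package main

	import "fmt"

	// One generator step: double the state and, if the result's top
	// bit is set, xor in the feedback constant 0x88888eef. The state
	// must be seeded nonzero, or it stays zero forever.
	func fastrandStep(x uint32) uint32 {
		x += x
		if x&0x80000000 != 0 {
			x ^= 0x88888eef
		}
		return x
	}

	func main() {
		x := uint32(1) // the runtime keeps this state per m; 1 is arbitrary here
		for i := 0; i < 5; i++ {
			x = fastrandStep(x)
			fmt.Printf("%#08x\n", x)
		}
	}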
@@ -226,7 +226,7 @@ func selectgoImpl(sel *_select) (uintptr, uint16) {
 	}
 	for i := 1; i < int(sel.ncase); i++ {
 		o := pollorder[i]
-		j := int(fastrand2()) % (i + 1)
+		j := int(fastrand1()) % (i + 1)
 		pollorder[i] = pollorder[j]
 		pollorder[j] = o
 	}
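The loop above is an in-place Fisher–Yates shuffle of the polling order, which is why select chooses among simultaneously ready cases uniformly at random. The same shuffle outside the runtime, with math/rand standing in for fastrand1:

	package main

	import (
		"fmt"
		"math/rand"
	)

	func main() {
		order := []int{0, 1, 2, 3, 4}
		for i := 1; i < len(order); i++ {
			j := rand.Intn(i + 1) // uniform over 0..i inclusive
			order[i], order[j] = order[j], order[i]
		}
		fmt.Println(order)
	}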
@@ -101,9 +101,6 @@ func racemalloc(p unsafe.Pointer, size uintptr)
 //go:noescape
 func memmove(to unsafe.Pointer, from unsafe.Pointer, n uintptr)
 
-// in asm_*.s
-func fastrand2() uint32
-
 const (
 	concurrentSweep = true
 )
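With fastrand2 gone from the stubs, the unified function presumably keeps the same declaration pattern elsewhere in the change: a body-less Go func whose symbol the linker resolves to the TEXT runtime·fastrand1 blocks in asm_*.s. The pattern, mirroring the removed lines:

	// in asm_*.s
	func fastrand1() uint32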