Commit 2b74de3e authored by Josh Bleecher Snyder

runtime: rename fastrand1 to fastrand

Change-Id: I37706ff0a3486827c5b072c95ad890ea87ede847
Reviewed-on: https://go-review.googlesource.com/28210
Run-TryBot: Josh Bleecher Snyder <josharian@gmail.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Brad Fitzpatrick <bradfitz@golang.org>
parent f9dafc74
@@ -109,7 +109,7 @@ func f32hash(p unsafe.Pointer, h uintptr) uintptr {
 	case f == 0:
 		return c1 * (c0 ^ h) // +0, -0
 	case f != f:
-		return c1 * (c0 ^ h ^ uintptr(fastrand1())) // any kind of NaN
+		return c1 * (c0 ^ h ^ uintptr(fastrand())) // any kind of NaN
 	default:
 		return memhash(p, h, 4)
 	}
@@ -121,7 +121,7 @@ func f64hash(p unsafe.Pointer, h uintptr) uintptr {
 	case f == 0:
 		return c1 * (c0 ^ h) // +0, -0
 	case f != f:
-		return c1 * (c0 ^ h ^ uintptr(fastrand1())) // any kind of NaN
+		return c1 * (c0 ^ h ^ uintptr(fastrand())) // any kind of NaN
 	default:
 		return memhash(p, h, 8)
 	}
......
@@ -1573,7 +1573,7 @@ allsame:
 	MOVL	BX, (AX)
 	RET
 
-TEXT runtime·fastrand1(SB), NOSPLIT, $0-4
+TEXT runtime·fastrand(SB), NOSPLIT, $0-4
 	get_tls(CX)
 	MOVL	g(CX), AX
 	MOVL	g_m(AX), AX
......
@@ -2052,7 +2052,7 @@ eqret:
 	MOVB	$0, ret+48(FP)
 	RET
 
-TEXT runtime·fastrand1(SB), NOSPLIT, $0-4
+TEXT runtime·fastrand(SB), NOSPLIT, $0-4
 	get_tls(CX)
 	MOVQ	g(CX), AX
 	MOVQ	g_m(AX), AX
......
@@ -973,7 +973,7 @@ eqret:
 	MOVB	AX, ret+24(FP)
 	RET
 
-TEXT runtime·fastrand1(SB), NOSPLIT, $0-4
+TEXT runtime·fastrand(SB), NOSPLIT, $0-4
 	get_tls(CX)
 	MOVL	g(CX), AX
 	MOVL	g_m(AX), AX
......
@@ -952,7 +952,7 @@ _sib_notfound:
 	MOVW	R0, ret+12(FP)
 	RET
 
-TEXT runtime·fastrand1(SB),NOSPLIT,$-4-4
+TEXT runtime·fastrand(SB),NOSPLIT,$-4-4
 	MOVW	g_m(g), R1
 	MOVW	m_fastrand(R1), R0
 	ADD.S	R0, R0
......
@@ -949,7 +949,7 @@ equal:
 	MOVB	R0, ret+48(FP)
 	RET
 
-TEXT runtime·fastrand1(SB),NOSPLIT,$-8-4
+TEXT runtime·fastrand(SB),NOSPLIT,$-8-4
 	MOVD	g_m(g), R1
 	MOVWU	m_fastrand(R1), R0
 	ADD	R0, R0
......
@@ -822,7 +822,7 @@ notfound:
 	MOVV	R1, ret+24(FP)
 	RET
 
-TEXT runtime·fastrand1(SB), NOSPLIT, $0-4
+TEXT runtime·fastrand(SB), NOSPLIT, $0-4
 	MOVV	g_m(g), R2
 	MOVWU	m_fastrand(R2), R1
 	ADDU	R1, R1
......
@@ -1042,7 +1042,7 @@ samebytes:
 	MOVD	R8, (R7)
 	RET
 
-TEXT runtime·fastrand1(SB), NOSPLIT, $0-4
+TEXT runtime·fastrand(SB), NOSPLIT, $0-4
 	MOVD	g_m(g), R4
 	MOVWZ	m_fastrand(R4), R3
 	ADD	R3, R3
......
@@ -874,7 +874,7 @@ TEXT runtime·memeqbodyclc(SB),NOSPLIT|NOFRAME,$0-0
 	CLC	$1, 0(R3), 0(R5)
 	RET
 
-TEXT runtime·fastrand1(SB), NOSPLIT, $0-4
+TEXT runtime·fastrand(SB), NOSPLIT, $0-4
 	MOVD	g_m(g), R4
 	MOVWZ	m_fastrand(R4), R3
 	ADD	R3, R3
......
@@ -256,7 +256,7 @@ func makemap(t *maptype, hint int64, h *hmap, bucket unsafe.Pointer) *hmap {
 	h.count = 0
 	h.B = B
 	h.flags = 0
-	h.hash0 = fastrand1()
+	h.hash0 = fastrand()
 	h.buckets = buckets
 	h.oldbuckets = nil
 	h.nevacuate = 0
@@ -655,9 +655,9 @@ func mapiterinit(t *maptype, h *hmap, it *hiter) {
 	}
 
 	// decide where to start
-	r := uintptr(fastrand1())
+	r := uintptr(fastrand())
 	if h.B > 31-bucketCntBits {
-		r += uintptr(fastrand1()) << 31
+		r += uintptr(fastrand()) << 31
 	}
 	it.startBucket = r & (uintptr(1)<<h.B - 1)
 	it.offset = uint8(r >> h.B & (bucketCnt - 1))
......
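The two hunks above are where the runtime seeds a new map's hash function (h.hash0) and picks a random start bucket and offset for iteration. A minimal standalone sketch of the user-visible consequence (not part of this change; ordinary map code, nothing runtime-internal): iterating the same map repeatedly can produce different key orders.

package main

import "fmt"

func main() {
	m := map[string]int{"a": 1, "b": 2, "c": 3, "d": 4, "e": 5}
	for i := 0; i < 3; i++ {
		// Each range may start at a different bucket/offset, so the
		// printed key order can differ from one pass to the next.
		var keys []string
		for k := range m {
			keys = append(keys, k)
		}
		fmt.Println(keys)
	}
}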
@@ -843,7 +843,7 @@ func nextSample() int32 {
 	// x = -log_e(q) * period
 	// x = log_2(q) * (-log_e(2)) * period    ; Using log_2 for efficiency
 	const randomBitCount = 26
-	q := fastrand1()%(1<<randomBitCount) + 1
+	q := fastrand()%(1<<randomBitCount) + 1
 	qlog := fastlog2(float64(q)) - randomBitCount
 	if qlog > 0 {
 		qlog = 0
@@ -861,7 +861,7 @@ func nextSampleNoFP() int32 {
 		rate = 0x3fffffff
 	}
 	if rate != 0 {
-		return int32(int(fastrand1()) % (2 * rate))
+		return int32(int(fastrand()) % (2 * rate))
 	}
 	return 0
 }
......
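nextSample, shown above, draws the distance to the next heap-profile sample from an exponential distribution with mean period, using the log2 identity spelled out in its comments. A rough standalone sketch of that arithmetic, with math.Log2 and math/rand standing in for the runtime's fastlog2 and fastrand (the helper name and the demo period are illustrative only):

package main

import (
	"fmt"
	"math"
	"math/rand"
)

// nextSampleSketch mirrors the math in nextSample:
//	x = -log_e(q) * period = log_2(q) * (-log_e(2)) * period
// where q is uniform in (0, 2^26], so log_2(q) - 26 is log_2 of a value in (0, 1].
func nextSampleSketch(period int32) int32 {
	const randomBitCount = 26
	q := rand.Uint32()%(1<<randomBitCount) + 1
	qlog := math.Log2(float64(q)) - randomBitCount
	if qlog > 0 {
		qlog = 0
	}
	const minusLog2 = -0.6931471805599453 // -ln(2)
	return int32(qlog*(minusLog2*float64(period))) + 1
}

func main() {
	fmt.Println(nextSampleSketch(512 * 1024)) // e.g. a 512 KiB sampling period
}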
@@ -616,7 +616,7 @@ func (c *gcControllerState) enlistWorker() {
 	}
 	myID := gp.m.p.ptr().id
 	for tries := 0; tries < 5; tries++ {
-		id := int32(fastrand1() % uint32(gomaxprocs-1))
+		id := int32(fastrand() % uint32(gomaxprocs-1))
 		if id >= myID {
 			id++
 		}
......
@@ -289,7 +289,7 @@ func blockevent(cycles int64, skip int) {
 		cycles = 1
 	}
 	rate := int64(atomic.Load64(&blockprofilerate))
-	if rate <= 0 || (rate > cycles && int64(fastrand1())%rate > cycles) {
+	if rate <= 0 || (rate > cycles && int64(fastrand())%rate > cycles) {
 		return
 	}
 	gp := getg()
......
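The condition above implements duration-proportional sampling for the block profiler: an event that blocked for at least rate cycles is always recorded, and a shorter one is recorded with probability roughly cycles/rate. A small hedged sketch of that decision (the function name is made up; math/rand replaces fastrand and ignores the modulo bias of the original):

package main

import (
	"fmt"
	"math/rand"
)

// shouldSampleBlock mimics the check in blockevent: record long blocks always,
// short ones with probability about cycles/rate.
func shouldSampleBlock(cycles, rate int64) bool {
	if rate <= 0 {
		return false
	}
	if cycles >= rate {
		return true
	}
	return rand.Int63n(rate) <= cycles
}

func main() {
	fmt.Println(shouldSampleBlock(100, 10000)) // true roughly 1% of the time
}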
@@ -17,8 +17,8 @@ func checkgoarm() {
 //go:nosplit
 func cputicks() int64 {
-	// Currently cputicks() is used in blocking profiler and to seed runtime·fastrand1().
+	// Currently cputicks() is used in blocking profiler and to seed runtime·fastrand().
 	// runtime·nanotime() is a poor approximation of CPU ticks that is enough for the profiler.
-	// TODO: need more entropy to better seed fastrand1.
+	// TODO: need more entropy to better seed fastrand.
 	return nanotime()
 }
@@ -6,8 +6,8 @@ package runtime
 //go:nosplit
 func cputicks() int64 {
-	// Currently cputicks() is used in blocking profiler and to seed runtime·fastrand1().
+	// Currently cputicks() is used in blocking profiler and to seed runtime·fastrand().
 	// runtime·nanotime() is a poor approximation of CPU ticks that is enough for the profiler.
-	// TODO: need more entropy to better seed fastrand1.
+	// TODO: need more entropy to better seed fastrand.
 	return nanotime()
 }
@@ -17,8 +17,8 @@ func checkgoarm() {
 //go:nosplit
 func cputicks() int64 {
-	// Currently cputicks() is used in blocking profiler and to seed runtime·fastrand1().
+	// Currently cputicks() is used in blocking profiler and to seed runtime·fastrand().
 	// runtime·nanotime() is a poor approximation of CPU ticks that is enough for the profiler.
-	// TODO: need more entropy to better seed fastrand1.
+	// TODO: need more entropy to better seed fastrand.
 	return nanotime()
 }
@@ -53,8 +53,8 @@ func archauxv(tag, val uintptr) {
 //go:nosplit
 func cputicks() int64 {
-	// Currently cputicks() is used in blocking profiler and to seed fastrand1().
+	// Currently cputicks() is used in blocking profiler and to seed fastrand().
 	// nanotime() is a poor approximation of CPU ticks that is enough for the profiler.
-	// randomNumber provides better seeding of fastrand1.
+	// randomNumber provides better seeding of fastrand.
 	return nanotime() + int64(randomNumber)
 }
@@ -19,8 +19,8 @@ func archauxv(tag, val uintptr) {
 //go:nosplit
 func cputicks() int64 {
-	// Currently cputicks() is used in blocking profiler and to seed fastrand1().
+	// Currently cputicks() is used in blocking profiler and to seed fastrand().
 	// nanotime() is a poor approximation of CPU ticks that is enough for the profiler.
-	// randomNumber provides better seeding of fastrand1.
+	// randomNumber provides better seeding of fastrand.
 	return nanotime() + int64(randomNumber)
 }
@@ -22,9 +22,9 @@ func archauxv(tag, val uintptr) {
 //go:nosplit
 func cputicks() int64 {
-	// Currently cputicks() is used in blocking profiler and to seed fastrand1().
+	// Currently cputicks() is used in blocking profiler and to seed fastrand().
 	// nanotime() is a poor approximation of CPU ticks that is enough for the profiler.
-	// randomNumber provides better seeding of fastrand1.
+	// randomNumber provides better seeding of fastrand.
 	return nanotime() + int64(randomNumber)
 }
......
@@ -16,8 +16,8 @@ func checkgoarm() {
 //go:nosplit
 func cputicks() int64 {
-	// Currently cputicks() is used in blocking profiler and to seed runtime·fastrand1().
+	// Currently cputicks() is used in blocking profiler and to seed runtime·fastrand().
 	// runtime·nanotime() is a poor approximation of CPU ticks that is enough for the profiler.
-	// TODO: need more entropy to better seed fastrand1.
+	// TODO: need more entropy to better seed fastrand.
 	return nanotime()
 }
@@ -28,8 +28,8 @@ func checkgoarm() {
 //go:nosplit
 func cputicks() int64 {
-	// Currently cputicks() is used in blocking profiler and to seed runtime·fastrand1().
+	// Currently cputicks() is used in blocking profiler and to seed runtime·fastrand().
 	// runtime·nanotime() is a poor approximation of CPU ticks that is enough for the profiler.
-	// TODO: need more entropy to better seed fastrand1.
+	// TODO: need more entropy to better seed fastrand.
 	return nanotime()
 }
@@ -17,8 +17,8 @@ func checkgoarm() {
 //go:nosplit
 func cputicks() int64 {
-	// Currently cputicks() is used in blocking profiler and to seed runtime·fastrand1().
+	// Currently cputicks() is used in blocking profiler and to seed runtime·fastrand().
 	// runtime·nanotime() is a poor approximation of CPU ticks that is enough for the profiler.
-	// TODO: need more entropy to better seed fastrand1.
+	// TODO: need more entropy to better seed fastrand.
 	return nanotime()
 }
@@ -10,8 +10,8 @@ func checkgoarm() {
 //go:nosplit
 func cputicks() int64 {
-	// Currently cputicks() is used in blocking profiler and to seed runtime·fastrand1().
+	// Currently cputicks() is used in blocking profiler and to seed runtime·fastrand().
 	// runtime·nanotime() is a poor approximation of CPU ticks that is enough for the profiler.
-	// TODO: need more entropy to better seed fastrand1.
+	// TODO: need more entropy to better seed fastrand.
 	return nanotime()
 }
@@ -1909,7 +1909,7 @@ top:
 		atomic.Xadd(&sched.nmspinning, 1)
 	}
 	for i := 0; i < 4; i++ {
-		for enum := stealOrder.start(fastrand1()); !enum.done(); enum.next() {
+		for enum := stealOrder.start(fastrand()); !enum.done(); enum.next() {
 			if sched.gcwaiting != 0 {
 				goto top
 			}
@@ -4034,7 +4034,7 @@ const randomizeScheduler = raceenabled
 // If the run queue is full, runnext puts g on the global queue.
 // Executed only by the owner P.
 func runqput(_p_ *p, gp *g, next bool) {
-	if randomizeScheduler && next && fastrand1()%2 == 0 {
+	if randomizeScheduler && next && fastrand()%2 == 0 {
 		next = false
 	}
 
@@ -4087,7 +4087,7 @@ func runqputslow(_p_ *p, gp *g, h, t uint32) bool {
 
 	if randomizeScheduler {
 		for i := uint32(1); i <= n; i++ {
-			j := fastrand1() % (i + 1)
+			j := fastrand() % (i + 1)
 			batch[i], batch[j] = batch[j], batch[i]
 		}
 	}
......
@@ -270,7 +270,7 @@ func selectgoImpl(sel *hselect) (uintptr, uint16) {
 	pollslice := slice{unsafe.Pointer(sel.pollorder), int(sel.ncase), int(sel.ncase)}
 	pollorder := *(*[]uint16)(unsafe.Pointer(&pollslice))
 	for i := 1; i < int(sel.ncase); i++ {
-		j := int(fastrand1()) % (i + 1)
+		j := int(fastrand()) % (i + 1)
 		pollorder[i] = pollorder[j]
 		pollorder[j] = uint16(i)
 	}
......
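The loop above is the inside-out variant of the Fisher-Yates shuffle: it grows a uniformly random permutation of the case indices one element at a time, which is what gives select its random polling order. A standalone sketch of the same pattern (math/rand in place of fastrand; the names are illustrative):

package main

import (
	"fmt"
	"math/rand"
)

// randomOrder builds a uniformly random permutation of 0..n-1 using the
// inside-out Fisher-Yates shuffle, mirroring how pollorder is filled in.
func randomOrder(n int) []uint16 {
	order := make([]uint16, n) // order[0] starts as 0
	for i := 1; i < n; i++ {
		j := rand.Intn(i + 1)
		order[i] = order[j]
		order[j] = uint16(i)
	}
	return order
}

func main() {
	fmt.Println(randomOrder(8))
}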
@@ -81,7 +81,7 @@ func reflect_memmove(to, from unsafe.Pointer, n uintptr) {
 var hashLoad = loadFactor
 
 // in asm_*.s
-func fastrand1() uint32
+func fastrand() uint32
 
 // in asm_*.s
 //go:noescape
......
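fastrand is declared here in Go and implemented per architecture in the assembly files touched above; each implementation advances a small per-m generator stored in m.fastrand. A rough Go rendering of that step, for orientation only: the double-then-conditionally-xor shape follows the visible ADD/ADD.S instructions, but the xor constant and the exact flag handling are not shown in these hunks, so treat them as assumptions.

package main

import "fmt"

// fastrandSketch is an approximate, non-authoritative rendering of the
// per-m generator the assembly implements: double the state and, when the
// result's sign bit is set, fold in a fixed constant. The constant
// 0x88888eef is an assumption; it is not visible in these hunks.
func fastrandSketch(state *uint32) uint32 {
	x := *state
	x += x // the ADD/ADD.S "double" step; wraps on overflow
	if int32(x) < 0 {
		x ^= 0x88888eef
	}
	*state = x
	return x
}

func main() {
	seed := uint32(0x5eed) // any nonzero seed
	for i := 0; i < 4; i++ {
		fmt.Println(fastrandSketch(&seed))
	}
}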
@@ -437,7 +437,7 @@ func pcvalue(f *_func, off int32, targetpc uintptr, cache *pcvalueCache, strict
 	// a recursive stack's cycle is slightly
 	// larger than the cache.
 	if cache != nil {
-		ci := fastrand1() % uint32(len(cache.entries))
+		ci := fastrand() % uint32(len(cache.entries))
 		cache.entries[ci] = pcvalueCacheEnt{
 			targetpc: targetpc,
 			off:      off,
......