Commit d21638b5 authored by Russ Cox

cmd/cc, runtime: preserve C runtime type names in generated Go

uintptr or uint64 in the runtime C were turning into uint in the Go,
bool was turning into uint8, and so on. Fix that.

Also delete Go wrappers for C functions.
The C functions can be called directly now
(but still eventually need to be converted to Go).

LGTM=bradfitz, minux, iant
R=golang-codereviews, bradfitz, iant, minux
CC=golang-codereviews, khr, r
https://golang.org/cl/138740043
parent 43d4f93c
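
Note: the effect is easiest to see on fields that appear later in this diff. m.scalararg is declared with C uintptr elements and g.paniconfault is declared bool; a rough sketch of the generated Go before and after (fields trimmed and merged onto one struct for illustration, so treat the exact shape as an assumption):

	package demo

	// before: 32/64-bit ambiguous C types collapsed to uint, C bool to uint8
	type mBefore struct {
		scalararg    [4]uint
		paniconfault uint8
	}

	// after: the C typedef names survive into the generated Go
	type mAfter struct {
		scalararg    [4]uintptr
		paniconfault bool
	}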
@@ -1402,6 +1402,10 @@ xdecl(int c, Type *t, Sym *s)
 	}
 	tmerge(t, s);
 	s->type = t;
+	if(c == CTYPEDEF && (typechlv[t->etype] || typefd[t->etype])) {
+		s->type = copytyp(t);
+		s->type->tag = s;
+	}
 	s->class = c;
 	s->block = 0;
 	s->offset = o;
...
@@ -188,60 +188,27 @@ printtypename(Type *t)
 	switch(t->etype) {
 	case TINT:
-		Bprint(&outbuf, "int32");
-		break;
 	case TUINT:
-		Bprint(&outbuf, "uint32");
-		break;
 	case TCHAR:
-		Bprint(&outbuf, "int8");
-		break;
 	case TUCHAR:
-		Bprint(&outbuf, "uint8");
-		break;
 	case TSHORT:
-		Bprint(&outbuf, "int16");
-		break;
 	case TUSHORT:
-		Bprint(&outbuf, "uint16");
-		break;
 	case TLONG:
-		// The 32/64-bit ambiguous types (int,uint,uintptr)
-		// are assigned a TLONG/TULONG to distinguish them
-		// from always 32-bit types which get a TINT/TUINT.
-		// (See int_x/uint_x in pkg/runtime/runtime.h.)
-		// For LONG and VLONG types, we generate the
-		// unqualified Go type when appropriate.
-		// This makes it easier to write Go code that
-		// modifies objects with autogenerated-from-C types.
-		if(ewidth[TIND] == 4)
-			Bprint(&outbuf, "int");
-		else
-			Bprint(&outbuf, "int32");
-		break;
 	case TULONG:
-		if(ewidth[TIND] == 4)
-			Bprint(&outbuf, "uint");
-		else
-			Bprint(&outbuf, "uint32");
-		break;
 	case TVLONG:
-		if(ewidth[TIND] == 8)
-			Bprint(&outbuf, "int");
-		else
-			Bprint(&outbuf, "int64");
-		break;
 	case TUVLONG:
-		if(ewidth[TIND] == 8)
-			Bprint(&outbuf, "uint");
-		else
-			Bprint(&outbuf, "uint64");
-		break;
 	case TFLOAT:
-		Bprint(&outbuf, "float32");
-		break;
 	case TDOUBLE:
-		Bprint(&outbuf, "float64");
+		// All names used in the runtime code should be typedefs.
+		if(t->tag != nil) {
+			if(strcmp(t->tag->name, "intgo") == 0)
+				Bprint(&outbuf, "int");
+			else if(strcmp(t->tag->name, "uintgo") == 0)
+				Bprint(&outbuf, "uint");
+			else
+				Bprint(&outbuf, "%s", t->tag->name);
+		} else
+			Bprint(&outbuf, "C.%T", t);
 		break;
 	case TUNION:
 	case TSTRUCT:
...
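
Note: the new code relies on the typedef name that the dcl.c hunk above now records in t->tag. A rough Go rendering of the name-selection logic, with intgo/uintgo mapped to Go's int/uint and every other typedef name passed through verbatim (a simplified sketch, not the generator itself):

	// goTypeName mirrors the C switch: "" means no typedef name was
	// recorded, in which case the generator falls back to "C.%T".
	func goTypeName(tag string) string {
		switch tag {
		case "intgo":
			return "int" // Go's int
		case "uintgo":
			return "uint" // Go's uint
		case "":
			return "" // caller emits the qualified C.%T form instead
		default:
			return tag // uintptr, bool, uint64, ... preserved as-is
		}
	}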
@@ -7,3 +7,6 @@ package runtime
 const (
 	cacheLineSize = 64
 )
+
+type uintreg uint32
+type intptr int32 // TODO(rsc): remove

@@ -7,3 +7,6 @@ package runtime
 const (
 	cacheLineSize = 64
 )
+
+type uintreg uint64
+type intptr int64 // TODO(rsc): remove

@@ -7,3 +7,6 @@ package runtime
 const (
 	cacheLineSize = 64
 )
+
+type uintreg uint64
+type intptr int32 // TODO(rsc): remove

@@ -7,3 +7,6 @@ package runtime
 const (
 	cacheLineSize = 32
 )
+
+type uintreg uint32
+type intptr int32 // TODO(rsc): remove
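
Note: the four hunks above add the same two types to each per-architecture arch file: uintreg is as wide as a machine register, intptr as wide as a pointer. They differ only in the variant with uintreg uint64 and intptr int32 (evidently amd64p32, where registers are 64 bits but pointers 32). A compile-time sketch of the distinction under those definitions:

	package demo

	import "unsafe"

	type uintreg uint64 // register width
	type intptr int32   // pointer width; TODO(rsc): remove

	// Negative array lengths do not compile, so these assert minimum widths.
	var _ [unsafe.Sizeof(uintreg(0)) - 8]byte // registers hold 8 bytes
	var _ [unsafe.Sizeof(intptr(0)) - 4]byte  // pointers fit in 4 bytes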
@@ -502,6 +502,9 @@ TEXT runtime·cas(SB), NOSPLIT, $0-13
 	MOVB	AX, ret+12(FP)
 	RET

+TEXT runtime·casuintptr(SB), NOSPLIT, $0-13
+	JMP	runtime·cas(SB)
+
 // bool runtime·cas64(uint64 *val, uint64 old, uint64 new)
 // Atomically:
 //	if(*val == *old){
...
@@ -620,6 +620,9 @@ cas64_fail:
 	MOVL	$0, AX
 	MOVB	AX, ret+24(FP)
 	RET

+TEXT runtime·casuintptr(SB), NOSPLIT, $0-25
+	JMP	runtime·cas64(SB)
+
 // bool	casp(void **val, void *old, void *new)
 // Atomically:
...
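
Note: casuintptr simply aliases the compare-and-swap of the right width: the 386 stub jumps to the 32-bit runtime·cas, the amd64 stub above to runtime·cas64, and the frame sizes ($0-13, $0-25) cover ptr+old+new plus one result byte. The matching Go-side prototype is not visible in this view; inferred from the rawstring call site later in the CL, so treat it as an assumption:

	// declared in Go, implemented by the per-arch stubs above
	func casuintptr(ptr *uintptr, old, new uintptr) bool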
@@ -275,7 +275,7 @@ TEXT runtime·newstackcall(SB), NOSPLIT, $0-12
 	// restore when returning from f.
 	MOVL	0(SP), AX	// our caller's PC
 	MOVL	AX, (m_morebuf+gobuf_pc)(BX)
-	LEAL	addr+4(FP), AX	// our caller's SP
+	LEAL	fn+0(FP), AX	// our caller's SP
 	MOVL	AX, (m_morebuf+gobuf_sp)(BX)
 	MOVL	g(CX), AX
 	MOVL	AX, (m_morebuf+gobuf_g)(BX)

@@ -562,6 +562,9 @@ TEXT runtime·cas(SB), NOSPLIT, $0-17
 	MOVB	AX, ret+16(FP)
 	RET

+TEXT runtime·casuintptr(SB), NOSPLIT, $0-17
+	JMP	runtime·cas(SB)
+
 // bool	runtime·cas64(uint64 *val, uint64 old, uint64 new)
 // Atomically:
 //	if(*val == *old){
...
@@ -671,7 +671,7 @@ TEXT runtime·abort(SB),NOSPLIT,$-4-0
 // TEXT runtime·cas(SB),NOSPLIT,$0
 //	B	runtime·armcas(SB)
 //
-TEXT runtime·armcas(SB),NOSPLIT,$0-12
+TEXT runtime·armcas(SB),NOSPLIT,$0-13
 	MOVW	valptr+0(FP), R1
 	MOVW	old+4(FP), R2
 	MOVW	new+8(FP), R3

@@ -683,11 +683,16 @@ casl:
 	CMP	$0, R0
 	BNE	casl
 	MOVW	$1, R0
+	MOVB	R0, ret+12(FP)
 	RET
 casfail:
 	MOVW	$0, R0
+	MOVB	R0, ret+12(FP)
 	RET

+TEXT runtime·casuintptr(SB), NOSPLIT, $0-13
+	JMP	runtime·cas(SB)
+
 TEXT runtime·stackguard(SB),NOSPLIT,$0-8
 	MOVW	R13, R1
 	MOVW	g_stackguard(g), R2
...
@@ -66,7 +66,7 @@ func chanbuf(c *hchan, i uint) unsafe.Pointer {
 // entry point for c <- x from compiled code
 //go:nosplit
 func chansend1(t *chantype, c *hchan, elem unsafe.Pointer) {
-	chansend(t, c, elem, true, gogetcallerpc(unsafe.Pointer(&t)))
+	chansend(t, c, elem, true, getcallerpc(unsafe.Pointer(&t)))
 }

 /*
@@ -127,7 +127,7 @@ func chansend(t *chantype, c *hchan, ep unsafe.Pointer, block bool, callerpc uin
 	var t0 int64
 	if blockprofilerate > 0 {
-		t0 = gocputicks()
+		t0 = cputicks()
 	}

 	golock(&c.lock)
@@ -155,7 +155,7 @@ func chansend(t *chantype, c *hchan, ep unsafe.Pointer, block bool, callerpc uin
 			// to assign to both types in Go.  At some point we'll
 			// write the Go types directly instead of generating them
 			// via the C types.  At that point, this nastiness goes away.
-			*(*int64)(unsafe.Pointer(&sg.releasetime)) = gocputicks()
+			*(*int64)(unsafe.Pointer(&sg.releasetime)) = cputicks()
 		}
 		goready(recvg)
 		return true
@@ -189,7 +189,7 @@ func chansend(t *chantype, c *hchan, ep unsafe.Pointer, block bool, callerpc uin
 		panic("send on closed channel")
 	}
 	if mysg.releasetime > 0 {
-		goblockevent(int64(mysg.releasetime)-t0, 3)
+		blockevent(int64(mysg.releasetime)-t0, 2)
 	}
 	if mysg != gp.waiting {
 		gothrow("G waiting list is corrupted!")
@@ -248,14 +248,14 @@ func chansend(t *chantype, c *hchan, ep unsafe.Pointer, block bool, callerpc uin
 		recvg := sg.g
 		gounlock(&c.lock)
 		if sg.releasetime != 0 {
-			*(*int64)(unsafe.Pointer(&sg.releasetime)) = gocputicks()
+			*(*int64)(unsafe.Pointer(&sg.releasetime)) = cputicks()
 		}
 		goready(recvg)
 	} else {
 		gounlock(&c.lock)
 	}
 	if t1 > 0 {
-		goblockevent(t1-t0, 3)
+		blockevent(t1-t0, 2)
 	}
 	return true
 }
@@ -285,7 +285,7 @@ func (q *waitq) dequeue() *sudog {
 		// if sgp participates in a select and is already signaled, ignore it
 		if sgp.selectdone != nil {
 			// claim the right to signal
-			if *sgp.selectdone != 0 || !gocas(sgp.selectdone, 0, 1) {
+			if *sgp.selectdone != 0 || !cas(sgp.selectdone, 0, 1) {
 				continue
 			}
 		}
...
@@ -75,7 +75,7 @@ var (

 func NewParFor(nthrmax uint32) *ParFor {
 	mp := acquirem()
-	mp.scalararg[0] = uint(nthrmax)
+	mp.scalararg[0] = uintptr(nthrmax)
 	onM(&newparfor_m)
 	desc := (*ParFor)(mp.ptrarg[0])
 	mp.ptrarg[0] = nil
@@ -88,8 +88,8 @@ func ParForSetup(desc *ParFor, nthr, n uint32, ctx *byte, wait bool, body func(*
 	mp.ptrarg[0] = unsafe.Pointer(desc)
 	mp.ptrarg[1] = unsafe.Pointer(ctx)
 	mp.ptrarg[2] = **(**unsafe.Pointer)(unsafe.Pointer(&body))
-	mp.scalararg[0] = uint(nthr)
-	mp.scalararg[1] = uint(n)
+	mp.scalararg[0] = uintptr(nthr)
+	mp.scalararg[1] = uintptr(n)
 	mp.scalararg[2] = 0
 	if wait {
 		mp.scalararg[2] = 1
@@ -108,7 +108,7 @@ func ParForDo(desc *ParFor) {
 func ParForIters(desc *ParFor, tid uint32) (uint32, uint32) {
 	mp := acquirem()
 	mp.ptrarg[0] = unsafe.Pointer(desc)
-	mp.scalararg[0] = uint(tid)
+	mp.scalararg[0] = uintptr(tid)
 	onM(&parforiters_m)
 	begin := uint32(mp.scalararg[0])
 	end := uint32(mp.scalararg[1])
...
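
Note: m.scalararg is an array of C uintptr, so the generated Go field is now uintptr-typed rather than uint; that is all the uint(...) → uintptr(...) churn here. Narrow scalars round-trip losslessly through a slot, as ParForIters relies on when it reads begin/end back as uint32 (portable sketch):

	package demo

	// roundtrip stores a uint32 in a pointer-sized slot and reads it back.
	func roundtrip(tid uint32) uint32 {
		slot := uintptr(tid) // store: widen (or keep width)
		return uint32(slot)  // load: truncate back, losslessly
	}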
@@ -162,12 +162,12 @@ func makemap(t *maptype, hint int64) *hmap {
 	}

 	// check compiler's and reflect's math
-	if t.key.size > maxKeySize && (t.indirectkey == 0 || t.keysize != uint8(ptrSize)) ||
-		t.key.size <= maxKeySize && (t.indirectkey == 1 || t.keysize != uint8(t.key.size)) {
+	if t.key.size > maxKeySize && (!t.indirectkey || t.keysize != uint8(ptrSize)) ||
+		t.key.size <= maxKeySize && (t.indirectkey || t.keysize != uint8(t.key.size)) {
 		gothrow("key size wrong")
 	}
-	if t.elem.size > maxValueSize && (t.indirectvalue == 0 || t.valuesize != uint8(ptrSize)) ||
-		t.elem.size <= maxValueSize && (t.indirectvalue == 1 || t.valuesize != uint8(t.elem.size)) {
+	if t.elem.size > maxValueSize && (!t.indirectvalue || t.valuesize != uint8(ptrSize)) ||
+		t.elem.size <= maxValueSize && (t.indirectvalue || t.valuesize != uint8(t.elem.size)) {
 		gothrow("value size wrong")
 	}
@@ -234,7 +234,7 @@ func makemap(t *maptype, hint int64) *hmap {
 // hold onto it for very long.
 func mapaccess1(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
 	if raceenabled && h != nil {
-		callerpc := gogetcallerpc(unsafe.Pointer(&t))
+		callerpc := getcallerpc(unsafe.Pointer(&t))
 		fn := mapaccess1
 		pc := **(**uintptr)(unsafe.Pointer(&fn))
 		racereadpc(unsafe.Pointer(h), callerpc, pc)
@@ -263,12 +263,12 @@ func mapaccess1(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
 				continue
 			}
 			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
-			if t.indirectkey != 0 {
+			if t.indirectkey {
 				k = *((*unsafe.Pointer)(k))
 			}
 			if alg.equal(key, k, uintptr(t.key.size)) {
 				v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
-				if t.indirectvalue != 0 {
+				if t.indirectvalue {
 					v = *((*unsafe.Pointer)(v))
 				}
 				return v
@@ -283,7 +283,7 @@ func mapaccess1(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
 func mapaccess2(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, bool) {
 	if raceenabled && h != nil {
-		callerpc := gogetcallerpc(unsafe.Pointer(&t))
+		callerpc := getcallerpc(unsafe.Pointer(&t))
 		fn := mapaccess2
 		pc := **(**uintptr)(unsafe.Pointer(&fn))
 		racereadpc(unsafe.Pointer(h), callerpc, pc)
@@ -312,12 +312,12 @@ func mapaccess2(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, bool)
 				continue
 			}
 			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
-			if t.indirectkey != 0 {
+			if t.indirectkey {
 				k = *((*unsafe.Pointer)(k))
 			}
 			if alg.equal(key, k, uintptr(t.key.size)) {
 				v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
-				if t.indirectvalue != 0 {
+				if t.indirectvalue {
 					v = *((*unsafe.Pointer)(v))
 				}
 				return v, true
@@ -355,12 +355,12 @@ func mapaccessK(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, unsafe
 				continue
 			}
 			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
-			if t.indirectkey != 0 {
+			if t.indirectkey {
 				k = *((*unsafe.Pointer)(k))
 			}
 			if alg.equal(key, k, uintptr(t.key.size)) {
 				v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
-				if t.indirectvalue != 0 {
+				if t.indirectvalue {
 					v = *((*unsafe.Pointer)(v))
 				}
 				return k, v
@@ -378,7 +378,7 @@ func mapassign1(t *maptype, h *hmap, key unsafe.Pointer, val unsafe.Pointer) {
 		panic("assignment to entry in nil map")
 	}
 	if raceenabled {
-		callerpc := gogetcallerpc(unsafe.Pointer(&t))
+		callerpc := getcallerpc(unsafe.Pointer(&t))
 		fn := mapassign1
 		pc := **(**uintptr)(unsafe.Pointer(&fn))
 		racewritepc(unsafe.Pointer(h), callerpc, pc)
@@ -422,7 +422,7 @@ again:
 			}
 			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
 			k2 := k
-			if t.indirectkey != 0 {
+			if t.indirectkey {
 				k2 = *((*unsafe.Pointer)(k2))
 			}
 			if !alg.equal(key, k2, uintptr(t.key.size)) {
@@ -432,7 +432,7 @@ again:
 			memmove(k2, key, uintptr(t.key.size))
 			v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
 			v2 := v
-			if t.indirectvalue != 0 {
+			if t.indirectvalue {
 				v2 = *((*unsafe.Pointer)(v2))
 			}
 			memmove(v2, val, uintptr(t.elem.size))
@@ -463,7 +463,7 @@ again:
 	}

 	// store new key/value at insert position
-	if t.indirectkey != 0 {
+	if t.indirectkey {
 		if checkgc {
 			memstats.next_gc = memstats.heap_alloc
 		}
@@ -471,7 +471,7 @@ again:
 		*(*unsafe.Pointer)(insertk) = kmem
 		insertk = kmem
 	}
-	if t.indirectvalue != 0 {
+	if t.indirectvalue {
 		if checkgc {
 			memstats.next_gc = memstats.heap_alloc
 		}
@@ -487,7 +487,7 @@ again:
 func mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
 	if raceenabled && h != nil {
-		callerpc := gogetcallerpc(unsafe.Pointer(&t))
+		callerpc := getcallerpc(unsafe.Pointer(&t))
 		fn := mapdelete
 		pc := **(**uintptr)(unsafe.Pointer(&fn))
 		racewritepc(unsafe.Pointer(h), callerpc, pc)
@@ -514,7 +514,7 @@ func mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
 			}
 			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
 			k2 := k
-			if t.indirectkey != 0 {
+			if t.indirectkey {
 				k2 = *((*unsafe.Pointer)(k2))
 			}
 			if !alg.equal(key, k2, uintptr(t.key.size)) {
@@ -544,7 +544,7 @@ func mapiterinit(t *maptype, h *hmap, it *hiter) {
 	it.bptr = nil

 	if raceenabled && h != nil {
-		callerpc := gogetcallerpc(unsafe.Pointer(&t))
+		callerpc := getcallerpc(unsafe.Pointer(&t))
 		fn := mapiterinit
 		pc := **(**uintptr)(unsafe.Pointer(&fn))
 		racereadpc(unsafe.Pointer(h), callerpc, pc)
@@ -579,7 +579,7 @@ func mapiterinit(t *maptype, h *hmap, it *hiter) {
 		if old == old|iterator|oldIterator {
 			break
 		}
-		if gocas(&h.flags, old, old|iterator|oldIterator) {
+		if cas(&h.flags, old, old|iterator|oldIterator) {
 			break
 		}
 	}
@@ -590,7 +590,7 @@ func mapiterinit(t *maptype, h *hmap, it *hiter) {
 func mapiternext(it *hiter) {
 	h := it.h
 	if raceenabled {
-		callerpc := gogetcallerpc(unsafe.Pointer(&it))
+		callerpc := getcallerpc(unsafe.Pointer(&it))
 		fn := mapiternext
 		pc := **(**uintptr)(unsafe.Pointer(&fn))
 		racereadpc(unsafe.Pointer(h), callerpc, pc)
@@ -648,7 +648,7 @@ next:
 			// to the other new bucket (each oldbucket expands to two
 			// buckets during a grow).
 			k2 := k
-			if t.indirectkey != 0 {
+			if t.indirectkey {
 				k2 = *((*unsafe.Pointer)(k2))
 			}
 			if alg.equal(k2, k2, uintptr(t.key.size)) {
@@ -673,11 +673,11 @@ next:
 		}
 		if b.tophash[offi] != evacuatedX && b.tophash[offi] != evacuatedY {
 			// this is the golden data, we can return it.
-			if t.indirectkey != 0 {
+			if t.indirectkey {
 				k = *((*unsafe.Pointer)(k))
 			}
 			it.key = k
-			if t.indirectvalue != 0 {
+			if t.indirectvalue {
 				v = *((*unsafe.Pointer)(v))
 			}
 			it.value = v
@@ -685,7 +685,7 @@ next:
 			// The hash table has grown since the iterator was started.
 			// The golden data for this key is now somewhere else.
 			k2 := k
-			if t.indirectkey != 0 {
+			if t.indirectkey {
 				k2 = *((*unsafe.Pointer)(k2))
 			}
 			if alg.equal(k2, k2, uintptr(t.key.size)) {
@@ -706,7 +706,7 @@ next:
 				// us because when key!=key we can't look it up
 				// successfully in the current table.
 				it.key = k2
-				if t.indirectvalue != 0 {
+				if t.indirectvalue {
 					v = *((*unsafe.Pointer)(v))
 				}
 				it.value = v
@@ -790,7 +790,7 @@ func evacuate(t *maptype, h *hmap, oldbucket uintptr) {
 				gothrow("bad map state")
 			}
 			k2 := k
-			if t.indirectkey != 0 {
+			if t.indirectkey {
 				k2 = *((*unsafe.Pointer)(k2))
 			}
 			// Compute hash to make our evacuation decision (whether we need
@@ -834,12 +834,12 @@ func evacuate(t *maptype, h *hmap, oldbucket uintptr) {
 					xv = add(xk, bucketCnt*uintptr(t.keysize))
 				}
 				x.tophash[xi] = top
-				if t.indirectkey != 0 {
+				if t.indirectkey {
 					*(*unsafe.Pointer)(xk) = k2 // copy pointer
 				} else {
 					memmove(xk, k, uintptr(t.key.size)) // copy value
 				}
-				if t.indirectvalue != 0 {
+				if t.indirectvalue {
 					*(*unsafe.Pointer)(xv) = *(*unsafe.Pointer)(v)
 				} else {
 					memmove(xv, v, uintptr(t.elem.size))
@@ -861,12 +861,12 @@ func evacuate(t *maptype, h *hmap, oldbucket uintptr) {
 					yv = add(yk, bucketCnt*uintptr(t.keysize))
 				}
 				y.tophash[yi] = top
-				if t.indirectkey != 0 {
+				if t.indirectkey {
 					*(*unsafe.Pointer)(yk) = k2
 				} else {
 					memmove(yk, k, uintptr(t.key.size))
 				}
-				if t.indirectvalue != 0 {
+				if t.indirectvalue {
 					*(*unsafe.Pointer)(yv) = *(*unsafe.Pointer)(v)
 				} else {
 					memmove(yv, v, uintptr(t.elem.size))
@@ -941,7 +941,7 @@ func reflect_maplen(h *hmap) int {
 		return 0
 	}
 	if raceenabled {
-		callerpc := gogetcallerpc(unsafe.Pointer(&h))
+		callerpc := getcallerpc(unsafe.Pointer(&h))
 		fn := reflect_maplen
 		pc := **(**uintptr)(unsafe.Pointer(&fn))
 		racereadpc(unsafe.Pointer(h), callerpc, pc)
...
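
Note: every `!= 0` and `== 0/== 1` test above disappears because maptype.indirectkey and maptype.indirectvalue are declared bool in the runtime C, and that type now survives into the generated Go. Roughly, the relevant slice of the generated struct (abbreviated; the real maptype has more fields, so treat the exact shape as an assumption):

	type maptype struct {
		// ...
		keysize       uint8 // size of key slot
		indirectkey   bool  // store ptr to key instead of key itself
		valuesize     uint8 // size of value slot
		indirectvalue bool  // store ptr to value instead of value itself
		// ...
	}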
@@ -10,7 +10,7 @@ import (

 func mapaccess1_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer {
 	if raceenabled && h != nil {
-		callerpc := gogetcallerpc(unsafe.Pointer(&t))
+		callerpc := getcallerpc(unsafe.Pointer(&t))
 		fn := mapaccess1_fast32
 		pc := **(**uintptr)(unsafe.Pointer(&fn))
 		racereadpc(unsafe.Pointer(h), callerpc, pc)
@@ -54,7 +54,7 @@ func mapaccess1_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer {
 func mapaccess2_fast32(t *maptype, h *hmap, key uint32) (unsafe.Pointer, bool) {
 	if raceenabled && h != nil {
-		callerpc := gogetcallerpc(unsafe.Pointer(&t))
+		callerpc := getcallerpc(unsafe.Pointer(&t))
 		fn := mapaccess2_fast32
 		pc := **(**uintptr)(unsafe.Pointer(&fn))
 		racereadpc(unsafe.Pointer(h), callerpc, pc)
@@ -98,7 +98,7 @@ func mapaccess2_fast32(t *maptype, h *hmap, key uint32) (unsafe.Pointer, bool) {
 func mapaccess1_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer {
 	if raceenabled && h != nil {
-		callerpc := gogetcallerpc(unsafe.Pointer(&t))
+		callerpc := getcallerpc(unsafe.Pointer(&t))
 		fn := mapaccess1_fast64
 		pc := **(**uintptr)(unsafe.Pointer(&fn))
 		racereadpc(unsafe.Pointer(h), callerpc, pc)
@@ -142,7 +142,7 @@ func mapaccess1_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer {
 func mapaccess2_fast64(t *maptype, h *hmap, key uint64) (unsafe.Pointer, bool) {
 	if raceenabled && h != nil {
-		callerpc := gogetcallerpc(unsafe.Pointer(&t))
+		callerpc := getcallerpc(unsafe.Pointer(&t))
 		fn := mapaccess2_fast64
 		pc := **(**uintptr)(unsafe.Pointer(&fn))
 		racereadpc(unsafe.Pointer(h), callerpc, pc)
@@ -186,7 +186,7 @@ func mapaccess2_fast64(t *maptype, h *hmap, key uint64) (unsafe.Pointer, bool) {
 func mapaccess1_faststr(t *maptype, h *hmap, ky string) unsafe.Pointer {
 	if raceenabled && h != nil {
-		callerpc := gogetcallerpc(unsafe.Pointer(&t))
+		callerpc := getcallerpc(unsafe.Pointer(&t))
 		fn := mapaccess1_faststr
 		pc := **(**uintptr)(unsafe.Pointer(&fn))
 		racereadpc(unsafe.Pointer(h), callerpc, pc)
@@ -290,7 +290,7 @@ dohash:
 func mapaccess2_faststr(t *maptype, h *hmap, ky string) (unsafe.Pointer, bool) {
 	if raceenabled && h != nil {
-		callerpc := gogetcallerpc(unsafe.Pointer(&t))
+		callerpc := getcallerpc(unsafe.Pointer(&t))
 		fn := mapaccess2_faststr
 		pc := **(**uintptr)(unsafe.Pointer(&fn))
 		racereadpc(unsafe.Pointer(h), callerpc, pc)
...
@@ -53,7 +53,7 @@ func getitab(inter *interfacetype, typ *_type, canfail bool) *itab {
 	if locked != 0 {
 		golock(&ifaceLock)
 	}
-	for m = (*itab)(goatomicloadp(unsafe.Pointer(&hash[h]))); m != nil; m = m.link {
+	for m = (*itab)(atomicloadp(unsafe.Pointer(&hash[h]))); m != nil; m = m.link {
 		if m.inter == inter && m._type == typ {
 			if m.bad != 0 {
 				m = nil
@@ -76,7 +76,7 @@ func getitab(inter *interfacetype, typ *_type, canfail bool) *itab {
 		}
 	}

-	m = (*itab)(gopersistentalloc(unsafe.Sizeof(itab{}) + uintptr(len(inter.mhdr))*ptrSize))
+	m = (*itab)(persistentalloc(unsafe.Sizeof(itab{})+uintptr(len(inter.mhdr))*ptrSize, 0, &memstats.other_sys))
 	m.inter = inter
 	m._type = typ
@@ -118,7 +118,7 @@ search:
 		gothrow("invalid itab locking")
 	}
 	m.link = hash[h]
-	goatomicstorep(unsafe.Pointer(&hash[h]), unsafe.Pointer(m))
+	atomicstorep(unsafe.Pointer(&hash[h]), unsafe.Pointer(m))
 	gounlock(&ifaceLock)
 	if m.bad != 0 {
 		return nil
@@ -128,7 +128,7 @@ search:

 func typ2Itab(t *_type, inter *interfacetype, cache **itab) *itab {
 	tab := getitab(inter, t, false)
-	goatomicstorep(unsafe.Pointer(cache), unsafe.Pointer(tab))
+	atomicstorep(unsafe.Pointer(cache), unsafe.Pointer(tab))
 	return tab
 }
@@ -150,10 +150,10 @@ func convT2E(t *_type, elem unsafe.Pointer) (e interface{}) {
 }

 func convT2I(t *_type, inter *interfacetype, cache **itab, elem unsafe.Pointer) (i fInterface) {
-	tab := (*itab)(goatomicloadp(unsafe.Pointer(cache)))
+	tab := (*itab)(atomicloadp(unsafe.Pointer(cache)))
 	if tab == nil {
 		tab = getitab(inter, t, false)
-		goatomicstorep(unsafe.Pointer(cache), unsafe.Pointer(tab))
+		atomicstorep(unsafe.Pointer(cache), unsafe.Pointer(tab))
 	}
 	size := uintptr(t.size)
 	pi := (*iface)(unsafe.Pointer(&i))
...
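
Note: the deleted gopersistentalloc wrapper took only a size; the C persistentalloc now called directly also takes an alignment (0 meaning a default) and a pointer to the memstats counter to charge, here memstats.other_sys. The implied Go-side declaration, inferred from this call site alone (an assumption, since the declaration itself is not shown in this view):

	func persistentalloc(size, align uintptr, stat *uint64) unsafe.Pointer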
@@ -37,6 +37,9 @@ const (
 	bitMask = bitBoundary | bitMarked
 )

+// Page number (address>>pageShift)
+type pageID uintptr
+
 // All zero-sized allocations return a pointer to this byte.
 var zeroObject byte
@@ -64,7 +67,7 @@ func gomallocgc(size uintptr, typ *_type, flags int) unsafe.Pointer {
 		}
 		mp.mallocing = 1
 		if mp.curg != nil {
-			mp.curg.stackguard0 = ^uint(0xfff) | 0xbad
+			mp.curg.stackguard0 = ^uintptr(0xfff) | 0xbad
 		}
 	}
@@ -119,7 +122,7 @@ func gomallocgc(size uintptr, typ *_type, flags int) unsafe.Pointer {
 				// The object fits into existing tiny block.
 				x = tiny
 				c.tiny = (*byte)(add(x, size))
-				c.tinysize -= uint(size1)
+				c.tinysize -= uintptr(size1)
 				if debugMalloc {
 					mp := acquirem()
 					if mp.mallocing == 0 {
@@ -156,7 +159,7 @@ func gomallocgc(size uintptr, typ *_type, flags int) unsafe.Pointer {
 			// based on amount of remaining free space.
 			if maxTinySize-size > tinysize {
 				c.tiny = (*byte)(add(x, size))
-				c.tinysize = uint(maxTinySize - size)
+				c.tinysize = uintptr(maxTinySize - size)
 			}
 			size = maxTinySize
 		} else {
@@ -171,7 +174,7 @@ func gomallocgc(size uintptr, typ *_type, flags int) unsafe.Pointer {
 			v := s.freelist
 			if v == nil {
 				mp := acquirem()
-				mp.scalararg[0] = uint(sizeclass)
+				mp.scalararg[0] = uintptr(sizeclass)
 				onM(&mcacheRefill_m)
 				releasem(mp)
 				s = c.alloc[sizeclass]
@@ -188,11 +191,11 @@ func gomallocgc(size uintptr, typ *_type, flags int) unsafe.Pointer {
 				}
 			}
 		}
-		c.local_cachealloc += int(size)
+		c.local_cachealloc += intptr(size)
 	} else {
 		mp := acquirem()
-		mp.scalararg[0] = uint(size)
-		mp.scalararg[1] = uint(flags)
+		mp.scalararg[0] = uintptr(size)
+		mp.scalararg[1] = uintptr(flags)
 		onM(&largeAlloc_m)
 		s = (*mspan)(mp.ptrarg[0])
 		mp.ptrarg[0] = nil
@@ -241,15 +244,15 @@ func gomallocgc(size uintptr, typ *_type, flags int) unsafe.Pointer {
 				mp := acquirem()
 				mp.ptrarg[0] = x
 				mp.ptrarg[1] = unsafe.Pointer(typ)
-				mp.scalararg[0] = uint(size)
-				mp.scalararg[1] = uint(size0)
+				mp.scalararg[0] = uintptr(size)
+				mp.scalararg[1] = uintptr(size0)
 				onM(&unrollgcproginplace_m)
 				releasem(mp)
 				goto marked
 			}
 			ptrmask = (*uint8)(unsafe.Pointer(uintptr(typ.gc[0])))
 			// Check whether the program is already unrolled.
-			if uintptr(goatomicloadp(unsafe.Pointer(ptrmask)))&0xff == 0 {
+			if uintptr(atomicloadp(unsafe.Pointer(ptrmask)))&0xff == 0 {
 				mp := acquirem()
 				mp.ptrarg[0] = unsafe.Pointer(typ)
 				onM(&unrollgcprog_m)
@@ -394,7 +397,7 @@ func profilealloc(mp *m, x unsafe.Pointer, size uintptr) {
 		}
 		c.next_sample = next
 	}
-	mp.scalararg[0] = uint(size)
+	mp.scalararg[0] = uintptr(size)
 	mp.ptrarg[0] = x
 	onM(&mprofMalloc_m)
 }
@@ -402,7 +405,7 @@ func profilealloc(mp *m, x unsafe.Pointer, size uintptr) {
 // force = 1 - do GC regardless of current heap usage
 // force = 2 - go GC and eager sweep
 func gogc(force int32) {
-	if memstats.enablegc == 0 {
+	if !memstats.enablegc {
 		return
 	}
@@ -421,7 +424,7 @@ func gogc(force int32) {
 	if gcpercent == gcpercentUnknown {
 		golock(&mheap_.lock)
 		if gcpercent == gcpercentUnknown {
-			gcpercent = goreadgogc()
+			gcpercent = readgogc()
 		}
 		gounlock(&mheap_.lock)
 	}
@@ -439,7 +442,7 @@ func gogc(force int32) {
 	}

 	// Ok, we're doing it!  Stop everybody else
-	startTime := gonanotime()
+	startTime := nanotime()
 	mp = acquirem()
 	mp.gcing = 1
 	releasem(mp)
@@ -461,11 +464,11 @@ func gogc(force int32) {
 	}
 	for i := 0; i < n; i++ {
 		if i > 0 {
-			startTime = gonanotime()
+			startTime = nanotime()
 		}
 		// switch to g0, call gc, then switch back
-		mp.scalararg[0] = uint(uint32(startTime)) // low 32 bits
-		mp.scalararg[1] = uint(startTime >> 32)   // high 32 bits
+		mp.scalararg[0] = uintptr(uint32(startTime)) // low 32 bits
+		mp.scalararg[1] = uintptr(startTime >> 32)   // high 32 bits
 		if force >= 2 {
 			mp.scalararg[2] = 1 // eagersweep
 		} else {
...
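
Note: gogc still splits startTime across two scalararg slots because a uintptr holds only 32 bits on the 32-bit ports. The low/high convention used above, as runnable Go:

	package demo

	// pack splits an int64 across two pointer-sized slots.
	func pack(t int64) (lo, hi uintptr) {
		return uintptr(uint32(t)), uintptr(t >> 32)
	}

	// unpack reassembles the value on the other side.
	func unpack(lo, hi uintptr) int64 {
		return int64(hi)<<32 | int64(uint32(lo))
	}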
@@ -93,7 +93,7 @@ enum
 	PageSize = 1<<PageShift,
 	PageMask = PageSize - 1,
 };
-typedef	uintptr	PageID;		// address >> PageShift
+typedef	uintptr	pageID;		// address >> PageShift

 enum
 {
@@ -403,7 +403,7 @@ struct MSpan
 {
 	MSpan	*next;		// in a span linked list
 	MSpan	*prev;		// in a span linked list
-	PageID	start;		// starting page number
+	pageID	start;		// starting page number
 	uintptr	npages;		// number of pages in span
 	MLink	*freelist;	// list of free objects
 	// sweep generation:
@@ -425,7 +425,7 @@ struct MSpan
 	Special	*specials;	// linked list of special records sorted by offset.
 };

-void	runtime·MSpan_Init(MSpan *span, PageID start, uintptr npages);
+void	runtime·MSpan_Init(MSpan *span, pageID start, uintptr npages);
 void	runtime·MSpan_EnsureSwept(MSpan *span);
 bool	runtime·MSpan_Sweep(MSpan *span, bool preserve);
...
@@ -225,7 +225,7 @@ scanblock(byte *b, uintptr n, byte *ptrmask)
 	Eface *eface;
 	Type *typ;
 	MSpan *s;
-	PageID k;
+	pageID k;
 	bool keepworking;

 	// Cache memory arena parameters in local vars.
...
@@ -279,7 +279,7 @@ MHeap_AllocSpanLocked(MHeap *h, uintptr npage)
 {
 	uintptr n;
 	MSpan *s, *t;
-	PageID p;
+	pageID p;

 	// Try in fixed-size lists up to max.
 	for(n=npage; n < nelem(h->free); n++) {
@@ -380,7 +380,7 @@ MHeap_Grow(MHeap *h, uintptr npage)
 	uintptr ask;
 	void *v;
 	MSpan *s;
-	PageID p;
+	pageID p;

 	// Ask for a big chunk, to reduce the number of mappings
 	// the operating system needs to track; also amortizes
@@ -441,7 +441,7 @@ MSpan*
 runtime·MHeap_LookupMaybe(MHeap *h, void *v)
 {
 	MSpan *s;
-	PageID p, q;
+	pageID p, q;

 	if((byte*)v < h->arena_start || (byte*)v >= h->arena_used)
 		return nil;
@@ -514,7 +514,7 @@ static void
 MHeap_FreeSpanLocked(MHeap *h, MSpan *s, bool acctinuse, bool acctidle)
 {
 	MSpan *t;
-	PageID p;
+	pageID p;

 	switch(s->state) {
 	case MSpanStack:
@@ -639,7 +639,7 @@ runtime∕debug·freeOSMemory(void)
 // Initialize a new span with the given start and npages.
 void
-runtime·MSpan_Init(MSpan *span, PageID start, uintptr npages)
+runtime·MSpan_Init(MSpan *span, pageID start, uintptr npages)
 {
 	span->next = nil;
 	span->prev = nil;
...
@@ -98,10 +98,10 @@ func record(r *MemProfileRecord, b *bucket) {
 	r.FreeBytes = int64(b.data.mp.free_bytes)
 	r.AllocObjects = int64(b.data.mp.allocs)
 	r.FreeObjects = int64(b.data.mp.frees)
-	for i := 0; uint(i) < b.nstk && i < len(r.Stack0); i++ {
+	for i := 0; uintptr(i) < b.nstk && i < len(r.Stack0); i++ {
 		r.Stack0[i] = *(*uintptr)(add(unsafe.Pointer(&b.stk), uintptr(i)*ptrSize))
 	}
-	for i := b.nstk; i < uint(len(r.Stack0)); i++ {
+	for i := b.nstk; i < uintptr(len(r.Stack0)); i++ {
 		r.Stack0[i] = 0
 	}
 }
@@ -126,7 +126,7 @@ func BlockProfile(p []BlockProfileRecord) (n int, ok bool) {
 			p[idx].Count = int64(bp.count)
 			p[idx].Cycles = int64(bp.cycles)
 			i := 0
-			for uint(i) < b.nstk && i < len(p[idx].Stack0) {
+			for uintptr(i) < b.nstk && i < len(p[idx].Stack0) {
 				p[idx].Stack0[i] = *(*uintptr)(add(unsafe.Pointer(&b.stk), uintptr(i)*ptrSize))
 				i++
 			}
@@ -146,8 +146,8 @@ func BlockProfile(p []BlockProfileRecord) (n int, ok bool) {
 // If all is true, Stack formats stack traces of all other goroutines
 // into buf after the trace for the current goroutine.
 func Stack(buf []byte, all bool) int {
-	sp := gogetcallersp(unsafe.Pointer(&buf))
-	pc := gogetcallerpc(unsafe.Pointer(&buf))
+	sp := getcallersp(unsafe.Pointer(&buf))
+	pc := getcallerpc(unsafe.Pointer(&buf))
 	mp := acquirem()
 	gp := mp.curg
 	if all {
@@ -190,7 +190,7 @@ func Stack(buf []byte, all bool) int {
 // Most clients should use the runtime/pprof package instead
 // of calling ThreadCreateProfile directly.
 func ThreadCreateProfile(p []StackRecord) (n int, ok bool) {
-	first := (*m)(goatomicloadp(unsafe.Pointer(&allm)))
+	first := (*m)(atomicloadp(unsafe.Pointer(&allm)))
 	for mp := first; mp != nil; mp = mp.alllink {
 		n++
 	}
...
@@ -229,12 +229,6 @@ runtime·blockevent(int64 cycles, int32 skip)
 	runtime·unlock(&runtime·proflock);
 }

-void
-runtime·blockevent_m(void)
-{
-	runtime·blockevent(g->m->scalararg[0] + ((int64)g->m->scalararg[1]<<32), g->m->scalararg[2]);
-}
-
 void
 runtime·iterate_memprof(void (*callback)(Bucket*, uintptr, uintptr*, uintptr, uintptr, uintptr))
 {
...
@@ -19,7 +19,7 @@ var (

 func printstring(s string) {
 	mp := acquirem()
-	mp.scalararg[0] = uint(len(s))
+	mp.scalararg[0] = uintptr(len(s))
 	mp.ptrarg[0] = (*stringStruct)(unsafe.Pointer(&s)).str
 	onM(&printstring_m)
 	releasem(mp)
@@ -34,7 +34,7 @@ func printuint(x uint64) {

 func printhex(x uintptr) {
 	mp := acquirem()
-	mp.scalararg[0] = uint(x)
+	mp.scalararg[0] = uintptr(x)
 	onM(&printhex_m)
 	releasem(mp)
 }
...
@@ -50,7 +50,7 @@ func gopark(unlockf unsafe.Pointer, lock unsafe.Pointer, reason string) {
 		gothrow("gopark: bad g status")
 	}
 	mp.waitlock = lock
-	mp.waitunlockf = *(*func(*g, unsafe.Pointer) uint8)(unsafe.Pointer(&unlockf))
+	mp.waitunlockf = *(*func(*g, unsafe.Pointer) bool)(unsafe.Pointer(&unlockf))
 	gp.waitreason = reason
 	releasem(mp)
 	// can't do anything that might move the G between Ms here.
@@ -70,16 +70,6 @@ func goready(gp *g) {
 	releasem(mp)
 }

-func goblockevent(cycles int64, skip int32) {
-	// TODO: convert to Go when we do mprof.goc
-	mp := acquirem()
-	mp.scalararg[0] = uint(uint32(cycles))
-	mp.scalararg[1] = uint(cycles >> 32)
-	mp.scalararg[2] = uint(skip)
-	onM(&blockevent_m)
-	releasem(mp)
-}
-
 //go:nosplit
 func acquireSudog() *sudog {
 	c := gomcache()
...
@@ -6,27 +6,22 @@ package runtime

 func setMaxStack(in int) (out int) {
 	out = int(maxstacksize)
-	maxstacksize = uint(in)
+	maxstacksize = uintptr(in)
 	return out
 }

 func setGCPercent(in int32) (out int32) {
 	mp := acquirem()
-	mp.scalararg[0] = uint(int(in))
+	mp.scalararg[0] = uintptr(int(in))
 	onM(&setgcpercent_m)
 	out = int32(int(mp.scalararg[0]))
 	releasem(mp)
 	return out
 }

-func setPanicOnFault(newb bool) (old bool) {
-	new := uint8(0)
-	if newb {
-		new = 1
-	}
+func setPanicOnFault(new bool) (old bool) {
 	mp := acquirem()
-	old = mp.curg.paniconfault == 1
+	old = mp.curg.paniconfault
 	mp.curg.paniconfault = new
 	releasem(mp)
 	return old
@@ -34,7 +29,7 @@ func setPanicOnFault(newb bool) (old bool) {

 func setMaxThreads(in int) (out int) {
 	mp := acquirem()
-	mp.scalararg[0] = uint(in)
+	mp.scalararg[0] = uintptr(in)
 	onM(&setmaxthreads_m)
 	out = int(mp.scalararg[0])
 	releasem(mp)
...
@@ -22,17 +22,10 @@ typedef	int64	intptr;
 typedef	int64		intgo; // Go's int
 typedef	uint64		uintgo; // Go's uint
 #else
-// Normally, "int" == "long int" == 32 bits.
-// However, the C compiler uses this distinction
-// to disambiguate true 32 bit ints (e.g. int32)
-// from 32/64 bit ints (e.g. uintptr) so that it
-// can generate the corresponding go type correctly.
-typedef	signed long int		int32_x;
-typedef	unsigned long int	uint32_x;
-typedef	uint32_x	uintptr;
-typedef	int32_x		intptr;
-typedef	int32_x		intgo; // Go's int
-typedef	uint32_x	uintgo; // Go's uint
+typedef	uint32		uintptr;
+typedef	int32		intptr;
+typedef	int32		intgo; // Go's int
+typedef	uint32		uintgo; // Go's uint
 #endif

 #ifdef _64BITREG
...
@@ -65,16 +65,16 @@ func semacquire(addr *uint32, profile bool) {
 	t0 := int64(0)
 	s.releasetime = 0
 	if profile && blockprofilerate > 0 {
-		t0 = gocputicks()
+		t0 = cputicks()
 		s.releasetime = -1
 	}
 	for {
 		golock(&root.lock)
 		// Add ourselves to nwait to disable "easy case" in semrelease.
-		goxadd(&root.nwait, 1)
+		xadd(&root.nwait, 1)
 		// Check cansemacquire to avoid missed wakeup.
 		if cansemacquire(addr) {
-			goxadd(&root.nwait, ^uint32(0))
+			xadd(&root.nwait, -1)
 			gounlock(&root.lock)
 			break
 		}
@@ -87,25 +87,25 @@ func semacquire(addr *uint32, profile bool) {
 		}
 	}
 	if s.releasetime > 0 {
-		goblockevent(int64(s.releasetime)-t0, 4)
+		blockevent(int64(s.releasetime)-t0, 3)
 	}
 	releaseSudog(s)
 }

 func semrelease(addr *uint32) {
 	root := semroot(addr)
-	goxadd(addr, 1)
+	xadd(addr, 1)

 	// Easy case: no waiters?
 	// This check must happen after the xadd, to avoid a missed wakeup
 	// (see loop in semacquire).
-	if goatomicload(&root.nwait) == 0 {
+	if atomicload(&root.nwait) == 0 {
 		return
 	}

 	// Harder case: search for a waiter and wake it.
 	golock(&root.lock)
-	if goatomicload(&root.nwait) == 0 {
+	if atomicload(&root.nwait) == 0 {
 		// The count is already consumed by another goroutine,
 		// so no need to wake up another goroutine.
 		gounlock(&root.lock)
@@ -114,7 +114,7 @@ func semrelease(addr *uint32) {
 	s := root.head
 	for ; s != nil; s = s.next {
 		if s.elem == unsafe.Pointer(addr) {
-			goxadd(&root.nwait, ^uint32(0))
+			xadd(&root.nwait, -1)
 			root.dequeue(s)
 			break
 		}
@@ -122,9 +122,7 @@ func semrelease(addr *uint32) {
 	gounlock(&root.lock)
 	if s != nil {
 		if s.releasetime != 0 {
-			// TODO: Remove use of unsafe here.
-			releasetimep := (*int64)(unsafe.Pointer(&s.releasetime))
-			*releasetimep = gocputicks()
+			s.releasetime = cputicks()
 		}
 		goready(s.g)
 	}
@@ -136,11 +134,11 @@ func semroot(addr *uint32) *semaRoot {

 func cansemacquire(addr *uint32) bool {
 	for {
-		v := goatomicload(addr)
+		v := atomicload(addr)
 		if v == 0 {
 			return false
 		}
-		if gocas(addr, v, v-1) {
+		if cas(addr, v, v-1) {
 			return true
 		}
 	}
@@ -208,7 +206,7 @@ func syncsemacquire(s *syncSema) {
 		w.releasetime = 0
 		t0 := int64(0)
 		if blockprofilerate > 0 {
-			t0 = gocputicks()
+			t0 = cputicks()
 			w.releasetime = -1
 		}
 		if s.tail == nil {
@@ -219,7 +217,7 @@ func syncsemacquire(s *syncSema) {
 		s.tail = w
 		goparkunlock(&s.lock, "semacquire")
 		if t0 != 0 {
-			goblockevent(int64(w.releasetime)-t0, 3)
+			blockevent(int64(w.releasetime)-t0, 2)
 		}
 		releaseSudog(w)
 	}
@@ -236,9 +234,7 @@ func syncsemrelease(s *syncSema, n uint32) {
 			s.tail = nil
 		}
 		if wake.releasetime != 0 {
-			// TODO: Remove use of unsafe here.
-			releasetimep := (*int64)(unsafe.Pointer(&wake.releasetime))
-			*releasetimep = gocputicks()
+			wake.releasetime = cputicks()
 		}
 		goready(wake.g)
 		n--
...
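
Note: goxadd took a uint32 delta, so decrementing nwait meant adding ^uint32(0), the two's-complement bit pattern of -1; xadd takes a signed int32 delta, so the new call sites can just say -1. The two spellings are the same operation, shown here with sync/atomic standing in for the runtime's private xadd:

	package main

	import (
		"fmt"
		"sync/atomic"
	)

	func main() {
		var nwait uint32 = 2
		atomic.AddUint32(&nwait, ^uint32(0)) // adding 0xffffffff subtracts 1
		fmt.Println(nwait)                   // prints 1
	}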
@@ -16,21 +16,21 @@ func signal_recv() (m uint32) {
 		if ok {
 			return
 		}
-		gonotetsleepg(&signote, -1)
-		gonoteclear(&signote)
+		notetsleepg(&signote, -1)
+		noteclear(&signote)
 	}
 }

 func signal_enable(s uint32) {
 	mp := acquirem()
-	mp.scalararg[0] = uint(s)
+	mp.scalararg[0] = uintptr(s)
 	onM(&signal_enable_m)
 	releasem(mp)
 }

 func signal_disable(s uint32) {
 	mp := acquirem()
-	mp.scalararg[0] = uint(s)
+	mp.scalararg[0] = uintptr(s)
 	onM(&signal_disable_m)
 	releasem(mp)
 }
...
@@ -47,7 +47,7 @@ func growslice(t *slicetype, old sliceStruct, n int64) sliceStruct {
 	}

 	if raceenabled {
-		callerpc := gogetcallerpc(unsafe.Pointer(&t))
+		callerpc := getcallerpc(unsafe.Pointer(&t))
 		fn := growslice
 		pc := **(**uintptr)(unsafe.Pointer(&fn))
 		racereadrangepc(old.array, old.len*int(t.elem.size), callerpc, pc)
@@ -104,7 +104,7 @@ func slicecopy(to sliceStruct, fm sliceStruct, width uintptr) int {
 	}

 	if raceenabled {
-		callerpc := gogetcallerpc(unsafe.Pointer(&to))
+		callerpc := getcallerpc(unsafe.Pointer(&to))
 		fn := slicecopy
 		pc := **(**uintptr)(unsafe.Pointer(&fn))
 		racewriterangepc(to.array, n*int(width), callerpc, pc)
@@ -132,7 +132,7 @@ func slicestringcopy(to []byte, fm string) int {
 	}

 	if raceenabled {
-		callerpc := gogetcallerpc(unsafe.Pointer(&to))
+		callerpc := getcallerpc(unsafe.Pointer(&to))
 		fn := slicestringcopy
 		pc := **(**uintptr)(unsafe.Pointer(&fn))
 		racewriterangepc(unsafe.Pointer(&to[0]), n, callerpc, pc)
...
...@@ -64,7 +64,7 @@ func slicebytetostring(b []byte) string { ...@@ -64,7 +64,7 @@ func slicebytetostring(b []byte) string {
fn := slicebytetostring fn := slicebytetostring
racereadrangepc(unsafe.Pointer(&b[0]), racereadrangepc(unsafe.Pointer(&b[0]),
len(b), len(b),
gogetcallerpc(unsafe.Pointer(&b)), getcallerpc(unsafe.Pointer(&b)),
**(**uintptr)(unsafe.Pointer(&fn))) **(**uintptr)(unsafe.Pointer(&fn)))
} }
s, c := rawstring(len(b)) s, c := rawstring(len(b))
...@@ -85,7 +85,7 @@ func slicebytetostringtmp(b []byte) string { ...@@ -85,7 +85,7 @@ func slicebytetostringtmp(b []byte) string {
fn := slicebytetostringtmp fn := slicebytetostringtmp
racereadrangepc(unsafe.Pointer(&b[0]), racereadrangepc(unsafe.Pointer(&b[0]),
len(b), len(b),
gogetcallerpc(unsafe.Pointer(&b)), getcallerpc(unsafe.Pointer(&b)),
**(**uintptr)(unsafe.Pointer(&fn))) **(**uintptr)(unsafe.Pointer(&fn)))
} }
return *(*string)(unsafe.Pointer(&b)) return *(*string)(unsafe.Pointer(&b))
...@@ -123,7 +123,7 @@ func slicerunetostring(a []rune) string { ...@@ -123,7 +123,7 @@ func slicerunetostring(a []rune) string {
fn := slicerunetostring fn := slicerunetostring
racereadrangepc(unsafe.Pointer(&a[0]), racereadrangepc(unsafe.Pointer(&a[0]),
len(a)*int(unsafe.Sizeof(a[0])), len(a)*int(unsafe.Sizeof(a[0])),
gogetcallerpc(unsafe.Pointer(&a)), getcallerpc(unsafe.Pointer(&a)),
**(**uintptr)(unsafe.Pointer(&fn))) **(**uintptr)(unsafe.Pointer(&fn)))
} }
var dum [4]byte var dum [4]byte
...@@ -219,7 +219,7 @@ func rawstring(size int) (s string, b []byte) { ...@@ -219,7 +219,7 @@ func rawstring(size int) (s string, b []byte) {
for { for {
ms := maxstring ms := maxstring
if uintptr(size) <= uintptr(ms) || gocasx((*uintptr)(unsafe.Pointer(&maxstring)), uintptr(ms), uintptr(size)) { if uintptr(size) <= uintptr(ms) || casuintptr((*uintptr)(unsafe.Pointer(&maxstring)), uintptr(ms), uintptr(size)) {
return return
} }
} }
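Note: this loop is a lock-free high-water-mark update: retry until either the stored maximum already covers size or the compare-and-swap lands. A sketch using sync/atomic in place of the runtime-internal casuintptr (the atomic load is an addition here; the runtime reads maxstring directly):

package main

import (
	"fmt"
	"sync/atomic"
)

// maxstring tracks a high-water mark; updateMax retries until the
// stored value is at least size, mirroring the loop in rawstring.
var maxstring uintptr

func updateMax(size uintptr) {
	for {
		ms := atomic.LoadUintptr(&maxstring)
		if size <= ms || atomic.CompareAndSwapUintptr(&maxstring, ms, size) {
			return
		}
	}
}

func main() {
	updateMax(256)
	updateMax(64) // no-op: smaller than the current max
	fmt.Println(maxstring)
}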
......
...@@ -15,12 +15,6 @@ const ( ...@@ -15,12 +15,6 @@ const (
ptrSize = unsafe.Sizeof((*byte)(nil)) ptrSize = unsafe.Sizeof((*byte)(nil))
) )
//go:noescape
func gogetcallerpc(p unsafe.Pointer) uintptr
//go:noescape
func gogetcallersp(p unsafe.Pointer) uintptr
//go:noescape //go:noescape
func racereadpc(addr unsafe.Pointer, callpc, pc uintptr) func racereadpc(addr unsafe.Pointer, callpc, pc uintptr)
...@@ -91,11 +85,12 @@ var ( ...@@ -91,11 +85,12 @@ var (
setmaxthreads_m, setmaxthreads_m,
ready_m, ready_m,
park_m, park_m,
blockevent_m,
notewakeup_m, notewakeup_m,
notetsleepg_m mFunction notetsleepg_m mFunction
) )
func blockevent(int64, int32)
// memclr clears n bytes starting at ptr. // memclr clears n bytes starting at ptr.
// in memclr_*.s // in memclr_*.s
//go:noescape //go:noescape
...@@ -117,26 +112,6 @@ const ( ...@@ -117,26 +112,6 @@ const (
concurrentSweep = true concurrentSweep = true
) )
// Atomic operations to read/write a pointer.
// in stubs.goc
func goatomicload(p *uint32) uint32 // return *p
func goatomicloadp(p unsafe.Pointer) unsafe.Pointer // return *p
func goatomicstore(p *uint32, v uint32) // *p = v
func goatomicstorep(p unsafe.Pointer, v unsafe.Pointer) // *p = v
// in stubs.goc
// if *p == x { *p = y; return true } else { return false }, atomically
//go:noescape
func gocas(p *uint32, x uint32, y uint32) bool
//go:noescape
func goxadd(p *uint32, x uint32) uint32
//go:noescape
func gocasx(p *uintptr, x uintptr, y uintptr) bool
func goreadgogc() int32
func gonanotime() int64
func gosched() func gosched()
func starttheworld() func starttheworld()
func stoptheworld() func stoptheworld()
...@@ -187,33 +162,6 @@ func noescape(p unsafe.Pointer) unsafe.Pointer { ...@@ -187,33 +162,6 @@ func noescape(p unsafe.Pointer) unsafe.Pointer {
return unsafe.Pointer(x ^ 0) return unsafe.Pointer(x ^ 0)
} }
// gopersistentalloc allocates a permanent (not garbage collected)
// memory region of size n. Use wisely!
func gopersistentalloc(n uintptr) unsafe.Pointer
func gocputicks() int64
func gonoteclear(n *note) {
n.key = 0
}
func gonotewakeup(n *note) {
mp := acquirem()
mp.ptrarg[0] = unsafe.Pointer(n)
onM(&notewakeup_m)
releasem(mp)
}
func gonotetsleepg(n *note, t int64) {
mp := acquirem()
mp.ptrarg[0] = unsafe.Pointer(n)
mp.scalararg[0] = uint(uint32(t)) // low 32 bits
mp.scalararg[1] = uint(t >> 32) // high 32 bits
releasem(mp)
mcall(&notetsleepg_m)
exitsyscall()
}
func exitsyscall() func exitsyscall()
func goroutineheader(gp *g) func goroutineheader(gp *g)
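Note: the deleted gonotetsleepg shows why some wrappers existed at all: on a 32-bit target an int64 had to travel as low and high words in two scalararg slots. A sketch of that split and its inverse:

package main

import "fmt"

// split and join mirror the low/high words gonotetsleepg stored in
// scalararg[0] and scalararg[1] for 32-bit targets.
func split(t int64) (lo, hi uintptr) {
	return uintptr(uint32(t)), uintptr(uint32(t >> 32))
}

func join(lo, hi uintptr) int64 {
	return int64(uint64(uint32(hi))<<32 | uint64(uint32(lo)))
}

func main() {
	lo, hi := split(-123456789012345)
	fmt.Println(join(lo, hi)) // -123456789012345
}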
...@@ -231,22 +179,6 @@ func mincore(addr unsafe.Pointer, n uintptr, dst *byte) int32 ...@@ -231,22 +179,6 @@ func mincore(addr unsafe.Pointer, n uintptr, dst *byte) int32
func jmpdefer(fv *funcval, argp unsafe.Pointer) func jmpdefer(fv *funcval, argp unsafe.Pointer)
func exit1(code int32) func exit1(code int32)
func asminit() func asminit()
func getcallersp(argp unsafe.Pointer) uintptr
func cas(ptr *uint32, old, new uint32) bool
func cas64(ptr *uint64, old, new uint64) bool
func casp(ptr *unsafe.Pointer, old, new unsafe.Pointer) bool
func xadd(ptr *uint32, delta int32) uint32
func xadd64(ptr *uint64, delta int64) uint64
func xchg(ptr *uint32, new uint32) uint32
func xchg64(ptr *uint64, new uint64) uint64
func xchgp(ptr *unsafe.Pointer, new unsafe.Pointer) unsafe.Pointer
func atomicstore(ptr *uint32, val uint32)
func atomicstore64(ptr *uint64, val uint64)
func atomicstorep(ptr *unsafe.Pointer, val unsafe.Pointer)
func atomicload(ptr *uint32) uint32
func atomicload64(ptr *uint64) uint64
func atomicloadp(ptr *unsafe.Pointer) unsafe.Pointer
func atomicor8(ptr *uint8, val uint8)
func setg(gg *g) func setg(gg *g)
func exit(code int32) func exit(code int32)
func breakpoint() func breakpoint()
...@@ -257,10 +189,72 @@ func cputicks() int64 ...@@ -257,10 +189,72 @@ func cputicks() int64
func mmap(addr unsafe.Pointer, n uintptr, prot, flags, fd int32, off uint32) unsafe.Pointer func mmap(addr unsafe.Pointer, n uintptr, prot, flags, fd int32, off uint32) unsafe.Pointer
func munmap(addr unsafe.Pointer, n uintptr) func munmap(addr unsafe.Pointer, n uintptr)
func madvise(addr unsafe.Pointer, n uintptr, flags int32) func madvise(addr unsafe.Pointer, n uintptr, flags int32)
func setcallerpc(argp unsafe.Pointer, pc uintptr)
func getcallerpc(argp unsafe.Pointer) uintptr
func newstackcall(fv *funcval, addr unsafe.Pointer, size uint32) func newstackcall(fv *funcval, addr unsafe.Pointer, size uint32)
func procyield(cycles uint32) func procyield(cycles uint32)
func osyield() func osyield()
func cgocallback_gofunc(fv *funcval, frame unsafe.Pointer, framesize uintptr) func cgocallback_gofunc(fv *funcval, frame unsafe.Pointer, framesize uintptr)
func cmpstring(s1, s2 string) int func cmpstring(s1, s2 string) int
func persistentalloc(size, align uintptr, stat *uint64) unsafe.Pointer
func readgogc() int32
func notetsleepg(n *note, ns int64)
func notetsleep(n *note, ns int64)
func notewakeup(n *note)
func notesleep(n *note)
func noteclear(n *note)
//go:noescape
func cas(ptr *uint32, old, new uint32) bool
//go:noescape
func cas64(ptr *uint64, old, new uint64) bool
//go:noescape
func casp(ptr *unsafe.Pointer, old, new unsafe.Pointer) bool
//go:noescape
func casuintptr(ptr *uintptr, old, new uintptr) bool
//go:noescape
func xadd(ptr *uint32, delta int32) uint32
//go:noescape
func xadd64(ptr *uint64, delta int64) uint64
//go:noescape
func xchg(ptr *uint32, new uint32) uint32
//go:noescape
func xchg64(ptr *uint64, new uint64) uint64
//go:noescape
func xchgp(ptr unsafe.Pointer, new unsafe.Pointer) unsafe.Pointer
//go:noescape
func atomicstore(ptr *uint32, val uint32)
//go:noescape
func atomicstore64(ptr *uint64, val uint64)
//go:noescape
func atomicstorep(ptr unsafe.Pointer, val unsafe.Pointer)
//go:noescape
func atomicload(ptr *uint32) uint32
//go:noescape
func atomicload64(ptr *uint64) uint64
//go:noescape
func atomicloadp(ptr unsafe.Pointer) unsafe.Pointer
//go:noescape
func atomicor8(ptr *uint8, val uint8)
//go:noescape
func setcallerpc(argp unsafe.Pointer, pc uintptr)
//go:noescape
func getcallerpc(argp unsafe.Pointer) uintptr
//go:noescape
func getcallersp(argp unsafe.Pointer) uintptr
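Note: the go-prefixed wrappers are replaced by direct, body-less declarations of the assembly routines, each marked //go:noescape so pointer arguments can stay on callers' stacks. For orientation only, a rough mapping onto the public sync/atomic API (an assumption for illustration; the runtime versions above are assembly):

package main

import (
	"fmt"
	"sync/atomic"
)

func main() {
	var n uint32
	atomic.CompareAndSwapUint32(&n, 0, 1) // cas(&n, 0, 1)
	now := atomic.AddUint32(&n, 2)        // xadd(&n, 2): returns the new value
	old := atomic.SwapUint32(&n, 7)       // xchg(&n, 7): returns the old value
	fmt.Println(atomic.LoadUint32(&n), now, old) // atomicload(&n)
}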
...@@ -32,57 +32,12 @@ func gounlock(p *Lock) { ...@@ -32,57 +32,12 @@ func gounlock(p *Lock) {
runtime·unlock(p); runtime·unlock(p);
} }
#pragma textflag NOSPLIT
func goreadgogc() (r int32) {
r = runtime·readgogc();
}
// entry point for testing // entry point for testing
// TODO: mcall and run on M stack // TODO: mcall and run on M stack
func gostringW(str Slice) (s String) { func gostringW(str Slice) (s String) {
s = runtime·gostringw((uint16*)str.array); s = runtime·gostringw((uint16*)str.array);
} }
#pragma textflag NOSPLIT
func gonanotime() (r int64) {
r = runtime·nanotime();
}
#pragma textflag NOSPLIT
func goatomicload(p *uint32) (v uint32) {
v = runtime·atomicload(p);
}
#pragma textflag NOSPLIT
func goatomicloadp(p **byte) (v *byte) {
v = runtime·atomicloadp(p);
}
#pragma textflag NOSPLIT
func goatomicstore(p *uint32, v uint32) {
runtime·atomicstore(p, v);
}
#pragma textflag NOSPLIT
func goatomicstorep(p **byte, v *byte) {
runtime·atomicstorep(p, v);
}
#pragma textflag NOSPLIT
func runtime·goxadd(p *uint32, x uint32) (ret uint32) {
ret = runtime·xadd(p, x);
}
#pragma textflag NOSPLIT
func runtime·gocas(p *uint32, x uint32, y uint32) (ret bool) {
ret = runtime·cas(p, x, y);
}
#pragma textflag NOSPLIT
func runtime·gocasx(p *uintptr, x uintptr, y uintptr) (ret bool) {
ret = runtime·casp((void**)p, (void*)x, (void*)y);
}
#pragma textflag NOSPLIT #pragma textflag NOSPLIT
func runtime·getg() (ret *G) { func runtime·getg() (ret *G) {
ret = g; ret = g;
...@@ -115,12 +70,6 @@ func GCMask(x Eface) (mask Slice) { ...@@ -115,12 +70,6 @@ func GCMask(x Eface) (mask Slice) {
mask.cap = mask.len; mask.cap = mask.len;
} }
#pragma textflag NOSPLIT
func gopersistentalloc(size uintptr) (x *void) {
// TODO: used only for itabs for now. Need to make &mstats.other_sys arg parameterized.
x = runtime·persistentalloc(size, 0, &mstats.other_sys);
}
#pragma textflag NOSPLIT #pragma textflag NOSPLIT
func reflect·typelinks() (ret Slice) { func reflect·typelinks() (ret Slice) {
extern Type *runtime·typelink[], *runtime·etypelink[]; extern Type *runtime·typelink[], *runtime·etypelink[];
......
...@@ -15,15 +15,11 @@ type callbacks struct { ...@@ -15,15 +15,11 @@ type callbacks struct {
} }
func (c *wincallbackcontext) isCleanstack() bool { func (c *wincallbackcontext) isCleanstack() bool {
return c.cleanstack == 1 return c.cleanstack
} }
func (c *wincallbackcontext) setCleanstack(cleanstack bool) { func (c *wincallbackcontext) setCleanstack(cleanstack bool) {
if cleanstack { c.cleanstack = cleanstack
c.cleanstack = 1
} else {
c.cleanstack = 0
}
} }
var ( var (
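Note: since C bool now comes through as Go bool, cleanstack no longer needs the 0/1 encoding. The new accessors, restated as a stand-alone program (fields trimmed to the one in play):

package main

import "fmt"

// With C bool preserved as Go bool, the flag methods collapse to
// direct field access; names are reused from the diff.
type wincallbackcontext struct{ cleanstack bool }

func (c *wincallbackcontext) isCleanstack() bool { return c.cleanstack }

func (c *wincallbackcontext) setCleanstack(clean bool) { c.cleanstack = clean }

func main() {
	var c wincallbackcontext
	c.setCleanstack(true)
	fmt.Println(c.isCleanstack())
}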
...@@ -51,11 +47,11 @@ func compileCallback(fn eface, cleanstack bool) (code uintptr) { ...@@ -51,11 +47,11 @@ func compileCallback(fn eface, cleanstack bool) (code uintptr) {
if len(ft.out) != 1 { if len(ft.out) != 1 {
panic("compilecallback: function must have one output parameter") panic("compilecallback: function must have one output parameter")
} }
uintptrSize := uint(unsafe.Sizeof(uintptr(0))) uintptrSize := unsafe.Sizeof(uintptr(0))
if t := (**_type)(unsafe.Pointer(&ft.out[0])); (*t).size != uintptrSize { if t := (**_type)(unsafe.Pointer(&ft.out[0])); (*t).size != uintptrSize {
panic("compilecallback: output parameter size is wrong") panic("compilecallback: output parameter size is wrong")
} }
argsize := uint(0) argsize := uintptr(0)
for _, t := range (*[1024](*_type))(unsafe.Pointer(&ft.in[0]))[:len(ft.in)] { for _, t := range (*[1024](*_type))(unsafe.Pointer(&ft.in[0]))[:len(ft.in)] {
if (*t).size != uintptrSize { if (*t).size != uintptrSize {
panic("compilecallback: input parameter size is wrong") panic("compilecallback: input parameter size is wrong")
......
...@@ -12,7 +12,7 @@ ...@@ -12,7 +12,7 @@
#endif #endif
TEXT time·runtimeNano(SB),NOSPLIT,$0-0 TEXT time·runtimeNano(SB),NOSPLIT,$0-0
JMP runtime·gonanotime(SB) JMP runtime·nanotime(SB)
TEXT time·Sleep(SB),NOSPLIT,$0-0 TEXT time·Sleep(SB),NOSPLIT,$0-0
JMP runtime·timeSleep(SB) JMP runtime·timeSleep(SB)
......
...@@ -49,7 +49,7 @@ func timeSleep(ns int64) { ...@@ -49,7 +49,7 @@ func timeSleep(ns int64) {
} }
t := new(timer) t := new(timer)
t.when = gonanotime() + ns t.when = nanotime() + ns
t.f = goroutineReady t.f = goroutineReady
t.arg = getg() t.arg = getg()
golock(&timers.lock) golock(&timers.lock)
...@@ -100,7 +100,7 @@ func addtimerLocked(t *timer) { ...@@ -100,7 +100,7 @@ func addtimerLocked(t *timer) {
// siftup moved to top: new earliest deadline. // siftup moved to top: new earliest deadline.
if timers.sleeping { if timers.sleeping {
timers.sleeping = false timers.sleeping = false
gonotewakeup(&timers.waitnote) notewakeup(&timers.waitnote)
} }
if timers.rescheduling { if timers.rescheduling {
timers.rescheduling = false timers.rescheduling = false
...@@ -149,11 +149,11 @@ func deltimer(t *timer) bool { ...@@ -149,11 +149,11 @@ func deltimer(t *timer) bool {
// If addtimer inserts a new earlier event, addtimer1 wakes timerproc early. // If addtimer inserts a new earlier event, addtimer1 wakes timerproc early.
func timerproc() { func timerproc() {
timers.gp = getg() timers.gp = getg()
timers.gp.issystem = 1 timers.gp.issystem = true
for { for {
golock(&timers.lock) golock(&timers.lock)
timers.sleeping = false timers.sleeping = false
now := gonanotime() now := nanotime()
delta := int64(-1) delta := int64(-1)
for { for {
if len(timers.t) == 0 { if len(timers.t) == 0 {
...@@ -200,9 +200,9 @@ func timerproc() { ...@@ -200,9 +200,9 @@ func timerproc() {
} }
// At least one timer pending. Sleep until then. // At least one timer pending. Sleep until then.
timers.sleeping = true timers.sleeping = true
gonoteclear(&timers.waitnote) noteclear(&timers.waitnote)
gounlock(&timers.lock) gounlock(&timers.lock)
gonotetsleepg(&timers.waitnote, delta) notetsleepg(&timers.waitnote, delta)
} }
} }
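Note: timerproc's sleep protocol publishes timers.sleeping under the lock before parking on waitnote, and addtimerLocked only wakes a parked sleeper, so at most one wakeup is issued per sleep. A user-space analogue with a channel standing in for the runtime note (illustration only; names mirror the diff):

package main

import (
	"fmt"
	"sync"
	"time"
)

type timers struct {
	mu       sync.Mutex
	sleeping bool
	waitnote chan struct{}
}

func (t *timers) sleep() {
	t.mu.Lock()
	t.sleeping = true
	t.waitnote = make(chan struct{}) // noteclear
	note := t.waitnote
	t.mu.Unlock()
	<-note // notetsleepg, minus the timeout
}

func (t *timers) wake() {
	t.mu.Lock()
	if t.sleeping { // only wake a parked sleeper, as addtimerLocked does
		t.sleeping = false
		close(t.waitnote) // notewakeup
	}
	t.mu.Unlock()
}

func main() {
	var ts timers
	go func() { time.Sleep(10 * time.Millisecond); ts.wake() }()
	ts.sleep()
	fmt.Println("woken")
}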
......