Commit 47d6af2f authored by Keith Randall

runtime: convert chanrecv to Go

LGTM=rsc, dvyukov
R=golang-codereviews, bradfitz, rsc, dvyukov
CC=golang-codereviews
https://golang.org/cl/136980044
parent 07d86b1f
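For orientation before the diff: chanrecv1 and chanrecv2, the entry points added to chan.go below, back the two source-level forms of a channel receive. A minimal, self-contained sketch of the user code that compiles down to them:

package main

import "fmt"

func main() {
	c := make(chan int, 1)
	c <- 42

	v := <-c       // one-result receive: compiled to a runtime.chanrecv1 call
	fmt.Println(v) // 42

	close(c)
	v, ok := <-c       // two-result receive: compiled to a runtime.chanrecv2 call
	fmt.Println(v, ok) // 0 false: closed and drained, so chanrecv zeroed v and reported received=false
}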
@@ -507,6 +507,9 @@ TEXT runtime·casuintptr(SB), NOSPLIT, $0-13
TEXT runtime·atomicloaduintptr(SB), NOSPLIT, $0-8
JMP runtime·atomicload(SB)
TEXT runtime·atomicloaduint(SB), NOSPLIT, $0-8
JMP runtime·atomicload(SB)
// bool runtime·cas64(uint64 *val, uint64 old, uint64 new)
// Atomically:
// if(*val == *old){
......
@@ -626,6 +626,9 @@ TEXT runtime·casuintptr(SB), NOSPLIT, $0-25
TEXT runtime·atomicloaduintptr(SB), NOSPLIT, $0-16
JMP runtime·atomicload64(SB)
TEXT runtime·atomicloaduint(SB), NOSPLIT, $0-16
JMP runtime·atomicload64(SB)
// bool casp(void **val, void *old, void *new)
// Atomically:
// if(*val == old){
......
@@ -567,6 +567,9 @@ TEXT runtime·casuintptr(SB), NOSPLIT, $0-17
TEXT runtime·atomicloaduintptr(SB), NOSPLIT, $0-12
JMP runtime·atomicload(SB)
TEXT runtime·atomicloaduint(SB), NOSPLIT, $0-12
JMP runtime·atomicload(SB)
// bool runtime·cas64(uint64 *val, uint64 old, uint64 new)
// Atomically:
// if(*val == *old){
......
@@ -695,6 +695,9 @@ TEXT runtime·casuintptr(SB), NOSPLIT, $0-13
TEXT runtime·atomicloaduintptr(SB), NOSPLIT, $0-8
B runtime·atomicload(SB)
TEXT runtime·atomicloaduint(SB), NOSPLIT, $0-8
B runtime·atomicload(SB)
TEXT runtime·stackguard(SB),NOSPLIT,$0-8
MOVW R13, R1
MOVW g_stackguard(g), R2
......
@@ -35,8 +35,8 @@ func makechan(t *chantype, size int64) *hchan {
if elem.kind&kindNoPointers != 0 || size == 0 {
// Allocate memory in one call.
// Hchan does not contain pointers interesting for GC in this case:
-// buf points into the same allocation, elemtype is persistent
-// and SudoG's are referenced from G so can't be collected.
+// buf points into the same allocation, elemtype is persistent.
+// SudoG's are referenced from their owning thread so they can't be collected.
// TODO(dvyukov,rlh): Rethink when collector can move allocated objects.
c = (*hchan)(gomallocgc(hchanSize+uintptr(size)*uintptr(elem.size), nil, flagNoScan))
if size > 0 && elem.size != 0 {
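The comment in the hunk above explains that a channel whose element type contains no pointers gets its hchan header and ring buffer from a single flagNoScan allocation. A trivial user-level case that takes that path (sketch for illustration only):

package main

func main() {
	// int contains no pointers, so makechan places the hchan header and the
	// 8-element buffer in one pointer-free (flagNoScan) allocation.
	c := make(chan int, 8)
	c <- 1
	<-c
}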
@@ -53,7 +53,7 @@ func makechan(t *chantype, size int64) *hchan {
c.dataqsiz = uint(size)
if debugChan {
-println("makechan: chan=", c, "; elemsize=", elem.size, "; elemalg=", elem.alg, "; dataqsiz=", size)
+print("makechan: chan=", c, "; elemsize=", elem.size, "; elemalg=", elem.alg, "; dataqsiz=", size, "\n")
}
return c
}
@@ -93,11 +93,11 @@ func chansend(t *chantype, c *hchan, ep unsafe.Pointer, block bool, callerpc uin
return false
}
gopark(nil, nil, "chan send (nil chan)")
-return false // not reached
+gothrow("unreachable")
}
if debugChan {
-println("chansend: chan=", c)
+print("chansend: chan=", c, "\n")
}
if raceenabled {
@@ -164,6 +164,7 @@ func chansend(t *chantype, c *hchan, ep unsafe.Pointer, block bool, callerpc uin
// no receiver available: block on this channel.
gp := getg()
mysg := acquireSudog()
mysg.releasetime = 0
if t0 != 0 {
mysg.releasetime = -1
}
@@ -204,6 +205,7 @@ func chansend(t *chantype, c *hchan, ep unsafe.Pointer, block bool, callerpc uin
}
gp := getg()
mysg := acquireSudog()
mysg.releasetime = 0
if t0 != 0 {
mysg.releasetime = -1
}
@@ -214,8 +216,8 @@ func chansend(t *chantype, c *hchan, ep unsafe.Pointer, block bool, callerpc uin
goparkunlock(&c.lock, "chan send")
// someone woke us up - try again
-if mysg.releasetime != 0 {
-t1 = int64(mysg.releasetime)
+if mysg.releasetime > 0 {
+t1 = mysg.releasetime
}
releaseSudog(mysg)
lock(&c.lock)
@@ -303,8 +305,294 @@ func closechan(c *hchan) {
}
goready(gp)
}
unlock(&c.lock)
}
// entry points for <- c from compiled code
//go:nosplit
func chanrecv1(t *chantype, c *hchan, elem unsafe.Pointer) {
chanrecv(t, c, elem, true)
}
//go:nosplit
func chanrecv2(t *chantype, c *hchan, elem unsafe.Pointer) (received bool) {
_, received = chanrecv(t, c, elem, true)
return
}
// chanrecv receives on channel c and writes the received data to ep.
// ep may be nil, in which case received data is ignored.
// If block == false and no elements are available, returns (false, false).
// Otherwise, if c is closed, zeros *ep and returns (true, false).
// Otherwise, fills in *ep with an element and returns (true, true).
func chanrecv(t *chantype, c *hchan, ep unsafe.Pointer, block bool) (selected, received bool) {
// raceenabled: don't need to check ep, as it is always on the stack.
if debugChan {
print("chanrecv: chan=", c, "\n")
}
if c == nil {
if !block {
return
}
gopark(nil, nil, "chan receive (nil chan)")
gothrow("unreachable")
}
// Fast path: check for failed non-blocking operation without acquiring the lock.
//
// After observing that the channel is not ready for receiving, we observe that the
// channel is not closed. Each of these observations is a single word-sized read
// (first c.sendq.first or c.qcount, and second c.closed).
// Because a channel cannot be reopened, the later observation of the channel
// being not closed implies that it was also not closed at the moment of the
// first observation. We behave as if we observed the channel at that moment
// and report that the receive cannot proceed.
//
// The order of operations is important here: reversing the operations can lead to
// incorrect behavior when racing with a close.
if !block && (c.dataqsiz == 0 && c.sendq.first == nil ||
c.dataqsiz > 0 && atomicloaduint(&c.qcount) == 0) &&
atomicload(&c.closed) == 0 {
return
}
var t0 int64
if blockprofilerate > 0 {
t0 = cputicks()
}
lock(&c.lock)
if c.dataqsiz == 0 { // synchronous channel
if c.closed != 0 {
return recvclosed(c, ep)
}
sg := c.sendq.dequeue()
if sg != nil {
if raceenabled {
racesync(c, sg)
}
unlock(&c.lock)
if ep != nil {
memmove(ep, sg.elem, uintptr(c.elemsize))
}
gp := sg.g
gp.param = unsafe.Pointer(sg)
if sg.releasetime != 0 {
sg.releasetime = cputicks()
}
goready(gp)
selected = true
received = true
return
}
if !block {
unlock(&c.lock)
return
}
// no sender available: block on this channel.
gp := getg()
mysg := acquireSudog()
mysg.releasetime = 0
if t0 != 0 {
mysg.releasetime = -1
}
mysg.elem = ep
mysg.waitlink = nil
gp.waiting = mysg
mysg.g = gp
mysg.selectdone = nil
gp.param = nil
c.recvq.enqueue(mysg)
goparkunlock(&c.lock, "chan receive")
// someone woke us up
gp.waiting = nil
if mysg.releasetime > 0 {
blockevent(mysg.releasetime-t0, 2)
}
releaseSudog(mysg)
if gp.param != nil {
// a sender sent us some data. It already wrote to ep.
selected = true
received = true
return
}
lock(&c.lock)
if c.closed == 0 {
gothrow("chanrecv: spurious wakeup")
}
return recvclosed(c, ep)
}
// asynchronous channel
// wait for some data to appear
var t1 int64
for c.qcount <= 0 {
if c.closed != 0 {
selected, received = recvclosed(c, ep)
if t1 > 0 {
blockevent(t1-t0, 2)
}
return
}
if !block {
unlock(&c.lock)
return
}
// wait for someone to send an element
gp := getg()
mysg := acquireSudog()
mysg.releasetime = 0
if t0 != 0 {
mysg.releasetime = -1
}
mysg.elem = nil
mysg.g = gp
mysg.selectdone = nil
c.recvq.enqueue(mysg)
goparkunlock(&c.lock, "chan receive")
// someone woke us up - try again
if mysg.releasetime > 0 {
t1 = mysg.releasetime
}
releaseSudog(mysg)
lock(&c.lock)
}
if raceenabled {
raceacquire(chanbuf(c, c.recvx))
racerelease(chanbuf(c, c.recvx))
}
if ep != nil {
memmove(ep, chanbuf(c, c.recvx), uintptr(c.elemsize))
}
memclr(chanbuf(c, c.recvx), uintptr(c.elemsize))
c.recvx++
if c.recvx == c.dataqsiz {
c.recvx = 0
}
c.qcount--
// ping a sender now that there is space
sg := c.sendq.dequeue()
if sg != nil {
gp := sg.g
unlock(&c.lock)
if sg.releasetime != 0 {
sg.releasetime = cputicks()
}
goready(gp)
} else {
unlock(&c.lock)
}
if t1 > 0 {
blockevent(t1-t0, 2)
}
selected = true
received = true
return
}
// recvclosed is a helper function for chanrecv. Handles cleanup
// when the receiver encounters a closed channel.
// Caller must hold c.lock, recvclosed will release the lock.
func recvclosed(c *hchan, ep unsafe.Pointer) (selected, received bool) {
if raceenabled {
raceacquire(unsafe.Pointer(c))
}
unlock(&c.lock)
if ep != nil {
memclr(ep, uintptr(c.elemsize))
}
return true, false
}
// compiler implements
//
// select {
// case c <- v:
// ... foo
// default:
// ... bar
// }
//
// as
//
// if selectnbsend(c, v) {
// ... foo
// } else {
// ... bar
// }
//
func selectnbsend(t *chantype, c *hchan, elem unsafe.Pointer) (selected bool) {
return chansend(t, c, elem, false, getcallerpc(unsafe.Pointer(&t)))
}
// compiler implements
//
// select {
// case v = <-c:
// ... foo
// default:
// ... bar
// }
//
// as
//
// if selectnbrecv(&v, c) {
// ... foo
// } else {
// ... bar
// }
//
func selectnbrecv(t *chantype, elem unsafe.Pointer, c *hchan) (selected bool) {
selected, _ = chanrecv(t, c, elem, false)
return
}
// compiler implements
//
// select {
// case v, ok = <-c:
// ... foo
// default:
// ... bar
// }
//
// as
//
// if c != nil && selectnbrecv2(&v, &ok, c) {
// ... foo
// } else {
// ... bar
// }
//
func selectnbrecv2(t *chantype, elem unsafe.Pointer, received *bool, c *hchan) (selected bool) {
// TODO(khr): just return 2 values from this function, now that it is in Go.
selected, *received = chanrecv(t, c, elem, false)
return
}
func reflect_chansend(t *chantype, c *hchan, elem unsafe.Pointer, nb bool) (selected bool) {
return chansend(t, c, elem, !nb, getcallerpc(unsafe.Pointer(&t)))
}
func reflect_chanrecv(t *chantype, c *hchan, nb bool, elem unsafe.Pointer) (selected bool, received bool) {
return chanrecv(t, c, elem, !nb)
}
func reflect_chanlen(c *hchan) int {
......
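The fast-path comment in chanrecv and the selectnbrecv/selectnbrecv2 lowering comments above describe how a receive inside a select with a default clause becomes a single non-blocking chanrecv call that can fail without taking the channel lock. A small sketch of the source form those comments refer to:

package main

import "fmt"

func main() {
	c := make(chan int) // unbuffered and no sender: a receive cannot proceed

	select {
	case v := <-c:
		fmt.Println("received", v)
	default:
		// The compiler rewrites this select as selectnbrecv(&v, c), which calls
		// chanrecv with block=false; the lock-free fast path returns (false, false)
		// and the default case runs.
		fmt.Println("would block")
	}
}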
@@ -281,7 +281,7 @@ func TestBlockProfile(t *testing.T) {
tests := [...]TestCase{
{"chan recv", blockChanRecv, `
[0-9]+ [0-9]+ @ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+
-# 0x[0-9,a-f]+ runtime\.chanrecv1\+0x[0-9,a-f]+ .*/src/pkg/runtime/chan.goc:[0-9]+
+# 0x[0-9,a-f]+ runtime\.chanrecv1\+0x[0-9,a-f]+ .*/src/pkg/runtime/chan.go:[0-9]+
# 0x[0-9,a-f]+ runtime/pprof_test\.blockChanRecv\+0x[0-9,a-f]+ .*/src/pkg/runtime/pprof/pprof_test.go:[0-9]+
# 0x[0-9,a-f]+ runtime/pprof_test\.TestBlockProfile\+0x[0-9,a-f]+ .*/src/pkg/runtime/pprof/pprof_test.go:[0-9]+
`},
@@ -293,7 +293,7 @@ func TestBlockProfile(t *testing.T) {
`},
{"chan close", blockChanClose, `
[0-9]+ [0-9]+ @ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+
-# 0x[0-9,a-f]+ runtime\.chanrecv1\+0x[0-9,a-f]+ .*/src/pkg/runtime/chan.goc:[0-9]+
+# 0x[0-9,a-f]+ runtime\.chanrecv1\+0x[0-9,a-f]+ .*/src/pkg/runtime/chan.go:[0-9]+
# 0x[0-9,a-f]+ runtime/pprof_test\.blockChanClose\+0x[0-9,a-f]+ .*/src/pkg/runtime/pprof/pprof_test.go:[0-9]+
# 0x[0-9,a-f]+ runtime/pprof_test\.TestBlockProfile\+0x[0-9,a-f]+ .*/src/pkg/runtime/pprof/pprof_test.go:[0-9]+
`},
......
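The two regexp updates above only change the expected source file in the block-profile stacks from chan.goc to chan.go, now that chanrecv1 lives in the Go file. For reference, a minimal program in the same shape as the test's blockChanRecv helper, showing how such a profile (and its runtime.chanrecv1 frame) is produced:

package main

import (
	"os"
	"runtime"
	"runtime/pprof"
	"time"
)

func main() {
	runtime.SetBlockProfileRate(1) // record every blocking event

	c := make(chan bool)
	go func() {
		time.Sleep(10 * time.Millisecond)
		c <- true
	}()
	<-c // blocks in runtime.chanrecv1 until the send above arrives

	// The dumped stacks now report .../runtime/chan.go for the receive frame.
	pprof.Lookup("block").WriteTo(os.Stdout, 1)
}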
@@ -233,6 +233,9 @@ func atomicloadp(ptr unsafe.Pointer) unsafe.Pointer
//go:noescape
func atomicloaduintptr(ptr *uintptr) uintptr
//go:noescape
func atomicloaduint(ptr *uint) uint
//go:noescape
func atomicor8(ptr *uint8, val uint8)
......
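atomicloaduint is declared here because chanrecv's lock-free fast path loads c.qcount, whose type is uint rather than uint32 or uint64; the assembly stubs earlier in the commit simply alias it to the word-sized load on each port. A user-level sketch of the same dispatch (illustrative only: the runtime does this in per-architecture assembly, and atomicLoadUint below is a hypothetical stand-in):

package main

import (
	"fmt"
	"sync/atomic"
	"unsafe"
)

// atomicLoadUint dispatches a *uint load to the 32- or 64-bit atomic load
// matching the platform word size, mirroring the runtime's per-port aliases.
func atomicLoadUint(ptr *uint) uint {
	if unsafe.Sizeof(uint(0)) == 8 {
		return uint(atomic.LoadUint64((*uint64)(unsafe.Pointer(ptr))))
	}
	return uint(atomic.LoadUint32((*uint32)(unsafe.Pointer(ptr))))
}

func main() {
	var qcount uint = 3 // stand-in for hchan.qcount read without holding the lock
	fmt.Println(atomicLoadUint(&qcount))
}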
@@ -62,5 +62,11 @@ TEXT reflect·chanlen(SB), NOSPLIT, $0-0
TEXT reflect·chancap(SB), NOSPLIT, $0-0
JMP runtime·reflect_chancap(SB)
TEXT reflect·chansend(SB), NOSPLIT, $0-0
JMP runtime·reflect_chansend(SB)
TEXT reflect·chanrecv(SB), NOSPLIT, $0-0
JMP runtime·reflect_chanrecv(SB)
TEXT runtimedebug·freeOSMemory(SB), NOSPLIT, $0-0
JMP runtime·freeOSMemory(SB)
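The new reflect·chansend and reflect·chanrecv thunks are the targets that package reflect's channel operations jump to; the blocking calls use nb=false and the Try variants use nb=true, matching reflect_chansend/reflect_chanrecv above. A hedged user-level sketch exercising both through the reflect API:

package main

import (
	"fmt"
	"reflect"
)

func main() {
	c := make(chan int, 1)
	v := reflect.ValueOf(c)

	v.Send(reflect.ValueOf(7)) // blocking send: reflect_chansend with nb=false
	x, ok := v.TryRecv()       // non-blocking receive: reflect_chanrecv with nb=true
	fmt.Println(x.Int(), ok)   // 7 true

	x, ok = v.TryRecv()          // buffer now empty: would block, so no value
	fmt.Println(x.IsValid(), ok) // false false
}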