Commit 13ae271d authored by Austin Clements

runtime: introduce a type for lfstacks

The lfstack API is still a C-style API: lfstacks all have unhelpful
type uint64 and the APIs are package-level functions. Make the code
more readable and Go-style by creating an lfstack type with methods
for push, pop, and empty.

Change-Id: I64685fa3be0e82ae2d1a782a452a50974440a827
Reviewed-on: https://go-review.googlesource.com/38290
Run-TryBot: Austin Clements <austin@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Brad Fitzpatrick <bradfitz@golang.org>
Reviewed-by: Rick Hudson <rlh@golang.org>
parent 2805d206
...@@ -41,11 +41,11 @@ type LFNode struct { ...@@ -41,11 +41,11 @@ type LFNode struct {
} }
func LFStackPush(head *uint64, node *LFNode) { func LFStackPush(head *uint64, node *LFNode) {
lfstackpush(head, (*lfnode)(unsafe.Pointer(node))) (*lfstack)(head).push((*lfnode)(unsafe.Pointer(node)))
} }
func LFStackPop(head *uint64) *LFNode { func LFStackPop(head *uint64) *LFNode {
return (*LFNode)(unsafe.Pointer(lfstackpop(head))) return (*LFNode)(unsafe.Pointer((*lfstack)(head).pop()))
} }
func GCMask(x interface{}) (ret []byte) { func GCMask(x interface{}) (ret []byte) {
......
...@@ -3,10 +3,6 @@ ...@@ -3,10 +3,6 @@
// license that can be found in the LICENSE file. // license that can be found in the LICENSE file.
// Lock-free stack. // Lock-free stack.
// Initialize head to 0, compare with 0 to test for emptiness.
// The stack does not keep pointers to nodes,
// so they can be garbage collected if there are no other pointers to nodes.
// The following code runs only in non-preemptible contexts.
package runtime package runtime
...@@ -15,32 +11,47 @@ import ( ...@@ -15,32 +11,47 @@ import (
"unsafe" "unsafe"
) )
func lfstackpush(head *uint64, node *lfnode) { // lfstack is the head of a lock-free stack.
//
// The zero value of lfstack is an empty list.
//
// This stack is intrusive. Nodes must embed lfnode as the first field.
//
// The stack does not keep GC-visible pointers to nodes, so the caller
// is responsible for ensuring the nodes are not garbage collected
// (typically by allocating them from manually-managed memory).
type lfstack uint64
func (head *lfstack) push(node *lfnode) {
node.pushcnt++ node.pushcnt++
new := lfstackPack(node, node.pushcnt) new := lfstackPack(node, node.pushcnt)
if node1 := lfstackUnpack(new); node1 != node { if node1 := lfstackUnpack(new); node1 != node {
print("runtime: lfstackpush invalid packing: node=", node, " cnt=", hex(node.pushcnt), " packed=", hex(new), " -> node=", node1, "\n") print("runtime: lfstack.push invalid packing: node=", node, " cnt=", hex(node.pushcnt), " packed=", hex(new), " -> node=", node1, "\n")
throw("lfstackpush") throw("lfstack.push")
} }
for { for {
old := atomic.Load64(head) old := atomic.Load64((*uint64)(head))
node.next = old node.next = old
if atomic.Cas64(head, old, new) { if atomic.Cas64((*uint64)(head), old, new) {
break break
} }
} }
} }
func lfstackpop(head *uint64) unsafe.Pointer { func (head *lfstack) pop() unsafe.Pointer {
for { for {
old := atomic.Load64(head) old := atomic.Load64((*uint64)(head))
if old == 0 { if old == 0 {
return nil return nil
} }
node := lfstackUnpack(old) node := lfstackUnpack(old)
next := atomic.Load64(&node.next) next := atomic.Load64(&node.next)
if atomic.Cas64(head, old, next) { if atomic.Cas64((*uint64)(head), old, next) {
return unsafe.Pointer(node) return unsafe.Pointer(node)
} }
} }
} }
func (head *lfstack) empty() bool {
return atomic.Load64((*uint64)(head)) == 0
}
...@@ -782,8 +782,8 @@ const gcAssistTimeSlack = 5000 ...@@ -782,8 +782,8 @@ const gcAssistTimeSlack = 5000
const gcOverAssistWork = 64 << 10 const gcOverAssistWork = 64 << 10
var work struct { var work struct {
full uint64 // lock-free list of full blocks workbuf full lfstack // lock-free list of full blocks workbuf
empty uint64 // lock-free list of empty blocks workbuf empty lfstack // lock-free list of empty blocks workbuf
pad0 [sys.CacheLineSize]uint8 // prevents false-sharing between full/empty and nproc/nwait pad0 [sys.CacheLineSize]uint8 // prevents false-sharing between full/empty and nproc/nwait
// bytesMarked is the number of bytes marked this cycle. This // bytesMarked is the number of bytes marked this cycle. This
...@@ -1574,7 +1574,7 @@ func gcMarkWorkAvailable(p *p) bool { ...@@ -1574,7 +1574,7 @@ func gcMarkWorkAvailable(p *p) bool {
if p != nil && !p.gcw.empty() { if p != nil && !p.gcw.empty() {
return true return true
} }
if atomic.Load64(&work.full) != 0 { if !work.full.empty() {
return true // global work available return true // global work available
} }
if work.markrootNext < work.markrootJobs { if work.markrootNext < work.markrootJobs {
......
...@@ -312,7 +312,7 @@ func (b *workbuf) checkempty() { ...@@ -312,7 +312,7 @@ func (b *workbuf) checkempty() {
func getempty() *workbuf { func getempty() *workbuf {
var b *workbuf var b *workbuf
if work.empty != 0 { if work.empty != 0 {
b = (*workbuf)(lfstackpop(&work.empty)) b = (*workbuf)(work.empty.pop())
if b != nil { if b != nil {
b.checkempty() b.checkempty()
} }
...@@ -324,11 +324,11 @@ func getempty() *workbuf { ...@@ -324,11 +324,11 @@ func getempty() *workbuf {
} }
// putempty puts a workbuf onto the work.empty list. // putempty puts a workbuf onto the work.empty list.
// Upon entry this go routine owns b. The lfstackpush relinquishes ownership. // Upon entry this go routine owns b. The lfstack.push relinquishes ownership.
//go:nowritebarrier //go:nowritebarrier
func putempty(b *workbuf) { func putempty(b *workbuf) {
b.checkempty() b.checkempty()
lfstackpush(&work.empty, &b.node) work.empty.push(&b.node)
} }
// putfull puts the workbuf on the work.full list for the GC. // putfull puts the workbuf on the work.full list for the GC.
...@@ -337,14 +337,14 @@ func putempty(b *workbuf) { ...@@ -337,14 +337,14 @@ func putempty(b *workbuf) {
//go:nowritebarrier //go:nowritebarrier
func putfull(b *workbuf) { func putfull(b *workbuf) {
b.checknonempty() b.checknonempty()
lfstackpush(&work.full, &b.node) work.full.push(&b.node)
} }
// trygetfull tries to get a full or partially empty workbuffer. // trygetfull tries to get a full or partially empty workbuffer.
// If one is not immediately available return nil // If one is not immediately available return nil
//go:nowritebarrier //go:nowritebarrier
func trygetfull() *workbuf { func trygetfull() *workbuf {
b := (*workbuf)(lfstackpop(&work.full)) b := (*workbuf)(work.full.pop())
if b != nil { if b != nil {
b.checknonempty() b.checknonempty()
return b return b
...@@ -365,7 +365,7 @@ func trygetfull() *workbuf { ...@@ -365,7 +365,7 @@ func trygetfull() *workbuf {
// phase. // phase.
//go:nowritebarrier //go:nowritebarrier
func getfull() *workbuf { func getfull() *workbuf {
b := (*workbuf)(lfstackpop(&work.full)) b := (*workbuf)(work.full.pop())
if b != nil { if b != nil {
b.checknonempty() b.checknonempty()
return b return b
...@@ -383,7 +383,7 @@ func getfull() *workbuf { ...@@ -383,7 +383,7 @@ func getfull() *workbuf {
println("runtime: work.nwait=", decnwait, "work.nproc=", work.nproc) println("runtime: work.nwait=", decnwait, "work.nproc=", work.nproc)
throw("work.nwait > work.nproc") throw("work.nwait > work.nproc")
} }
b = (*workbuf)(lfstackpop(&work.full)) b = (*workbuf)(work.full.pop())
if b != nil { if b != nil {
b.checknonempty() b.checknonempty()
return b return b
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment