Commit 8e8cc9db authored by Austin Clements's avatar Austin Clements

runtime: use gList for gfree lists

Change-Id: I3d21587e02264fe5da1cc38d98779facfa09b927
Reviewed-on: https://go-review.googlesource.com/129398
Run-TryBot: Austin Clements <austin@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Rick Hudson <rlh@golang.org>
parent de990545
...@@ -302,26 +302,27 @@ func markrootBlock(b0, n0 uintptr, ptrmask0 *uint8, gcw *gcWork, shard int) { ...@@ -302,26 +302,27 @@ func markrootBlock(b0, n0 uintptr, ptrmask0 *uint8, gcw *gcWork, shard int) {
//TODO go:nowritebarrier //TODO go:nowritebarrier
func markrootFreeGStacks() { func markrootFreeGStacks() {
// Take list of dead Gs with stacks. // Take list of dead Gs with stacks.
lock(&sched.gflock) lock(&sched.gFree.lock)
list := sched.gfreeStack list := sched.gFree.stack
sched.gfreeStack = nil sched.gFree.stack = gList{}
unlock(&sched.gflock) unlock(&sched.gFree.lock)
if list == nil { if list.empty() {
return return
} }
// Free stacks. // Free stacks.
tail := list q := gQueue{list.head, list.head}
for gp := list; gp != nil; gp = gp.schedlink.ptr() { for gp := list.head.ptr(); gp != nil; gp = gp.schedlink.ptr() {
shrinkstack(gp) shrinkstack(gp)
tail = gp // Manipulate the queue directly since the Gs are
// already all linked the right way.
q.tail.set(gp)
} }
// Put Gs back on the free list. // Put Gs back on the free list.
lock(&sched.gflock) lock(&sched.gFree.lock)
tail.schedlink.set(sched.gfreeNoStack) sched.gFree.noStack.pushAll(q)
sched.gfreeNoStack = list unlock(&sched.gFree.lock)
unlock(&sched.gflock)
} }
// markrootSpans marks roots for one shard of work.spans. // markrootSpans marks roots for one shard of work.spans.
......
...@@ -3471,25 +3471,21 @@ func gfput(_p_ *p, gp *g) { ...@@ -3471,25 +3471,21 @@ func gfput(_p_ *p, gp *g) {
gp.stackguard0 = 0 gp.stackguard0 = 0
} }
gp.schedlink.set(_p_.gfree) _p_.gFree.push(gp)
_p_.gfree = gp _p_.gFree.n++
_p_.gfreecnt++ if _p_.gFree.n >= 64 {
if _p_.gfreecnt >= 64 { lock(&sched.gFree.lock)
lock(&sched.gflock) for _p_.gFree.n >= 32 {
for _p_.gfreecnt >= 32 { _p_.gFree.n--
_p_.gfreecnt-- gp = _p_.gFree.pop()
gp = _p_.gfree
_p_.gfree = gp.schedlink.ptr()
if gp.stack.lo == 0 { if gp.stack.lo == 0 {
gp.schedlink.set(sched.gfreeNoStack) sched.gFree.noStack.push(gp)
sched.gfreeNoStack = gp
} else { } else {
gp.schedlink.set(sched.gfreeStack) sched.gFree.stack.push(gp)
sched.gfreeStack = gp
} }
sched.ngfree++ sched.gFree.n++
} }
unlock(&sched.gflock) unlock(&sched.gFree.lock)
} }
} }
...@@ -3497,44 +3493,42 @@ func gfput(_p_ *p, gp *g) { ...@@ -3497,44 +3493,42 @@ func gfput(_p_ *p, gp *g) {
// If local list is empty, grab a batch from global list. // If local list is empty, grab a batch from global list.
func gfget(_p_ *p) *g { func gfget(_p_ *p) *g {
retry: retry:
gp := _p_.gfree if _p_.gFree.empty() && (!sched.gFree.stack.empty() || !sched.gFree.noStack.empty()) {
if gp == nil && (sched.gfreeStack != nil || sched.gfreeNoStack != nil) { lock(&sched.gFree.lock)
lock(&sched.gflock) // Move a batch of free Gs to the P.
for _p_.gfreecnt < 32 { for _p_.gFree.n < 32 {
if sched.gfreeStack != nil { // Prefer Gs with stacks.
// Prefer Gs with stacks. gp := sched.gFree.stack.pop()
gp = sched.gfreeStack if gp == nil {
sched.gfreeStack = gp.schedlink.ptr() gp = sched.gFree.noStack.pop()
} else if sched.gfreeNoStack != nil { if gp == nil {
gp = sched.gfreeNoStack break
sched.gfreeNoStack = gp.schedlink.ptr() }
} else {
break
} }
_p_.gfreecnt++ sched.gFree.n--
sched.ngfree-- _p_.gFree.push(gp)
gp.schedlink.set(_p_.gfree) _p_.gFree.n++
_p_.gfree = gp
} }
unlock(&sched.gflock) unlock(&sched.gFree.lock)
goto retry goto retry
} }
if gp != nil { gp := _p_.gFree.pop()
_p_.gfree = gp.schedlink.ptr() if gp == nil {
_p_.gfreecnt-- return nil
if gp.stack.lo == 0 { }
// Stack was deallocated in gfput. Allocate a new one. _p_.gFree.n--
systemstack(func() { if gp.stack.lo == 0 {
gp.stack = stackalloc(_FixedStack) // Stack was deallocated in gfput. Allocate a new one.
}) systemstack(func() {
gp.stackguard0 = gp.stack.lo + _StackGuard gp.stack = stackalloc(_FixedStack)
} else { })
if raceenabled { gp.stackguard0 = gp.stack.lo + _StackGuard
racemalloc(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo) } else {
} if raceenabled {
if msanenabled { racemalloc(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
msanmalloc(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo) }
} if msanenabled {
msanmalloc(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
} }
} }
return gp return gp
...@@ -3542,21 +3536,18 @@ retry: ...@@ -3542,21 +3536,18 @@ retry:
// Purge all cached G's from gfree list to the global list. // Purge all cached G's from gfree list to the global list.
func gfpurge(_p_ *p) { func gfpurge(_p_ *p) {
lock(&sched.gflock) lock(&sched.gFree.lock)
for _p_.gfreecnt != 0 { for !_p_.gFree.empty() {
_p_.gfreecnt-- gp := _p_.gFree.pop()
gp := _p_.gfree _p_.gFree.n--
_p_.gfree = gp.schedlink.ptr()
if gp.stack.lo == 0 { if gp.stack.lo == 0 {
gp.schedlink.set(sched.gfreeNoStack) sched.gFree.noStack.push(gp)
sched.gfreeNoStack = gp
} else { } else {
gp.schedlink.set(sched.gfreeStack) sched.gFree.stack.push(gp)
sched.gfreeStack = gp
} }
sched.ngfree++ sched.gFree.n++
} }
unlock(&sched.gflock) unlock(&sched.gFree.lock)
} }
// Breakpoint executes a breakpoint trap. // Breakpoint executes a breakpoint trap.
...@@ -3669,9 +3660,9 @@ func badunlockosthread() { ...@@ -3669,9 +3660,9 @@ func badunlockosthread() {
} }
func gcount() int32 { func gcount() int32 {
n := int32(allglen) - sched.ngfree - int32(atomic.Load(&sched.ngsys)) n := int32(allglen) - sched.gFree.n - int32(atomic.Load(&sched.ngsys))
for _, _p_ := range allp { for _, _p_ := range allp {
n -= _p_.gfreecnt n -= _p_.gFree.n
} }
// All these variables can be changed concurrently, so the result can be inconsistent. // All these variables can be changed concurrently, so the result can be inconsistent.
...@@ -4581,7 +4572,7 @@ func schedtrace(detailed bool) { ...@@ -4581,7 +4572,7 @@ func schedtrace(detailed bool) {
if mp != nil { if mp != nil {
id = mp.id id = mp.id
} }
print(" P", i, ": status=", _p_.status, " schedtick=", _p_.schedtick, " syscalltick=", _p_.syscalltick, " m=", id, " runqsize=", t-h, " gfreecnt=", _p_.gfreecnt, "\n") print(" P", i, ": status=", _p_.status, " schedtick=", _p_.schedtick, " syscalltick=", _p_.syscalltick, " m=", id, " runqsize=", t-h, " gfreecnt=", _p_.gFree.n, "\n")
} else { } else {
// In non-detailed mode format lengths of per-P run queues as: // In non-detailed mode format lengths of per-P run queues as:
// [len1 len2 len3 len4] // [len1 len2 len3 len4]
......
...@@ -506,8 +506,10 @@ type p struct { ...@@ -506,8 +506,10 @@ type p struct {
runnext guintptr runnext guintptr
// Available G's (status == Gdead) // Available G's (status == Gdead)
gfree *g gFree struct {
gfreecnt int32 gList
n int32
}
sudogcache []*sudog sudogcache []*sudog
sudogbuf [128]*sudog sudogbuf [128]*sudog
...@@ -578,10 +580,12 @@ type schedt struct { ...@@ -578,10 +580,12 @@ type schedt struct {
runqsize int32 runqsize int32
// Global cache of dead G's. // Global cache of dead G's.
gflock mutex gFree struct {
gfreeStack *g lock mutex
gfreeNoStack *g stack gList // Gs with stacks
ngfree int32 noStack gList // Gs without stacks
n int32
}
// Central cache of sudog structs. // Central cache of sudog structs.
sudoglock mutex sudoglock mutex
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment.