Commit 47542520 authored by Austin Clements

runtime: fix stackCache=0 debug mode

Setting stackCache to 0 to disable stack caches for debugging hasn't
worked for a long time. It causes stackalloc to fall back to full span
allocation, round sub-page stacks down to 0 pages, and blow up.
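
For context, the failing arithmetic looks like this hedged sketch (the
8 KiB page size and 2 KiB stack size are assumed values for a typical
64-bit platform, not taken from this diff):

    package main

    import "fmt"

    func main() {
            const pageShift = 13    // assumed: 8 KiB runtime pages (1 << 13)
            const stackBytes = 2048 // assumed: a small, sub-page stack request

            // Full-span allocation sizes the span in whole pages. For a
            // sub-page stack the shift rounds down to zero pages, so the
            // allocator is asked for an empty span and blows up.
            fmt.Println(stackBytes >> pageShift) // prints 0
    }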

Fix this debug mode so it disables the per-P caches, but continues to
use the global stack pools for small stacks, which correctly handle
sub-page stacks. While we're here, rename stackCache to stackNoCache
so it acts like the rest of the stack allocator debug modes where "0"
is the right default value.
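
As a minimal sketch of the shape of the fix (allocSmall, perPCache, and
globalPool below are invented stand-ins for the runtime's real mcache
stack caches and stackpool; the only point is that a nonzero
stackNoCache forces the global-pool path):

    package main

    import (
            "fmt"
            "sync"
    )

    const stackNoCache = 1 // debug mode: nonzero disables the per-P caches

    var (
            poolMu     sync.Mutex
            globalPool = []string{"pool-stack-1", "pool-stack-2"} // stand-in for the global stack pools
            perPCache  = []string{"cached-stack"}                 // stand-in for one P's stack cache
    )

    // allocSmall mirrors the fixed control flow: the small-stack path is
    // always taken, but stackNoCache routes every request to the shared
    // pool, which handles sub-page stacks correctly.
    func allocSmall() string {
            if stackNoCache != 0 || perPCache == nil {
                    poolMu.Lock()
                    defer poolMu.Unlock()
                    s := globalPool[len(globalPool)-1]
                    globalPool = globalPool[:len(globalPool)-1]
                    return s
            }
            s := perPCache[len(perPCache)-1]
            perPCache = perPCache[:len(perPCache)-1]
            return s
    }

    func main() {
            fmt.Println(allocSmall()) // with stackNoCache = 1: served from the global pool
    }

Because stackNoCache is a compile-time constant, the compiler folds the
extra check away in the default build, so the fast path costs nothing
when the mode is off.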

Fixes #17291.

Change-Id: If401c41cee3448513cbd7bb2e9334a8efab257a7
Reviewed-on: https://go-review.googlesource.com/43637
Reviewed-by: Keith Randall <khr@golang.org>
parent 8a1c5b2e
src/runtime/stack.go

@@ -121,8 +121,7 @@ const (
 	stackFromSystem  = 0 // allocate stacks from system memory instead of the heap
 	stackFaultOnFree = 0 // old stacks are mapped noaccess to detect use after free
 	stackPoisonCopy  = 0 // fill stack that should not be accessed with garbage, to detect bad dereferences during copy
-
-	stackCache = 1
+	stackNoCache     = 0 // disable per-P small stack caches
 
 	// check the BP links during traceback.
 	debugCheckBP = false
@@ -349,7 +348,7 @@ func stackalloc(n uint32) stack {
 	// If we need a stack of a bigger size, we fall back on allocating
 	// a dedicated span.
 	var v unsafe.Pointer
-	if stackCache != 0 && n < _FixedStack<<_NumStackOrders && n < _StackCacheSize {
+	if n < _FixedStack<<_NumStackOrders && n < _StackCacheSize {
 		order := uint8(0)
 		n2 := n
 		for n2 > _FixedStack {
@@ -358,7 +357,7 @@ func stackalloc(n uint32) stack {
 		}
 		var x gclinkptr
 		c := thisg.m.mcache
-		if c == nil || thisg.m.preemptoff != "" || thisg.m.helpgc != 0 {
+		if stackNoCache != 0 || c == nil || thisg.m.preemptoff != "" || thisg.m.helpgc != 0 {
 			// c == nil can happen in the guts of exitsyscall or
 			// procresize. Just get a stack from the global pool.
 			// Also don't touch stackcache during gc
@@ -443,7 +442,7 @@ func stackfree(stk stack) {
 	if msanenabled {
 		msanfree(v, n)
 	}
-	if stackCache != 0 && n < _FixedStack<<_NumStackOrders && n < _StackCacheSize {
+	if n < _FixedStack<<_NumStackOrders && n < _StackCacheSize {
 		order := uint8(0)
 		n2 := n
 		for n2 > _FixedStack {
@@ -452,7 +451,7 @@ func stackfree(stk stack) {
 		}
 		x := gclinkptr(v)
 		c := gp.m.mcache
-		if c == nil || gp.m.preemptoff != "" || gp.m.helpgc != 0 {
+		if stackNoCache != 0 || c == nil || gp.m.preemptoff != "" || gp.m.helpgc != 0 {
			lock(&stackpoolmu)
			stackpoolfree(x, order)
			unlock(&stackpoolmu)
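
Note: like the other knobs in this const block (stackFromSystem,
stackFaultOnFree, stackPoisonCopy), stackNoCache is a compile-time
constant, so enabling the debug mode means editing src/runtime/stack.go
to set stackNoCache = 1 and rebuilding; there is no environment
variable or flag for it.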