// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import "unsafe"

const (
	// stackDebug == 0: no logging
	//            == 1: logging of per-stack operations
	//            == 2: logging of per-frame operations
	//            == 3: logging of per-word updates
	//            == 4: logging of per-word reads
	stackDebug       = 0
	stackFromSystem  = 0 // allocate stacks from system memory instead of the heap
	stackFaultOnFree = 0 // old stacks are mapped noaccess to detect use after free
	stackPoisonCopy  = 0 // fill stack that should not be accessed with garbage, to detect bad dereferences during copy

	stackCache = 1
)

const (
	uintptrMask = 1<<(8*ptrSize) - 1
	poisonStack = uintptrMask & 0x6868686868686868

	// Goroutine preemption request.
	// Stored into g->stackguard0 to cause split stack check failure.
	// Must be greater than any real sp.
	// 0xfffffade in hex.
	stackPreempt = uintptrMask & -1314

	// Thread is forking.
	// Stored into g->stackguard0 to cause split stack check failure.
	// Must be greater than any real sp.
	stackFork = uintptrMask & -1234
)

// Global pool of spans that have free stacks.
// Stacks are assigned an order according to size.
//     order = log_2(size/FixedStack)
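// For example, a stack of FixedStack bytes has order 0 and a stack of
// 4*FixedStack bytes has order 2.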
// There is a free list for each order.
// TODO: one lock per order?
var stackpool [_NumStackOrders]mspan
var stackpoolmu mutex

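// stackfreequeue is a list of old stacks whose freeing copystack deferred
// (see the comment there); shrinkfinish frees them once GC is done.
// Protected by stackpoolmu.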
var stackfreequeue stack

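// stackinit initializes the free lists of the global stack pool.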
func stackinit() {
	if _StackCacheSize&_PageMask != 0 {
		throw("cache size must be a multiple of page size")
	}
	for i := range stackpool {
		mSpanList_Init(&stackpool[i])
	}
}

// Allocates a stack from the free pool.  Must be called with
// stackpoolmu held.
func stackpoolalloc(order uint8) gclinkptr {
	list := &stackpool[order]
	s := list.next
	if s == list {
		// no free stacks.  Allocate another span worth.
		s = mHeap_AllocStack(&mheap_, _StackCacheSize>>_PageShift)
		if s == nil {
			throw("out of memory")
		}
		if s.ref != 0 {
			throw("bad ref")
		}
		if s.freelist.ptr() != nil {
			throw("bad freelist")
		}
		for i := uintptr(0); i < _StackCacheSize; i += _FixedStack << order {
			x := gclinkptr(uintptr(s.start)<<_PageShift + i)
			x.ptr().next = s.freelist
			s.freelist = x
		}
		mSpanList_Insert(list, s)
	}
	x := s.freelist
	if x.ptr() == nil {
		throw("span has no free stacks")
	}
	s.freelist = x.ptr().next
	s.ref++
	if s.freelist.ptr() == nil {
		// all stacks in s are allocated.
		mSpanList_Remove(s)
	}
	return x
}

// Adds stack x to the free pool.  Must be called with stackpoolmu held.
func stackpoolfree(x gclinkptr, order uint8) {
	s := mHeap_Lookup(&mheap_, (unsafe.Pointer)(x))
	if s.state != _MSpanStack {
		throw("freeing stack not in a stack span")
	}
	if s.freelist.ptr() == nil {
		// s will now have a free stack
		mSpanList_Insert(&stackpool[order], s)
	}
	x.ptr().next = s.freelist
	s.freelist = x
	s.ref--
	if s.ref == 0 {
		// span is completely free - return to heap
		mSpanList_Remove(s)
		s.freelist = 0
		mHeap_FreeStack(&mheap_, s)
	}
}

// stackcacherefill/stackcacherelease implement a global pool of stack segments.
// The pool is required to prevent unlimited growth of per-thread caches.
func stackcacherefill(c *mcache, order uint8) {
	if stackDebug >= 1 {
		print("stackcacherefill order=", order, "\n")
	}

	// Grab some stacks from the global cache.
	// Grab half of the allowed capacity (to prevent thrashing).
	var list gclinkptr
	var size uintptr
	lock(&stackpoolmu)
	for size < _StackCacheSize/2 {
		x := stackpoolalloc(order)
		x.ptr().next = list
		list = x
		size += _FixedStack << order
	}
	unlock(&stackpoolmu)
	c.stackcache[order].list = list
	c.stackcache[order].size = size
}

func stackcacherelease(c *mcache, order uint8) {
	if stackDebug >= 1 {
		print("stackcacherelease order=", order, "\n")
	}
	x := c.stackcache[order].list
	size := c.stackcache[order].size
	lock(&stackpoolmu)
	for size > _StackCacheSize/2 {
		y := x.ptr().next
		stackpoolfree(x, order)
		x = y
		size -= _FixedStack << order
	}
	unlock(&stackpoolmu)
	c.stackcache[order].list = x
	c.stackcache[order].size = size
}

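// stackcache_clear returns all stacks cached in c to the global stack pool.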
func stackcache_clear(c *mcache) {
	if stackDebug >= 1 {
		print("stackcache clear\n")
	}
	lock(&stackpoolmu)
	for order := uint8(0); order < _NumStackOrders; order++ {
		x := c.stackcache[order].list
		for x.ptr() != nil {
			y := x.ptr().next
			stackpoolfree(x, order)
			x = y
		}
		c.stackcache[order].list = 0
		c.stackcache[order].size = 0
	}
	unlock(&stackpoolmu)
}

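// stackalloc allocates an n-byte stack; n must be a power of 2.
// Small stacks come from the per-M stack cache or the global pool,
// larger ones from a dedicated span.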
func stackalloc(n uint32) stack {
	// Stackalloc must be called on scheduler stack, so that we
	// never try to grow the stack during the code that stackalloc runs.
	// Doing so would cause a deadlock (issue 1547).
	thisg := getg()
	if thisg != thisg.m.g0 {
		throw("stackalloc not on scheduler stack")
	}
	if n&(n-1) != 0 {
		throw("stack size not a power of 2")
	}
	if stackDebug >= 1 {
		print("stackalloc ", n, "\n")
	}

	if debug.efence != 0 || stackFromSystem != 0 {
		v := sysAlloc(round(uintptr(n), _PageSize), &memstats.stacks_sys)
		if v == nil {
			throw("out of memory (stackalloc)")
		}
		return stack{uintptr(v), uintptr(v) + uintptr(n)}
	}

	// Small stacks are allocated with a fixed-size free-list allocator.
	// If we need a stack of a bigger size, we fall back on allocating
	// a dedicated span.
	var v unsafe.Pointer
	if stackCache != 0 && n < _FixedStack<<_NumStackOrders && n < _StackCacheSize {
		order := uint8(0)
		n2 := n
		for n2 > _FixedStack {
			order++
			n2 >>= 1
		}
		var x gclinkptr
		c := thisg.m.mcache
		if c == nil || thisg.m.preemptoff != "" || thisg.m.helpgc != 0 {
			// c == nil can happen in the guts of exitsyscall or
			// procresize. Just get a stack from the global pool.
			// Also don't touch stackcache during gc
			// as it's flushed concurrently.
			lock(&stackpoolmu)
			x = stackpoolalloc(order)
			unlock(&stackpoolmu)
		} else {
			x = c.stackcache[order].list
			if x.ptr() == nil {
				stackcacherefill(c, order)
				x = c.stackcache[order].list
			}
			c.stackcache[order].list = x.ptr().next
			c.stackcache[order].size -= uintptr(n)
		}
		v = (unsafe.Pointer)(x)
	} else {
		s := mHeap_AllocStack(&mheap_, round(uintptr(n), _PageSize)>>_PageShift)
		if s == nil {
			throw("out of memory")
		}
		v = (unsafe.Pointer)(s.start << _PageShift)
	}

	if raceenabled {
		racemalloc(v, uintptr(n))
	}
	if stackDebug >= 1 {
		print("  allocated ", v, "\n")
	}
	return stack{uintptr(v), uintptr(v) + uintptr(n)}
}

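// stackfree returns a stack allocated by stackalloc to the stack cache,
// the global pool, or the heap, depending on its size.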
func stackfree(stk stack) {
	gp := getg()
	n := stk.hi - stk.lo
	v := (unsafe.Pointer)(stk.lo)
	if n&(n-1) != 0 {
		throw("stack not a power of 2")
	}
	if stackDebug >= 1 {
		println("stackfree", v, n)
		memclr(v, n) // for testing, clobber stack data
	}
	if debug.efence != 0 || stackFromSystem != 0 {
		if debug.efence != 0 || stackFaultOnFree != 0 {
			sysFault(v, n)
		} else {
			sysFree(v, n, &memstats.stacks_sys)
		}
		return
	}
	if stackCache != 0 && n < _FixedStack<<_NumStackOrders && n < _StackCacheSize {
		order := uint8(0)
		n2 := n
		for n2 > _FixedStack {
			order++
			n2 >>= 1
		}
		x := gclinkptr(v)
		c := gp.m.mcache
		if c == nil || gp.m.preemptoff != "" || gp.m.helpgc != 0 {
			lock(&stackpoolmu)
			stackpoolfree(x, order)
			unlock(&stackpoolmu)
		} else {
			if c.stackcache[order].size >= _StackCacheSize {
				stackcacherelease(c, order)
			}
			x.ptr().next = c.stackcache[order].list
			c.stackcache[order].list = x
			c.stackcache[order].size += n
		}
	} else {
		s := mHeap_Lookup(&mheap_, v)
		if s.state != _MSpanStack {
			println(hex(s.start<<_PageShift), v)
			throw("bad span state")
		}
		mHeap_FreeStack(&mheap_, s)
	}
}

var maxstacksize uintptr = 1 << 20 // enough until runtime.main sets it for real

var mapnames = []string{
	typeDead:    "---",
	typeScalar:  "scalar",
	typePointer: "ptr",
}

// Stack frame layout
//
// (x86)
// +------------------+
// | args from caller |
// +------------------+ <- frame->argp
// |  return address  |
// +------------------+ <- frame->varp
// |     locals       |
// +------------------+
// |  args to callee  |
// +------------------+ <- frame->sp
//
// (arm)
// +------------------+
// | args from caller |
// +------------------+ <- frame->argp
// | caller's retaddr |
// +------------------+ <- frame->varp
// |     locals       |
// +------------------+
// |  args to callee  |
// +------------------+
// |  return address  |
// +------------------+ <- frame->sp

type adjustinfo struct {
	old   stack
	delta uintptr // ptr distance from old to new stack (newbase - oldbase)
}

// Adjustpointer checks whether *vpp is in the old stack described by adjinfo.
// If so, it rewrites *vpp to point into the new stack.
func adjustpointer(adjinfo *adjustinfo, vpp unsafe.Pointer) {
	pp := (*unsafe.Pointer)(vpp)
	p := *pp
	if stackDebug >= 4 {
		print("        ", pp, ":", p, "\n")
	}
	if adjinfo.old.lo <= uintptr(p) && uintptr(p) < adjinfo.old.hi {
		*pp = add(p, adjinfo.delta)
		if stackDebug >= 3 {
			print("        adjust ptr ", pp, ":", p, " -> ", *pp, "\n")
		}
	}
}

type gobitvector struct {
	n        uintptr
	bytedata []uint8
}

func gobv(bv bitvector) gobitvector {
	return gobitvector{
		uintptr(bv.n),
		(*[1 << 30]byte)(unsafe.Pointer(bv.bytedata))[:(bv.n+7)/8],
	}
}

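// ptrbits returns the 2-bit type code (typeDead, typeScalar, or typePointer)
// for word i of bv.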
func ptrbits(bv *gobitvector, i uintptr) uint8 {
	return (bv.bytedata[i/4] >> ((i & 3) * 2)) & 3
}

// bv describes the memory starting at address scanp.
// Adjust any pointers contained therein.
func adjustpointers(scanp unsafe.Pointer, cbv *bitvector, adjinfo *adjustinfo, f *_func) {
	bv := gobv(*cbv)
	minp := adjinfo.old.lo
	maxp := adjinfo.old.hi
	delta := adjinfo.delta
	num := uintptr(bv.n) / typeBitsWidth
	for i := uintptr(0); i < num; i++ {
		if stackDebug >= 4 {
			print("        ", add(scanp, i*ptrSize), ":", mapnames[ptrbits(&bv, i)], ":", hex(*(*uintptr)(add(scanp, i*ptrSize))), " # ", i, " ", bv.bytedata[i/4], "\n")
		}
		switch ptrbits(&bv, i) {
		default:
			throw("unexpected pointer bits")
		case typeDead:
			if debug.gcdead != 0 {
				*(*unsafe.Pointer)(add(scanp, i*ptrSize)) = unsafe.Pointer(uintptr(poisonStack))
			}
		case typeScalar:
			// ok
		case typePointer:
			p := *(*unsafe.Pointer)(add(scanp, i*ptrSize))
			up := uintptr(p)
			if f != nil && 0 < up && up < _PageSize && debug.invalidptr != 0 || up == poisonStack {
				// Looks like a junk value in a pointer slot.
				// Live analysis wrong?
				getg().m.traceback = 2
				print("runtime: bad pointer in frame ", funcname(f), " at ", add(scanp, i*ptrSize), ": ", p, "\n")
				throw("invalid stack pointer")
			}
			if minp <= up && up < maxp {
				if stackDebug >= 3 {
					print("adjust ptr ", p, " ", funcname(f), "\n")
				}
				*(*unsafe.Pointer)(add(scanp, i*ptrSize)) = unsafe.Pointer(up + delta)
			}
		}
	}
}

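// adjustframe adjusts the pointers in a single stack frame. It is used as
// a callback for gentraceback (and tracebackdefers) during stack copying.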
// Note: the argument/return area is adjusted by the callee.
func adjustframe(frame *stkframe, arg unsafe.Pointer) bool {
	adjinfo := (*adjustinfo)(arg)
	targetpc := frame.continpc
	if targetpc == 0 {
		// Frame is dead.
		return true
	}
	f := frame.fn
	if stackDebug >= 2 {
		print("    adjusting ", funcname(f), " frame=[", hex(frame.sp), ",", hex(frame.fp), "] pc=", hex(frame.pc), " continpc=", hex(frame.continpc), "\n")
	}
	if f.entry == systemstack_switchPC {
		// A special routine at the bottom of the stack of a goroutine that does a systemstack call.
		// We will allow it to be copied even though we don't
		// have full GC info for it (because it is written in asm).
		return true
	}
	if targetpc != f.entry {
		targetpc--
	}
	pcdata := pcdatavalue(f, _PCDATA_StackMapIndex, targetpc)
	if pcdata == -1 {
		pcdata = 0 // in prologue
	}

	// Adjust local variables if stack frame has been allocated.
	size := frame.varp - frame.sp
	var minsize uintptr
	if thechar != '6' && thechar != '8' {
		minsize = ptrSize
	} else {
		minsize = 0
	}
	if size > minsize {
		var bv bitvector
		stackmap := (*stackmap)(funcdata(f, _FUNCDATA_LocalsPointerMaps))
		if stackmap == nil || stackmap.n <= 0 {
			print("runtime: frame ", funcname(f), " untyped locals ", hex(frame.varp-size), "+", hex(size), "\n")
			throw("missing stackmap")
		}
		// Locals bitmap information, scan just the pointers in locals.
		if pcdata < 0 || pcdata >= stackmap.n {
			// don't know where we are
			print("runtime: pcdata is ", pcdata, " and ", stackmap.n, " locals stack map entries for ", funcname(f), " (targetpc=", targetpc, ")\n")
			throw("bad symbol table")
		}
		bv = stackmapdata(stackmap, pcdata)
		size = (uintptr(bv.n) / typeBitsWidth) * ptrSize
		if stackDebug >= 3 {
			print("      locals ", pcdata, "/", stackmap.n, " ", size/ptrSize, " words ", bv.bytedata, "\n")
		}
		adjustpointers(unsafe.Pointer(frame.varp-size), &bv, adjinfo, f)
	}

	// Adjust arguments.
	if frame.arglen > 0 {
		var bv bitvector
		if frame.argmap != nil {
			bv = *frame.argmap
		} else {
			stackmap := (*stackmap)(funcdata(f, _FUNCDATA_ArgsPointerMaps))
			if stackmap == nil || stackmap.n <= 0 {
				print("runtime: frame ", funcname(f), " untyped args ", frame.argp, "+", uintptr(frame.arglen), "\n")
				throw("missing stackmap")
			}
			if pcdata < 0 || pcdata >= stackmap.n {
				// don't know where we are
				print("runtime: pcdata is ", pcdata, " and ", stackmap.n, " args stack map entries for ", funcname(f), " (targetpc=", targetpc, ")\n")
				throw("bad symbol table")
			}
			bv = stackmapdata(stackmap, pcdata)
		}
		if stackDebug >= 3 {
			print("      args\n")
		}
		adjustpointers(unsafe.Pointer(frame.argp), &bv, adjinfo, nil)
	}
	return true
}

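// adjustctxt adjusts the context pointer saved in gp.sched.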
func adjustctxt(gp *g, adjinfo *adjustinfo) {
	adjustpointer(adjinfo, (unsafe.Pointer)(&gp.sched.ctxt))
}

func adjustdefers(gp *g, adjinfo *adjustinfo) {
	// Adjust defer argument blocks the same way we adjust active stack frames.
	tracebackdefers(gp, adjustframe, noescape(unsafe.Pointer(adjinfo)))

	// Adjust pointers in the Defer structs.
	// Defer structs themselves are never on the stack.
	for d := gp._defer; d != nil; d = d.link {
		adjustpointer(adjinfo, (unsafe.Pointer)(&d.fn))
		adjustpointer(adjinfo, (unsafe.Pointer)(&d.sp))
		adjustpointer(adjinfo, (unsafe.Pointer)(&d._panic))
	}
}

func adjustpanics(gp *g, adjinfo *adjustinfo) {
	// Panics are on stack and already adjusted.
	// Update pointer to head of list in G.
	adjustpointer(adjinfo, (unsafe.Pointer)(&gp._panic))
}

func adjustsudogs(gp *g, adjinfo *adjustinfo) {
	// the data elements pointed to by a SudoG structure
	// might be in the stack.
	for s := gp.waiting; s != nil; s = s.waitlink {
		adjustpointer(adjinfo, (unsafe.Pointer)(&s.elem))
		adjustpointer(adjinfo, (unsafe.Pointer)(&s.selectdone))
	}
}

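// fillstack fills every byte of stk with b (used by the stackPoisonCopy
// debug mode to detect bad reads during copying).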
func fillstack(stk stack, b byte) {
	for p := stk.lo; p < stk.hi; p++ {
		*(*byte)(unsafe.Pointer(p)) = b
	}
}

// Copies gp's stack to a new stack of a different size.
// Caller must have changed gp status to Gcopystack.
func copystack(gp *g, newsize uintptr) {
	if gp.syscallsp != 0 {
		throw("stack growth not allowed in system call")
	}
	old := gp.stack
	if old.lo == 0 {
		throw("nil stackbase")
	}
	used := old.hi - gp.sched.sp

	// allocate new stack
	new := stackalloc(uint32(newsize))
	if stackPoisonCopy != 0 {
		fillstack(new, 0xfd)
	}
	if stackDebug >= 1 {
		print("copystack gp=", gp, " [", hex(old.lo), " ", hex(old.hi-used), " ", hex(old.hi), "]/", old.hi-old.lo, " -> [", hex(new.lo), " ", hex(new.hi-used), " ", hex(new.hi), "]/", newsize, "\n")
	}

	// adjust pointers in the to-be-copied frames
	var adjinfo adjustinfo
	adjinfo.old = old
	adjinfo.delta = new.hi - old.hi
	gentraceback(^uintptr(0), ^uintptr(0), 0, gp, 0, nil, 0x7fffffff, adjustframe, noescape(unsafe.Pointer(&adjinfo)), 0)

	// adjust other miscellaneous things that have pointers into stacks.
	adjustctxt(gp, &adjinfo)
	adjustdefers(gp, &adjinfo)
	adjustpanics(gp, &adjinfo)
	adjustsudogs(gp, &adjinfo)

	// copy the stack to the new location
	if stackPoisonCopy != 0 {
		fillstack(new, 0xfb)
	}
	memmove(unsafe.Pointer(new.hi-used), unsafe.Pointer(old.hi-used), used)

	// Swap out old stack for new one
	gp.stack = new
	gp.stackguard0 = new.lo + _StackGuard // NOTE: might clobber a preempt request
	gp.sched.sp = new.hi - used

	// free old stack
	if stackPoisonCopy != 0 {
		fillstack(old, 0xfc)
	}
	if newsize > old.hi-old.lo {
		// growing, free stack immediately
		stackfree(old)
	} else {
		// shrinking, queue up free operation.  We can't actually free the stack
		// just yet because we might run into the following situation:
		// 1) GC starts, scans a SudoG but does not yet mark the SudoG.elem pointer
		// 2) The stack that pointer points to is shrunk
		// 3) The old stack is freed
		// 4) The containing span is marked free
		// 5) GC attempts to mark the SudoG.elem pointer.  The marking fails because
		//    the pointer looks like a pointer into a free span.
		// By not freeing, we prevent step #4 until GC is done.
		lock(&stackpoolmu)
		*(*stack)(unsafe.Pointer(old.lo)) = stackfreequeue
		stackfreequeue = old
		unlock(&stackpoolmu)
	}
}

// round x up to a power of 2.
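// For example, round2(5) == 8 and round2(8) == 8.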
func round2(x int32) int32 {
	s := uint(0)
	for 1<<s < x {
		s++
	}
	return 1 << s
}

// Called from runtime·morestack when more stack is needed.
// Allocate larger stack and relocate to new stack.
// Stack growth is multiplicative, for constant amortized cost.
//
// g->atomicstatus will be Grunning or Gscanrunning upon entry.
// If the GC is trying to stop this g then it will set preemptscan to true.
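// Each successful growth doubles the stack (newsize = 2*oldsize), capped at maxstacksize.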
func newstack() {
	thisg := getg()
	// TODO: double check all gp. shouldn't be getg().
	if thisg.m.morebuf.g.ptr().stackguard0 == stackFork {
		throw("stack growth after fork")
	}
	if thisg.m.morebuf.g.ptr() != thisg.m.curg {
		print("runtime: newstack called from g=", thisg.m.morebuf.g, "\n"+"\tm=", thisg.m, " m->curg=", thisg.m.curg, " m->g0=", thisg.m.g0, " m->gsignal=", thisg.m.gsignal, "\n")
		morebuf := thisg.m.morebuf
		traceback(morebuf.pc, morebuf.sp, morebuf.lr, morebuf.g.ptr())
		throw("runtime: wrong goroutine in newstack")
	}
	if thisg.m.curg.throwsplit {
		gp := thisg.m.curg
		// Update syscallsp, syscallpc in case traceback uses them.
		morebuf := thisg.m.morebuf
		gp.syscallsp = morebuf.sp
		gp.syscallpc = morebuf.pc
		print("runtime: newstack sp=", hex(gp.sched.sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n",
			"\tmorebuf={pc:", hex(morebuf.pc), " sp:", hex(morebuf.sp), " lr:", hex(morebuf.lr), "}\n",
			"\tsched={pc:", hex(gp.sched.pc), " sp:", hex(gp.sched.sp), " lr:", hex(gp.sched.lr), " ctxt:", gp.sched.ctxt, "}\n")
		traceback(morebuf.pc, morebuf.sp, morebuf.lr, gp)
		throw("runtime: stack split at bad time")
	}

	gp := thisg.m.curg
	morebuf := thisg.m.morebuf
	thisg.m.morebuf.pc = 0
	thisg.m.morebuf.lr = 0
	thisg.m.morebuf.sp = 0
	thisg.m.morebuf.g = 0
	rewindmorestack(&gp.sched)

	// NOTE: stackguard0 may change underfoot, if another thread
	// is about to try to preempt gp. Read it just once and use that same
	// value now and below.
	preempt := atomicloaduintptr(&gp.stackguard0) == stackPreempt

	// Be conservative about where we preempt.
	// We are interested in preempting user Go code, not runtime code.
	// If we're holding locks, mallocing, or preemption is disabled, don't
	// preempt.
	// This check is very early in newstack so that even the status change
	// from Grunning to Gwaiting and back doesn't happen in this case.
	// That status change by itself can be viewed as a small preemption,
	// because the GC might change Gwaiting to Gscanwaiting, and then
	// this goroutine has to wait for the GC to finish before continuing.
	// If the GC is in some way dependent on this goroutine (for example,
	// it needs a lock held by the goroutine), that small preemption turns
	// into a real deadlock.
	if preempt {
		if thisg.m.locks != 0 || thisg.m.mallocing != 0 || thisg.m.preemptoff != "" || thisg.m.p.status != _Prunning {
			// Let the goroutine keep running for now.
			// gp->preempt is set, so it will be preempted next time.
			gp.stackguard0 = gp.stack.lo + _StackGuard
			gogo(&gp.sched) // never return
		}
	}

	// The goroutine must be executing in order to call newstack,
	// so it must be Grunning (or Gscanrunning).
	casgstatus(gp, _Grunning, _Gwaiting)
	gp.waitreason = "stack growth"

	if gp.stack.lo == 0 {
		throw("missing stack in newstack")
	}
	sp := gp.sched.sp
	if thechar == '6' || thechar == '8' {
		// The call to morestack cost a word.
		sp -= ptrSize
	}
	if stackDebug >= 1 || sp < gp.stack.lo {
		print("runtime: newstack sp=", hex(sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n",
			"\tmorebuf={pc:", hex(morebuf.pc), " sp:", hex(morebuf.sp), " lr:", hex(morebuf.lr), "}\n",
			"\tsched={pc:", hex(gp.sched.pc), " sp:", hex(gp.sched.sp), " lr:", hex(gp.sched.lr), " ctxt:", gp.sched.ctxt, "}\n")
	}
	if sp < gp.stack.lo {
		print("runtime: gp=", gp, ", gp->status=", hex(readgstatus(gp)), "\n ")
		print("runtime: split stack overflow: ", hex(sp), " < ", hex(gp.stack.lo), "\n")
		throw("runtime: split stack overflow")
	}

	if gp.sched.ctxt != nil {
		// morestack wrote sched.ctxt on its way in here,
		// without a write barrier. Run the write barrier now.
		// It is not possible to be preempted between then
		// and now, so it's okay.
		writebarrierptr_nostore((*uintptr)(unsafe.Pointer(&gp.sched.ctxt)), uintptr(gp.sched.ctxt))
	}

	if preempt {
		if gp == thisg.m.g0 {
			throw("runtime: preempt g0")
		}
		if thisg.m.p == nil && thisg.m.locks == 0 {
			throw("runtime: g is running but p is not")
		}
		if gp.preemptscan {
			for !castogscanstatus(gp, _Gwaiting, _Gscanwaiting) {
				// Likely to be racing with the GC as it sees a _Gwaiting and does the stack scan.
				// If so this stack will be scanned twice which does not change correctness.
			}
			gcphasework(gp)
			casfrom_Gscanstatus(gp, _Gscanwaiting, _Gwaiting)
			casgstatus(gp, _Gwaiting, _Grunning)
			gp.stackguard0 = gp.stack.lo + _StackGuard
			gp.preempt = false
			gp.preemptscan = false // Tells the GC preemption was successful.
			gogo(&gp.sched)        // never return
		}

		// Act like goroutine called runtime.Gosched.
		casgstatus(gp, _Gwaiting, _Grunning)
		gopreempt_m(gp) // never return
	}

	// Allocate a bigger segment and move the stack.
	oldsize := int(gp.stack.hi - gp.stack.lo)
	newsize := oldsize * 2
	if uintptr(newsize) > maxstacksize {
		print("runtime: goroutine stack exceeds ", maxstacksize, "-byte limit\n")
		throw("stack overflow")
	}

	casgstatus(gp, _Gwaiting, _Gcopystack)

	// The concurrent GC will not scan the stack while we are doing the copy since
	// the gp is in a Gcopystack status.
	copystack(gp, uintptr(newsize))
	if stackDebug >= 1 {
		print("stack grow done\n")
	}
	casgstatus(gp, _Gcopystack, _Grunning)
	gogo(&gp.sched)
}

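// nilfunc faults by writing through a nil pointer; gostartcallfn uses it
// as a placeholder when no function value is supplied.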
//go:nosplit
func nilfunc() {
	*(*uint8)(nil) = 0
}

// adjust Gobuf as if it executed a call to fn
// and then did an immediate gosave.
func gostartcallfn(gobuf *gobuf, fv *funcval) {
	var fn unsafe.Pointer
	if fv != nil {
		fn = (unsafe.Pointer)(fv.fn)
	} else {
		fn = unsafe.Pointer(funcPC(nilfunc))
	}
	gostartcall(gobuf, fn, (unsafe.Pointer)(fv))
}

// Maybe shrink the stack being used by gp.
// Called at garbage collection time.
func shrinkstack(gp *g) {
	if readgstatus(gp) == _Gdead {
		if gp.stack.lo != 0 {
			// Free whole stack - it will get reallocated
			// if G is used again.
			stackfree(gp.stack)
			gp.stack.lo = 0
			gp.stack.hi = 0
		}
		return
	}
	if gp.stack.lo == 0 {
		throw("missing stack in shrinkstack")
	}

	oldsize := gp.stack.hi - gp.stack.lo
	newsize := oldsize / 2
	if newsize < _FixedStack {
		return // don't shrink below the minimum-sized stack
	}
	used := gp.stack.hi - gp.sched.sp
	if used >= oldsize/4 {
		return // still using at least 1/4 of the segment.
	}

	// We can't copy the stack if we're in a syscall.
	// The syscall might have pointers into the stack.
	if gp.syscallsp != 0 {
		return
	}
	if goos_windows != 0 && gp.m != nil && gp.m.libcallsp != 0 {
		return
	}

	if stackDebug > 0 {
		print("shrinking stack ", oldsize, "->", newsize, "\n")
	}

	oldstatus := casgcopystack(gp)
	copystack(gp, newsize)
	casgstatus(gp, _Gcopystack, oldstatus)
}

// Do any delayed stack freeing that was queued up during GC.
func shrinkfinish() {
	lock(&stackpoolmu)
	s := stackfreequeue
	stackfreequeue = stack{}
	unlock(&stackpoolmu)
	for s.lo != 0 {
		t := *(*stack)(unsafe.Pointer(s.lo))
		stackfree(s)
		s = t
	}
}

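// morestackc crashes the program when C code attempts to grow a Go stack.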
//go:nosplit
func morestackc() {
	systemstack(func() {
		throw("attempt to execute C code on Go stack")
	})
}