// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"internal/cpu"
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

/*
Stack layout parameters.
Included both by runtime (compiled via 6c) and linkers (compiled via gcc).

The per-goroutine g->stackguard is set to point StackGuard bytes
above the bottom of the stack.  Each function compares its stack
pointer against g->stackguard to check for overflow.  To cut one
instruction from the check sequence for functions with tiny frames,
the stack is allowed to protrude StackSmall bytes below the stack
guard.  Functions with large frames don't bother with the check and
always call morestack.  The sequences are (for amd64, others are
similar):

	guard = g->stackguard
	frame = function's stack frame size
	argsize = size of function arguments (call + return)

	stack frame size <= StackSmall:
		CMPQ guard, SP
		JHI 3(PC)
		MOVQ m->morearg, $(argsize << 32)
		CALL morestack(SB)

	stack frame size > StackSmall but < StackBig
		LEAQ (frame-StackSmall)(SP), R0
		CMPQ guard, R0
		JHI 3(PC)
		MOVQ m->morearg, $(argsize << 32)
		CALL morestack(SB)

	stack frame size >= StackBig:
		MOVQ m->morearg, $((argsize << 32) | frame)
		CALL morestack(SB)

The bottom StackGuard - StackSmall bytes are important: there has
to be enough room to execute functions that refuse to check for
stack overflow, either because they need to be adjacent to the
actual caller's frame (deferproc) or because they handle the imminent
stack overflow (morestack).

For example, deferproc might call malloc, which does one of the
above checks (without allocating a full frame), which might trigger
a call to morestack.  This sequence needs to fit in the bottom
section of the stack.  On amd64, morestack's frame is 40 bytes, and
deferproc's frame is 56 bytes.  That fits well within the
StackGuard - StackSmall bytes at the bottom.
The linkers explore all possible call traces involving non-splitting
functions to make sure that this limit cannot be violated.
*/
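
// A rough Go-level sketch of the intent of the checks above (illustrative
// pseudocode only, not code the compiler emits; sp, stackguard, framesize
// and morestack stand in for the real registers and runtime entry points):
//
//	switch {
//	case framesize <= _StackSmall:
//		// Tiny frames may protrude up to _StackSmall bytes below the guard.
//		if sp < stackguard {
//			morestack()
//		}
//	case framesize < _StackBig:
//		// Bigger frames must leave framesize-_StackSmall bytes above the guard.
//		if sp-(framesize-_StackSmall) < stackguard {
//			morestack()
//		}
//	default:
//		// Huge frames skip the comparison and always call morestack,
//		// passing the frame size so it can decide how much to grow.
//		morestack()
//	}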

const (
	// StackSystem is a number of additional bytes to add
	// to each stack below the usual guard area for OS-specific
	// purposes like signal handling. Used on Windows, Plan 9,
	// and iOS because they do not use a separate stack.
	_StackSystem = sys.GoosWindows*512*sys.PtrSize + sys.GoosPlan9*512 + sys.GoosDarwin*sys.GoarchArm*1024 + sys.GoosDarwin*sys.GoarchArm64*1024

	// The minimum size of stack used by Go code
	_StackMin = 2048

	// The minimum stack size to allocate.
	// The hackery here rounds FixedStack0 up to a power of 2.
	_FixedStack0 = _StackMin + _StackSystem
	_FixedStack1 = _FixedStack0 - 1
	_FixedStack2 = _FixedStack1 | (_FixedStack1 >> 1)
	_FixedStack3 = _FixedStack2 | (_FixedStack2 >> 2)
	_FixedStack4 = _FixedStack3 | (_FixedStack3 >> 4)
	_FixedStack5 = _FixedStack4 | (_FixedStack4 >> 8)
	_FixedStack6 = _FixedStack5 | (_FixedStack5 >> 16)
	_FixedStack  = _FixedStack6 + 1

	// Functions that need frames bigger than this use an extra
	// instruction to do the stack split check, to avoid overflow
	// in case SP - framesize wraps below zero.
	// This value can be no bigger than the size of the unmapped
	// space at zero.
	_StackBig = 4096

	// The stack guard is a pointer this many bytes above the
	// bottom of the stack.
	_StackGuard = 896*sys.StackGuardMultiplier + _StackSystem

	// After a stack split check the SP is allowed to be this
	// many bytes below the stack guard. This saves an instruction
	// in the checking sequence for tiny frames.
	_StackSmall = 128

	// The maximum number of bytes that a chain of NOSPLIT
	// functions can use.
	_StackLimit = _StackGuard - _StackSystem - _StackSmall
)
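
// The _FixedStack0 through _FixedStack6 chain above is the usual bit-smearing
// trick for rounding an integer up to the next power of two. Pulled out as a
// plain function it would look roughly like this (an illustrative sketch only;
// no such helper exists in this file):
//
//	func roundUpPow2(x uintptr) uintptr {
//		x--
//		x |= x >> 1
//		x |= x >> 2
//		x |= x >> 4
//		x |= x >> 8
//		x |= x >> 16 // the constant chain stops here, enough for stack sizes
//		return x + 1
//	}
//
// For example, if _StackMin+_StackSystem came to 6144, _FixedStack would be
// 8192; with _StackSystem == 0 it stays at 2048, which is already a power of two.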

const (
	// stackDebug == 0: no logging
	//            == 1: logging of per-stack operations
	//            == 2: logging of per-frame operations
	//            == 3: logging of per-word updates
	//            == 4: logging of per-word reads
	stackDebug       = 0
	stackFromSystem  = 0 // allocate stacks from system memory instead of the heap
	stackFaultOnFree = 0 // old stacks are mapped noaccess to detect use after free
	stackPoisonCopy  = 0 // fill stack that should not be accessed with garbage, to detect bad dereferences during copy
	stackNoCache     = 0 // disable per-P small stack caches

	// check the BP links during traceback.
	debugCheckBP = false
)

const (
	uintptrMask = 1<<(8*sys.PtrSize) - 1

	// Goroutine preemption request.
	// Stored into g->stackguard0 to cause split stack check failure.
	// Must be greater than any real sp.
	// 0xfffffade in hex.
	stackPreempt = uintptrMask & -1314

	// Thread is forking.
	// Stored into g->stackguard0 to cause split stack check failure.
	// Must be greater than any real sp.
	stackFork = uintptrMask & -1234
)

// Global pool of spans that have free stacks.
// Stacks are assigned an order according to size.
//     order = log_2(size/FixedStack)
// There is a free list for each order.
var stackpool [_NumStackOrders]struct {
	item stackpoolItem
	_    [cpu.CacheLinePadSize - unsafe.Sizeof(stackpoolItem{})%cpu.CacheLinePadSize]byte
}

//go:notinheap
type stackpoolItem struct {
	mu   mutex
	span mSpanList
}

// Global pool of large stack spans.
var stackLarge struct {
	lock mutex
	free [heapAddrBits - pageShift]mSpanList // free lists by log_2(s.npages)
}

func stackinit() {
	if _StackCacheSize&_PageMask != 0 {
		throw("cache size must be a multiple of page size")
	}
	for i := range stackpool {
		stackpool[i].item.span.init()
	}
	for i := range stackLarge.free {
		stackLarge.free[i].init()
	}
}

// stacklog2 returns ⌊log_2(n)⌋.
func stacklog2(n uintptr) int {
	log2 := 0
	for n > 1 {
		n >>= 1
		log2++
	}
	return log2
}
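
// For example, stacklog2(1) == 0, stacklog2(4) == 2, and stacklog2(7) == 2:
// the result rounds down. stackalloc and stackfree use it to index
// stackLarge.free by the base-2 log of a span's page count.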

// Allocates a stack from the free pool. Must be called with
// stackpool[order].item.mu held.
func stackpoolalloc(order uint8) gclinkptr {
	list := &stackpool[order].item.span
	s := list.first
	if s == nil {
		// no free stacks. Allocate another span worth.
		s = mheap_.allocManual(_StackCacheSize>>_PageShift, &memstats.stacks_inuse)
		if s == nil {
			throw("out of memory")
		}
		if s.allocCount != 0 {
			throw("bad allocCount")
		}
		if s.manualFreeList.ptr() != nil {
			throw("bad manualFreeList")
		}
		osStackAlloc(s)
		s.elemsize = _FixedStack << order
		for i := uintptr(0); i < _StackCacheSize; i += s.elemsize {
			x := gclinkptr(s.base() + i)
			x.ptr().next = s.manualFreeList
			s.manualFreeList = x
		}
		list.insert(s)
	}
	x := s.manualFreeList
	if x.ptr() == nil {
		throw("span has no free stacks")
	}
	s.manualFreeList = x.ptr().next
	s.allocCount++
	if s.manualFreeList.ptr() == nil {
		// all stacks in s are allocated.
		list.remove(s)
	}
	return x
}

// Adds stack x to the free pool. Must be called with stackpool[order].item.mu held.
func stackpoolfree(x gclinkptr, order uint8) {
	s := spanOfUnchecked(uintptr(x))
	if s.state != mSpanManual {
		throw("freeing stack not in a stack span")
	}
	if s.manualFreeList.ptr() == nil {
		// s will now have a free stack
		stackpool[order].item.span.insert(s)
	}
	x.ptr().next = s.manualFreeList
	s.manualFreeList = x
	s.allocCount--
	if gcphase == _GCoff && s.allocCount == 0 {
		// Span is completely free. Return it to the heap
		// immediately if we're sweeping.
		//
		// If GC is active, we delay the free until the end of
		// GC to avoid the following type of situation:
		//
		// 1) GC starts, scans a SudoG but does not yet mark the SudoG.elem pointer
		// 2) The stack that pointer points to is copied
		// 3) The old stack is freed
		// 4) The containing span is marked free
		// 5) GC attempts to mark the SudoG.elem pointer. The
		//    marking fails because the pointer looks like a
		//    pointer into a free span.
		//
		// By not freeing, we prevent step #4 until GC is done.
		stackpool[order].item.span.remove(s)
		s.manualFreeList = 0
		osStackFree(s)
		mheap_.freeManual(s, &memstats.stacks_inuse)
	}
}

// stackcacherefill/stackcacherelease implement a global pool of stack segments.
// The pool is required to prevent unlimited growth of per-thread caches.
//
//go:systemstack
func stackcacherefill(c *mcache, order uint8) {
	if stackDebug >= 1 {
		print("stackcacherefill order=", order, "\n")
	}

	// Grab some stacks from the global cache.
	// Grab half of the allowed capacity (to prevent thrashing).
	var list gclinkptr
	var size uintptr
	lock(&stackpool[order].item.mu)
	for size < _StackCacheSize/2 {
		x := stackpoolalloc(order)
		x.ptr().next = list
		list = x
		size += _FixedStack << order
	}
	unlock(&stackpool[order].item.mu)
	c.stackcache[order].list = list
	c.stackcache[order].size = size
}

//go:systemstack
func stackcacherelease(c *mcache, order uint8) {
	if stackDebug >= 1 {
		print("stackcacherelease order=", order, "\n")
	}
	x := c.stackcache[order].list
	size := c.stackcache[order].size
	lock(&stackpool[order].item.mu)
	for size > _StackCacheSize/2 {
		y := x.ptr().next
		stackpoolfree(x, order)
		x = y
		size -= _FixedStack << order
	}
	unlock(&stackpool[order].item.mu)
	c.stackcache[order].list = x
	c.stackcache[order].size = size
}

//go:systemstack
func stackcache_clear(c *mcache) {
	if stackDebug >= 1 {
		print("stackcache clear\n")
	}
	for order := uint8(0); order < _NumStackOrders; order++ {
		lock(&stackpool[order].item.mu)
		x := c.stackcache[order].list
		for x.ptr() != nil {
			y := x.ptr().next
			stackpoolfree(x, order)
			x = y
		}
		c.stackcache[order].list = 0
		c.stackcache[order].size = 0
		unlock(&stackpool[order].item.mu)
	}
}

// stackalloc allocates an n byte stack.
//
// stackalloc must run on the system stack because it uses per-P
// resources and must not split the stack.
//
//go:systemstack
func stackalloc(n uint32) stack {
	// Stackalloc must be called on scheduler stack, so that we
	// never try to grow the stack during the code that stackalloc runs.
	// Doing so would cause a deadlock (issue 1547).
	thisg := getg()
	if thisg != thisg.m.g0 {
		throw("stackalloc not on scheduler stack")
	}
	if n&(n-1) != 0 {
		throw("stack size not a power of 2")
	}
	if stackDebug >= 1 {
		print("stackalloc ", n, "\n")
	}

	if debug.efence != 0 || stackFromSystem != 0 {
		n = uint32(round(uintptr(n), physPageSize))
		v := sysAlloc(uintptr(n), &memstats.stacks_sys)
		if v == nil {
			throw("out of memory (stackalloc)")
		}
		return stack{uintptr(v), uintptr(v) + uintptr(n)}
	}

	// Small stacks are allocated with a fixed-size free-list allocator.
	// If we need a stack of a bigger size, we fall back on allocating
	// a dedicated span.
	var v unsafe.Pointer
	if n < _FixedStack<<_NumStackOrders && n < _StackCacheSize {
		order := uint8(0)
		n2 := n
		for n2 > _FixedStack {
			order++
			n2 >>= 1
		}
		var x gclinkptr
		c := thisg.m.mcache
		if stackNoCache != 0 || c == nil || thisg.m.preemptoff != "" {
			// c == nil can happen in the guts of exitsyscall or
			// procresize. Just get a stack from the global pool.
			// Also don't touch stackcache during gc
			// as it's flushed concurrently.
			lock(&stackpool[order].item.mu)
			x = stackpoolalloc(order)
			unlock(&stackpool[order].item.mu)
		} else {
			x = c.stackcache[order].list
			if x.ptr() == nil {
				stackcacherefill(c, order)
				x = c.stackcache[order].list
			}
			c.stackcache[order].list = x.ptr().next
			c.stackcache[order].size -= uintptr(n)
		}
		v = unsafe.Pointer(x)
	} else {
		var s *mspan
		npage := uintptr(n) >> _PageShift
		log2npage := stacklog2(npage)

		// Try to get a stack from the large stack cache.
		lock(&stackLarge.lock)
		if !stackLarge.free[log2npage].isEmpty() {
			s = stackLarge.free[log2npage].first
			stackLarge.free[log2npage].remove(s)
		}
		unlock(&stackLarge.lock)

		if s == nil {
			// Allocate a new stack from the heap.
			s = mheap_.allocManual(npage, &memstats.stacks_inuse)
			if s == nil {
				throw("out of memory")
			}
			osStackAlloc(s)
			s.elemsize = uintptr(n)
		}
		v = unsafe.Pointer(s.base())
	}

	if raceenabled {
		racemalloc(v, uintptr(n))
	}
	if msanenabled {
		msanmalloc(v, uintptr(n))
	}
	if stackDebug >= 1 {
		print("  allocated ", v, "\n")
	}
	return stack{uintptr(v), uintptr(v) + uintptr(n)}
}

// stackfree frees an n byte stack allocation at stk.
//
// stackfree must run on the system stack because it uses per-P
// resources and must not split the stack.
//
//go:systemstack
func stackfree(stk stack) {
	gp := getg()
	v := unsafe.Pointer(stk.lo)
	n := stk.hi - stk.lo
	if n&(n-1) != 0 {
		throw("stack not a power of 2")
	}
	if stk.lo+n < stk.hi {
		throw("bad stack size")
	}
	if stackDebug >= 1 {
		println("stackfree", v, n)
		memclrNoHeapPointers(v, n) // for testing, clobber stack data
	}
	if debug.efence != 0 || stackFromSystem != 0 {
		if debug.efence != 0 || stackFaultOnFree != 0 {
			sysFault(v, n)
		} else {
			sysFree(v, n, &memstats.stacks_sys)
		}
		return
	}
	if msanenabled {
		msanfree(v, n)
	}
	if n < _FixedStack<<_NumStackOrders && n < _StackCacheSize {
		order := uint8(0)
		n2 := n
		for n2 > _FixedStack {
			order++
			n2 >>= 1
		}
		x := gclinkptr(v)
		c := gp.m.mcache
		if stackNoCache != 0 || c == nil || gp.m.preemptoff != "" {
			lock(&stackpool[order].item.mu)
			stackpoolfree(x, order)
			unlock(&stackpool[order].item.mu)
		} else {
			if c.stackcache[order].size >= _StackCacheSize {
				stackcacherelease(c, order)
			}
			x.ptr().next = c.stackcache[order].list
			c.stackcache[order].list = x
			c.stackcache[order].size += n
		}
	} else {
		s := spanOfUnchecked(uintptr(v))
		if s.state != mSpanManual {
			println(hex(s.base()), v)
			throw("bad span state")
		}
		if gcphase == _GCoff {
			// Free the stack immediately if we're
			// sweeping.
			osStackFree(s)
			mheap_.freeManual(s, &memstats.stacks_inuse)
		} else {
			// If the GC is running, we can't return a
			// stack span to the heap because it could be
			// reused as a heap span, and this state
			// change would race with GC. Add it to the
			// large stack cache instead.
			log2npage := stacklog2(s.npages)
			lock(&stackLarge.lock)
			stackLarge.free[log2npage].insert(s)
			unlock(&stackLarge.lock)
		}
	}
}

var maxstacksize uintptr = 1 << 20 // enough until runtime.main sets it for real

var ptrnames = []string{
	0: "scalar",
	1: "ptr",
}

// Stack frame layout
//
// (x86)
// +------------------+
// | args from caller |
// +------------------+ <- frame->argp
// |  return address  |
// +------------------+
// |  caller's BP (*) | (*) if framepointer_enabled && varp < sp
// +------------------+ <- frame->varp
// |     locals       |
// +------------------+
// |  args to callee  |
// +------------------+ <- frame->sp
//
// (arm)
// +------------------+
// | args from caller |
// +------------------+ <- frame->argp
// | caller's retaddr |
// +------------------+ <- frame->varp
// |     locals       |
// +------------------+
// |  args to callee  |
// +------------------+
// |  return address  |
// +------------------+ <- frame->sp

type adjustinfo struct {
	old   stack
	delta uintptr // ptr distance from old to new stack (newbase - oldbase)
	cache pcvalueCache

	// sghi is the highest sudog.elem on the stack.
	sghi uintptr
}

// Adjustpointer checks whether *vpp is in the old stack described by adjinfo.
// If so, it rewrites *vpp to point into the new stack.
func adjustpointer(adjinfo *adjustinfo, vpp unsafe.Pointer) {
	pp := (*uintptr)(vpp)
	p := *pp
	if stackDebug >= 4 {
		print("        ", pp, ":", hex(p), "\n")
	}
	if adjinfo.old.lo <= p && p < adjinfo.old.hi {
		*pp = p + adjinfo.delta
		if stackDebug >= 3 {
			print("        adjust ptr ", pp, ":", hex(p), " -> ", hex(*pp), "\n")
		}
	}
}
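
// A worked example with made-up addresses: if the old stack is
// [0x1000, 0x2000) and the new stack is [0x3000, 0x4000), then
// adjinfo.delta = 0x4000 - 0x2000 = 0x2000, and a slot holding 0x1800
// (inside the old stack) is rewritten to 0x3800. A slot holding an address
// outside [old.lo, old.hi), such as a heap pointer, is left untouched.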

// Information from the compiler about the layout of stack frames.
type bitvector struct {
	n        int32 // # of bits
	bytedata *uint8
}

// ptrbit returns the i'th bit in bv.
// ptrbit is less efficient than iterating directly over bitvector bits,
// and should only be used in non-performance-critical code.
// See adjustpointers for an example of a high-efficiency walk of a bitvector.
func (bv *bitvector) ptrbit(i uintptr) uint8 {
	b := *(addb(bv.bytedata, i/8))
	return (b >> (i % 8)) & 1
}
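
// For example, ptrbit(10) reads byte 10/8 == 1 of bv.bytedata and extracts
// bit 10%8 == 2, so with bytedata equal to {0x00, 0x04} bit 10 is set and
// the word 10 slots into the described region holds a live pointer.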

// bv describes the memory starting at address scanp.
// Adjust any pointers contained therein.
func adjustpointers(scanp unsafe.Pointer, bv *bitvector, adjinfo *adjustinfo, f funcInfo) {
	minp := adjinfo.old.lo
	maxp := adjinfo.old.hi
	delta := adjinfo.delta
	num := uintptr(bv.n)
	// If this frame might contain channel receive slots, use CAS
	// to adjust pointers. If the slot hasn't been received into
	// yet, it may contain stack pointers and a concurrent send
	// could race with adjusting those pointers. (The sent value
	// itself can never contain stack pointers.)
	useCAS := uintptr(scanp) < adjinfo.sghi
	for i := uintptr(0); i < num; i += 8 {
		if stackDebug >= 4 {
			for j := uintptr(0); j < 8; j++ {
				print("        ", add(scanp, (i+j)*sys.PtrSize), ":", ptrnames[bv.ptrbit(i+j)], ":", hex(*(*uintptr)(add(scanp, (i+j)*sys.PtrSize))), " # ", i, " ", *addb(bv.bytedata, i/8), "\n")
			}
		}
		b := *(addb(bv.bytedata, i/8))
		for b != 0 {
			j := uintptr(sys.Ctz8(b))
			b &= b - 1
			pp := (*uintptr)(add(scanp, (i+j)*sys.PtrSize))
		retry:
			p := *pp
			if f.valid() && 0 < p && p < minLegalPointer && debug.invalidptr != 0 {
				// Looks like a junk value in a pointer slot.
				// Live analysis wrong?
				getg().m.traceback = 2
				print("runtime: bad pointer in frame ", funcname(f), " at ", pp, ": ", hex(p), "\n")
				throw("invalid pointer found on stack")
			}
			if minp <= p && p < maxp {
				if stackDebug >= 3 {
					print("adjust ptr ", hex(p), " ", funcname(f), "\n")
				}
				if useCAS {
					ppu := (*unsafe.Pointer)(unsafe.Pointer(pp))
					if !atomic.Casp1(ppu, unsafe.Pointer(p), unsafe.Pointer(p+delta)) {
						goto retry
					}
				} else {
					*pp = p + delta
				}
			}
		}
	}
}
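
// For example, if a bitmap byte b == 0x24 (bits 2 and 5 set), the inner loop
// above visits j == 2 and then j == 5: sys.Ctz8 returns the lowest set bit and
// b &= b-1 clears it, so only words that actually hold pointers are examined
// and, if they point into the old stack, adjusted.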

// Note: the argument/return area is adjusted by the callee.
func adjustframe(frame *stkframe, arg unsafe.Pointer) bool {
	adjinfo := (*adjustinfo)(arg)
	if frame.continpc == 0 {
		// Frame is dead.
		return true
	}
	f := frame.fn
	if stackDebug >= 2 {
		print("    adjusting ", funcname(f), " frame=[", hex(frame.sp), ",", hex(frame.fp), "] pc=", hex(frame.pc), " continpc=", hex(frame.continpc), "\n")
	}
	if f.funcID == funcID_systemstack_switch {
		// A special routine at the bottom of the stack of a goroutine that does a systemstack call.
		// We will allow it to be copied even though we don't
		// have full GC info for it (because it is written in asm).
		return true
	}

	locals, args, objs := getStackMap(frame, &adjinfo.cache, true)

	// Adjust local variables if stack frame has been allocated.
	if locals.n > 0 {
		size := uintptr(locals.n) * sys.PtrSize
		adjustpointers(unsafe.Pointer(frame.varp-size), &locals, adjinfo, f)
	}

	// Adjust saved base pointer if there is one.
	if sys.ArchFamily == sys.AMD64 && frame.argp-frame.varp == 2*sys.RegSize {
		if !framepointer_enabled {
			print("runtime: found space for saved base pointer, but no framepointer experiment\n")
			print("argp=", hex(frame.argp), " varp=", hex(frame.varp), "\n")
			throw("bad frame layout")
		}
		if stackDebug >= 3 {
			print("      saved bp\n")
		}
		if debugCheckBP {
			// Frame pointers should always point to the next higher frame on
			// the Go stack (or be nil, for the top frame on the stack).
			bp := *(*uintptr)(unsafe.Pointer(frame.varp))
			if bp != 0 && (bp < adjinfo.old.lo || bp >= adjinfo.old.hi) {
				println("runtime: found invalid frame pointer")
				print("bp=", hex(bp), " min=", hex(adjinfo.old.lo), " max=", hex(adjinfo.old.hi), "\n")
				throw("bad frame pointer")
			}
		}
		adjustpointer(adjinfo, unsafe.Pointer(frame.varp))
	}

	// Adjust arguments.
	if args.n > 0 {
		if stackDebug >= 3 {
			print("      args\n")
		}
		adjustpointers(unsafe.Pointer(frame.argp), &args, adjinfo, funcInfo{})
	}

	// Adjust pointers in all stack objects (whether they are live or not).
	// See comments in mgcmark.go:scanframeworker.
	if frame.varp != 0 {
		for _, obj := range objs {
			off := obj.off
			base := frame.varp // locals base pointer
			if off >= 0 {
				base = frame.argp // arguments and return values base pointer
			}
			p := base + uintptr(off)
			if p < frame.sp {
				// Object hasn't been allocated in the frame yet.
				// (Happens when the stack bounds check fails and
				// we call into morestack.)
				continue
			}
			t := obj.typ
			gcdata := t.gcdata
			var s *mspan
			if t.kind&kindGCProg != 0 {
				// See comments in mgcmark.go:scanstack
				s = materializeGCProg(t.ptrdata, gcdata)
				gcdata = (*byte)(unsafe.Pointer(s.startAddr))
			}
			for i := uintptr(0); i < t.ptrdata; i += sys.PtrSize {
				if *addb(gcdata, i/(8*sys.PtrSize))>>(i/sys.PtrSize&7)&1 != 0 {
					adjustpointer(adjinfo, unsafe.Pointer(p+i))
				}
			}
			if s != nil {
				dematerializeGCProg(s)
			}
		}
	}

	return true
}

func adjustctxt(gp *g, adjinfo *adjustinfo) {
	adjustpointer(adjinfo, unsafe.Pointer(&gp.sched.ctxt))
	if !framepointer_enabled {
		return
	}
	if debugCheckBP {
		bp := gp.sched.bp
		if bp != 0 && (bp < adjinfo.old.lo || bp >= adjinfo.old.hi) {
			println("runtime: found invalid top frame pointer")
			print("bp=", hex(bp), " min=", hex(adjinfo.old.lo), " max=", hex(adjinfo.old.hi), "\n")
			throw("bad top frame pointer")
		}
	}
	adjustpointer(adjinfo, unsafe.Pointer(&gp.sched.bp))
727 728 729 730
}

func adjustdefers(gp *g, adjinfo *adjustinfo) {
	// Adjust pointers in the Defer structs.
731 732 733
	// We need to do this first because we need to adjust the
	// defer.link fields so we always work on the new stack.
	adjustpointer(adjinfo, unsafe.Pointer(&gp._defer))
734
	for d := gp._defer; d != nil; d = d.link {
735 736 737
		adjustpointer(adjinfo, unsafe.Pointer(&d.fn))
		adjustpointer(adjinfo, unsafe.Pointer(&d.sp))
		adjustpointer(adjinfo, unsafe.Pointer(&d._panic))
738
		adjustpointer(adjinfo, unsafe.Pointer(&d.link))
739 740
		adjustpointer(adjinfo, unsafe.Pointer(&d.varp))
		adjustpointer(adjinfo, unsafe.Pointer(&d.fd))
741
	}
742 743 744 745 746

	// Adjust defer argument blocks the same way we adjust active stack frames.
	// Note: this code is after the loop above, so that if a defer record is
	// stack allocated, we work on the copy in the new stack.
	tracebackdefers(gp, adjustframe, noescape(unsafe.Pointer(adjinfo)))
747 748 749 750 751
}

func adjustpanics(gp *g, adjinfo *adjustinfo) {
	// Panics are on stack and already adjusted.
	// Update pointer to head of list in G.
752
	adjustpointer(adjinfo, unsafe.Pointer(&gp._panic))
753 754 755 756 757 758
}

func adjustsudogs(gp *g, adjinfo *adjustinfo) {
	// the data elements pointed to by a SudoG structure
	// might be in the stack.
	for s := gp.waiting; s != nil; s = s.waitlink {
759
		adjustpointer(adjinfo, unsafe.Pointer(&s.elem))
760 761 762 763 764 765 766 767 768
	}
}

func fillstack(stk stack, b byte) {
	for p := stk.lo; p < stk.hi; p++ {
		*(*byte)(unsafe.Pointer(p)) = b
	}
}

func findsghi(gp *g, stk stack) uintptr {
	var sghi uintptr
	for sg := gp.waiting; sg != nil; sg = sg.waitlink {
		p := uintptr(sg.elem) + uintptr(sg.c.elemsize)
		if stk.lo <= p && p < stk.hi && p > sghi {
			sghi = p
		}
	}
	return sghi
}

// syncadjustsudogs adjusts gp's sudogs and copies the part of gp's
// stack they refer to while synchronizing with concurrent channel
// operations. It returns the number of bytes of stack copied.
func syncadjustsudogs(gp *g, used uintptr, adjinfo *adjustinfo) uintptr {
	if gp.waiting == nil {
		return 0
	}

	// Lock channels to prevent concurrent send/receive.
	// It's important that we *only* do this for async
	// copystack; otherwise, gp may be in the middle of
	// putting itself on wait queues and this would
	// self-deadlock.
793
	var lastc *hchan
794
	for sg := gp.waiting; sg != nil; sg = sg.waitlink {
795 796 797 798
		if sg.c != lastc {
			lock(&sg.c.lock)
		}
		lastc = sg.c
799 800 801 802 803 804 805 806 807 808 809 810 811 812 813 814 815
	}

	// Adjust sudogs.
	adjustsudogs(gp, adjinfo)

	// Copy the part of the stack the sudogs point in to
	// while holding the lock to prevent races on
	// send/receive slots.
	var sgsize uintptr
	if adjinfo.sghi != 0 {
		oldBot := adjinfo.old.hi - used
		newBot := oldBot + adjinfo.delta
		sgsize = adjinfo.sghi - oldBot
		memmove(unsafe.Pointer(newBot), unsafe.Pointer(oldBot), sgsize)
	}

	// Unlock channels.
816
	lastc = nil
817
	for sg := gp.waiting; sg != nil; sg = sg.waitlink {
818 819 820 821
		if sg.c != lastc {
			unlock(&sg.c.lock)
		}
		lastc = sg.c
822 823 824 825 826
	}

	return sgsize
}

// Copies gp's stack to a new stack of a different size.
// Caller must have changed gp status to Gcopystack.
//
// If sync is true, this is a self-triggered stack growth and, in
// particular, no other G may be writing to gp's stack (e.g., via a
// channel operation). If sync is false, copystack protects against
// concurrent channel operations.
func copystack(gp *g, newsize uintptr, sync bool) {
835
	if gp.syscallsp != 0 {
836
		throw("stack growth not allowed in system call")
837 838 839
	}
	old := gp.stack
	if old.lo == 0 {
		throw("nil stackbase")
	}
	used := old.hi - gp.sched.sp

	// allocate new stack
	new := stackalloc(uint32(newsize))
	if stackPoisonCopy != 0 {
		fillstack(new, 0xfd)
	}
	if stackDebug >= 1 {
		print("copystack gp=", gp, " [", hex(old.lo), " ", hex(old.hi-used), " ", hex(old.hi), "]", " -> [", hex(new.lo), " ", hex(new.hi-used), " ", hex(new.hi), "]/", newsize, "\n")
	}

	// Compute adjustment.
	var adjinfo adjustinfo
	adjinfo.old = old
	adjinfo.delta = new.hi - old.hi

	// Adjust sudogs, synchronizing with channel ops if necessary.
	ncopy := used
	if sync {
		adjustsudogs(gp, &adjinfo)
	} else {
		// sudogs can point in to the stack. During concurrent
		// shrinking, these areas may be written to. Find the
		// highest such pointer so we can handle everything
		// there and below carefully. (This shouldn't be far
		// from the bottom of the stack, so there's little
		// cost in handling everything below it carefully.)
		adjinfo.sghi = findsghi(gp, old)

		// Synchronize with channel ops and copy the part of
		// the stack they may interact with.
		ncopy -= syncadjustsudogs(gp, used, &adjinfo)
	}

	// Copy the stack (or the rest of it) to the new location
	memmove(unsafe.Pointer(new.hi-ncopy), unsafe.Pointer(old.hi-ncopy), ncopy)

	// Adjust remaining structures that have pointers into stacks.
	// We have to do most of these before we traceback the new
	// stack because gentraceback uses them.
	adjustctxt(gp, &adjinfo)
	adjustdefers(gp, &adjinfo)
	adjustpanics(gp, &adjinfo)
	if adjinfo.sghi != 0 {
		adjinfo.sghi += adjinfo.delta
	}

	// Swap out old stack for new one
	gp.stack = new
	gp.stackguard0 = new.lo + _StackGuard // NOTE: might clobber a preempt request
	gp.sched.sp = new.hi - used
	gp.stktopsp += adjinfo.delta

	// Adjust pointers in the new stack.
	gentraceback(^uintptr(0), ^uintptr(0), 0, gp, 0, nil, 0x7fffffff, adjustframe, noescape(unsafe.Pointer(&adjinfo)), 0)

	// free old stack
	if stackPoisonCopy != 0 {
		fillstack(old, 0xfc)
	}
	stackfree(old)
}

// round x up to a power of 2.
func round2(x int32) int32 {
	s := uint(0)
	for 1<<s < x {
		s++
	}
	return 1 << s
}
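
// For example, round2(1) == 1, round2(5) == 8, and round2(8) == 8. Stack
// sizes are kept at powers of two because stackalloc throws otherwise;
// callers such as malg round a requested size up with round2 first.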

// Called from runtime·morestack when more stack is needed.
// Allocate larger stack and relocate to new stack.
// Stack growth is multiplicative, for constant amortized cost.
//
// g->atomicstatus will be Grunning or Gscanrunning upon entry.
// If the GC is trying to stop this g then it will set preemptscan to true.
//
// This must be nowritebarrierrec because it can be called as part of
// stack growth from other nowritebarrierrec functions, but the
// compiler doesn't check this.
//
//go:nowritebarrierrec
func newstack() {
	thisg := getg()
	// TODO: double check all gp. shouldn't be getg().
	if thisg.m.morebuf.g.ptr().stackguard0 == stackFork {
		throw("stack growth after fork")
	}
	if thisg.m.morebuf.g.ptr() != thisg.m.curg {
		print("runtime: newstack called from g=", hex(thisg.m.morebuf.g), "\n"+"\tm=", thisg.m, " m->curg=", thisg.m.curg, " m->g0=", thisg.m.g0, " m->gsignal=", thisg.m.gsignal, "\n")
		morebuf := thisg.m.morebuf
		traceback(morebuf.pc, morebuf.sp, morebuf.lr, morebuf.g.ptr())
		throw("runtime: wrong goroutine in newstack")
	}

	gp := thisg.m.curg

	if thisg.m.curg.throwsplit {
		// Update syscallsp, syscallpc in case traceback uses them.
		morebuf := thisg.m.morebuf
		gp.syscallsp = morebuf.sp
		gp.syscallpc = morebuf.pc
		pcname, pcoff := "(unknown)", uintptr(0)
		f := findfunc(gp.sched.pc)
		if f.valid() {
			pcname = funcname(f)
			pcoff = gp.sched.pc - f.entry
		}
		print("runtime: newstack at ", pcname, "+", hex(pcoff),
			" sp=", hex(gp.sched.sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n",
			"\tmorebuf={pc:", hex(morebuf.pc), " sp:", hex(morebuf.sp), " lr:", hex(morebuf.lr), "}\n",
			"\tsched={pc:", hex(gp.sched.pc), " sp:", hex(gp.sched.sp), " lr:", hex(gp.sched.lr), " ctxt:", gp.sched.ctxt, "}\n")

		thisg.m.traceback = 2 // Include runtime frames
		traceback(morebuf.pc, morebuf.sp, morebuf.lr, gp)
		throw("runtime: stack split at bad time")
	}

	morebuf := thisg.m.morebuf
	thisg.m.morebuf.pc = 0
	thisg.m.morebuf.lr = 0
	thisg.m.morebuf.sp = 0
	thisg.m.morebuf.g = 0

	// NOTE: stackguard0 may change underfoot, if another thread
	// is about to try to preempt gp. Read it just once and use that same
	// value now and below.
	preempt := atomic.Loaduintptr(&gp.stackguard0) == stackPreempt

	// Be conservative about where we preempt.
	// We are interested in preempting user Go code, not runtime code.
	// If we're holding locks, mallocing, or preemption is disabled, don't
	// preempt.
	// This check is very early in newstack so that even the status change
	// from Grunning to Gwaiting and back doesn't happen in this case.
	// That status change by itself can be viewed as a small preemption,
	// because the GC might change Gwaiting to Gscanwaiting, and then
	// this goroutine has to wait for the GC to finish before continuing.
	// If the GC is in some way dependent on this goroutine (for example,
	// it needs a lock held by the goroutine), that small preemption turns
	// into a real deadlock.
	if preempt {
		if thisg.m.locks != 0 || thisg.m.mallocing != 0 || thisg.m.preemptoff != "" || thisg.m.p.ptr().status != _Prunning {
			// Let the goroutine keep running for now.
			// gp->preempt is set, so it will be preempted next time.
			gp.stackguard0 = gp.stack.lo + _StackGuard
			gogo(&gp.sched) // never return
		}
	}

	if gp.stack.lo == 0 {
		throw("missing stack in newstack")
	}
	sp := gp.sched.sp
	if sys.ArchFamily == sys.AMD64 || sys.ArchFamily == sys.I386 || sys.ArchFamily == sys.WASM {
		// The call to morestack costs a word.
		sp -= sys.PtrSize
	}
	if stackDebug >= 1 || sp < gp.stack.lo {
		print("runtime: newstack sp=", hex(sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n",
			"\tmorebuf={pc:", hex(morebuf.pc), " sp:", hex(morebuf.sp), " lr:", hex(morebuf.lr), "}\n",
			"\tsched={pc:", hex(gp.sched.pc), " sp:", hex(gp.sched.sp), " lr:", hex(gp.sched.lr), " ctxt:", gp.sched.ctxt, "}\n")
	}
	if sp < gp.stack.lo {
		print("runtime: gp=", gp, ", goid=", gp.goid, ", gp->status=", hex(readgstatus(gp)), "\n ")
		print("runtime: split stack overflow: ", hex(sp), " < ", hex(gp.stack.lo), "\n")
		throw("runtime: split stack overflow")
	}

	if preempt {
		if gp == thisg.m.g0 {
			throw("runtime: preempt g0")
		}
		if thisg.m.p == 0 && thisg.m.locks == 0 {
			throw("runtime: g is running but p is not")
		}

		if gp.preemptStop {
			preemptPark(gp) // never returns
		}

		// Synchronize with scang.
		casgstatus(gp, _Grunning, _Gwaiting)
		if gp.preemptscan {
			for !castogscanstatus(gp, _Gwaiting, _Gscanwaiting) {
				// Likely to be racing with the GC as
				// it sees a _Gwaiting and does the
				// stack scan. If so, gcworkdone will
				// be set and gcphasework will simply
				// return.
			}
			if !gp.gcscandone {
				// gcw is safe because we're on the
				// system stack.
				gcw := &gp.m.p.ptr().gcw
				scanstack(gp, gcw)
				gp.gcscandone = true
			}
			gp.preemptscan = false
			gp.preempt = false
			casfrom_Gscanstatus(gp, _Gscanwaiting, _Gwaiting)
			// This clears gcscanvalid.
			casgstatus(gp, _Gwaiting, _Grunning)
			gp.stackguard0 = gp.stack.lo + _StackGuard
			gogo(&gp.sched) // never return
		}

		// Act like goroutine called runtime.Gosched.
		casgstatus(gp, _Gwaiting, _Grunning)
		gopreempt_m(gp) // never return
	}

	// Allocate a bigger segment and move the stack.
	oldsize := gp.stack.hi - gp.stack.lo
	newsize := oldsize * 2
	if newsize > maxstacksize {
		print("runtime: goroutine stack exceeds ", maxstacksize, "-byte limit\n")
		throw("stack overflow")
	}

	// The goroutine must be executing in order to call newstack,
	// so it must be Grunning (or Gscanrunning).
	casgstatus(gp, _Grunning, _Gcopystack)

	// The concurrent GC will not scan the stack while we are doing the copy since
	// the gp is in a Gcopystack status.
	copystack(gp, newsize, true)
	if stackDebug >= 1 {
		print("stack grow done\n")
	}
	casgstatus(gp, _Gcopystack, _Grunning)
	gogo(&gp.sched)
}

//go:nosplit
func nilfunc() {
	*(*uint8)(nil) = 0
}

// adjust Gobuf as if it executed a call to fn
// and then did an immediate gosave.
func gostartcallfn(gobuf *gobuf, fv *funcval) {
	var fn unsafe.Pointer
	if fv != nil {
		fn = unsafe.Pointer(fv.fn)
	} else {
		fn = unsafe.Pointer(funcPC(nilfunc))
	}
	gostartcall(gobuf, fn, unsafe.Pointer(fv))
}

// Maybe shrink the stack being used by gp.
// Called at garbage collection time.
// gp must be stopped, but the world need not be.
func shrinkstack(gp *g) {
	gstatus := readgstatus(gp)
	if gp.stack.lo == 0 {
		throw("missing stack in shrinkstack")
	}
	if gstatus&_Gscan == 0 {
		throw("bad status in shrinkstack")
	}
	// Check for self-shrinks while in a libcall. These may have
	// pointers into the stack disguised as uintptrs, but these
	// code paths should all be nosplit.
	if gp == getg().m.curg && gp.m.libcallsp != 0 {
		throw("shrinking stack in libcall")
	}

	if debug.gcshrinkstackoff > 0 {
		return
	}
	f := findfunc(gp.startpc)
	if f.valid() && f.funcID == funcID_gcBgMarkWorker {
		// We're not allowed to shrink the gcBgMarkWorker
		// stack (see gcBgMarkWorker for explanation).
		return
	}

	oldsize := gp.stack.hi - gp.stack.lo
	newsize := oldsize / 2
	// Don't shrink the allocation below the minimum-sized stack
	// allocation.
	if newsize < _FixedStack {
		return
	}
	// Compute how much of the stack is currently in use and only
	// shrink the stack if gp is using less than a quarter of its
	// current stack. The currently used stack includes everything
	// down to the SP plus the stack guard space that ensures
	// there's room for nosplit functions.
	avail := gp.stack.hi - gp.stack.lo
	if used := gp.stack.hi - gp.sched.sp + _StackLimit; used >= avail/4 {
		return
	}

	// We can't copy the stack if we're in a syscall.
	// The syscall might have pointers into the stack.
	if gp.syscallsp != 0 {
		return
	}

	if stackDebug > 0 {
		print("shrinking stack ", oldsize, "->", newsize, "\n")
	}

	copystack(gp, newsize, false)
}

// freeStackSpans frees unused stack spans at the end of GC.
func freeStackSpans() {

	// Scan stack pools for empty stack spans.
	for order := range stackpool {
		lock(&stackpool[order].item.mu)
		list := &stackpool[order].item.span
		for s := list.first; s != nil; {
			next := s.next
			if s.allocCount == 0 {
				list.remove(s)
				s.manualFreeList = 0
				osStackFree(s)
				mheap_.freeManual(s, &memstats.stacks_inuse)
			}
			s = next
		}
		unlock(&stackpool[order].item.mu)
	}

	// Free large stack spans.
	lock(&stackLarge.lock)
	for i := range stackLarge.free {
		for s := stackLarge.free[i].first; s != nil; {
			next := s.next
			stackLarge.free[i].remove(s)
			osStackFree(s)
			mheap_.freeManual(s, &memstats.stacks_inuse)
			s = next
		}
	}
	unlock(&stackLarge.lock)
}

// getStackMap returns the locals and arguments live pointer maps, and
// stack object list for frame.
func getStackMap(frame *stkframe, cache *pcvalueCache, debug bool) (locals, args bitvector, objs []stackObjectRecord) {
	targetpc := frame.continpc
	if targetpc == 0 {
		// Frame is dead. Return empty bitvectors.
		return
	}

	f := frame.fn
	pcdata := int32(-1)
	if targetpc != f.entry {
		// Back up to the CALL. If we're at the function entry
		// point, we want to use the entry map (-1), even if
		// the first instruction of the function changes the
		// stack map.
		targetpc--
		pcdata = pcdatavalue(f, _PCDATA_StackMapIndex, targetpc, cache)
	}
	if pcdata == -1 {
		// We do not have a valid pcdata value but there might be a
		// stackmap for this function. It is likely that we are looking
		// at the function prologue, assume so and hope for the best.
		pcdata = 0
	}

	// Local variables.
	size := frame.varp - frame.sp
	var minsize uintptr
	switch sys.ArchFamily {
	case sys.ARM64:
		minsize = sys.SpAlign
	default:
		minsize = sys.MinFrameSize
	}
	if size > minsize {
		var stkmap *stackmap
		stackid := pcdata
		if f.funcID != funcID_debugCallV1 {
			stkmap = (*stackmap)(funcdata(f, _FUNCDATA_LocalsPointerMaps))
		} else {
			// debugCallV1's stack map is the register map
			// at its call site.
			callerPC := frame.lr
			caller := findfunc(callerPC)
			if !caller.valid() {
				println("runtime: debugCallV1 called by unknown caller", hex(callerPC))
				throw("bad debugCallV1")
			}
			stackid = int32(-1)
			if callerPC != caller.entry {
				callerPC--
				stackid = pcdatavalue(caller, _PCDATA_RegMapIndex, callerPC, cache)
			}
			if stackid == -1 {
				stackid = 0 // in prologue
			}
			stkmap = (*stackmap)(funcdata(caller, _FUNCDATA_RegPointerMaps))
		}
		if stkmap == nil || stkmap.n <= 0 {
			print("runtime: frame ", funcname(f), " untyped locals ", hex(frame.varp-size), "+", hex(size), "\n")
			throw("missing stackmap")
		}
		// If nbit == 0, there's no work to do.
		if stkmap.nbit > 0 {
			if stackid < 0 || stackid >= stkmap.n {
				// don't know where we are
				print("runtime: pcdata is ", stackid, " and ", stkmap.n, " locals stack map entries for ", funcname(f), " (targetpc=", hex(targetpc), ")\n")
				throw("bad symbol table")
			}
			locals = stackmapdata(stkmap, stackid)
			if stackDebug >= 3 && debug {
				print("      locals ", stackid, "/", stkmap.n, " ", locals.n, " words ", locals.bytedata, "\n")
			}
		} else if stackDebug >= 3 && debug {
			print("      no locals to adjust\n")
		}
	}

	// Arguments.
	if frame.arglen > 0 {
		if frame.argmap != nil {
			// argmap is set when the function is reflect.makeFuncStub or reflect.methodValueCall.
			// In this case, arglen specifies how much of the args section is actually live.
			// (It could be either all the args + results, or just the args.)
			args = *frame.argmap
			n := int32(frame.arglen / sys.PtrSize)
			if n < args.n {
				args.n = n // Don't use more of the arguments than arglen.
			}
		} else {
			stackmap := (*stackmap)(funcdata(f, _FUNCDATA_ArgsPointerMaps))
			if stackmap == nil || stackmap.n <= 0 {
				print("runtime: frame ", funcname(f), " untyped args ", hex(frame.argp), "+", hex(frame.arglen), "\n")
				throw("missing stackmap")
			}
			if pcdata < 0 || pcdata >= stackmap.n {
				// don't know where we are
				print("runtime: pcdata is ", pcdata, " and ", stackmap.n, " args stack map entries for ", funcname(f), " (targetpc=", hex(targetpc), ")\n")
				throw("bad symbol table")
			}
			if stackmap.nbit > 0 {
				args = stackmapdata(stackmap, pcdata)
			}
		}
	}

	// stack objects.
	p := funcdata(f, _FUNCDATA_StackObjects)
	if p != nil {
		n := *(*uintptr)(p)
		p = add(p, sys.PtrSize)
		*(*slice)(unsafe.Pointer(&objs)) = slice{array: noescape(p), len: int(n), cap: int(n)}
		// Note: the noescape above is needed to keep
		// getStackMap from "leaking param content:
		// frame".  That leak propagates up to getgcmask, then
		// GCMask, then verifyGCInfo, which converts the stack
		// gcinfo tests into heap gcinfo tests :(
	}

	return
}

// A stackObjectRecord is generated by the compiler for each stack object in a stack frame.
// This record must match the generator code in cmd/compile/internal/gc/ssa.go:emitStackObjects.
type stackObjectRecord struct {
	// offset in frame
	// if negative, offset from varp
	// if non-negative, offset from argp
	off int
	typ *_type
}
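
// For example, a record with off == -24 describes an object starting 24 bytes
// below frame.varp (a local), while off == 8 describes one starting 8 bytes
// above frame.argp (an argument or result); adjustframe applies this rule
// when it computes p := base + uintptr(off).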

// This is exported as ABI0 via linkname so obj can call it.
//
//go:nosplit
//go:linkname morestackc
func morestackc() {
	throw("attempt to execute system stack code on user stack")
}