Commit 8299741f authored by Kirill Smelkov

tracing/runtime: Add support for Go1.22

Generate g for today's state of Go 1.22 (go1.22.0-0-ga10e42f219).
Compared to Go1.21, many new fields and a couple of new types were
introduced, as shown by the diff below.

Regenerated files stay unchanged for Go1.21 and previous releases.
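
The mirrored definition exists so that a raw pointer to the scheduler's real g
can be reinterpreted through it; for that, field offsets must match the active
Go release exactly, which is why a new zruntime_g file is generated per
release. Below is a minimal, illustrative sketch of that pattern; the names
gHeader and stackBounds and the fake g value are hypothetical and not part of
this commit or of the package API.

// Illustrative only -- not part of this commit. Shows why the mirrored
// layout must match the real runtime.g of the active Go release: raw g
// pointers are reinterpreted through it, so every field offset matters.
package main

import (
	"fmt"
	"unsafe"
)

// stack and gHeader mirror the leading fields of Go 1.22 runtime.g,
// in the same order as zruntime_g_go1.22.go (remaining fields elided).
type stack struct{ lo, hi uintptr }

type gHeader struct {
	stack       stack
	stackguard0 uintptr
	stackguard1 uintptr
}

// stackBounds reads the stack bounds from a raw runtime g pointer, as a
// tracing probe might do. Correct only while the mirrored layout matches
// the toolchain that built the program.
func stackBounds(gp unsafe.Pointer) (lo, hi uintptr) {
	g := (*gHeader)(gp)
	return g.stack.lo, g.stack.hi
}

func main() {
	// Stand-in value instead of a real runtime g, just to exercise the accessor.
	fake := gHeader{stack: stack{lo: 0x1000, hi: 0x9000}}
	lo, hi := stackBounds(unsafe.Pointer(&fake))
	fmt.Printf("stack: [%#x, %#x)\n", lo, hi)
}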

---- 8< ----
diff --git a/zruntime_g_go1.21.go b/zruntime_g_go1.22.go
index c55d865..910382b 100644
--- a/zruntime_g_go1.21.go
+++ b/zruntime_g_go1.22.go
@@ -1,7 +1,7 @@
 // Code generated by g_typedef; DO NOT EDIT.

-//go:build go1.21 && !go1.22
-// +build go1.21,!go1.22
+//go:build go1.22 && !go1.23
+// +build go1.22,!go1.23

 package xruntime

@@ -13,7 +13,7 @@ type g struct {
 	// stack describes the actual stack memory: [stack.lo, stack.hi).
 	// stackguard0 is the stack pointer compared in the Go stack growth prologue.
 	// It is stack.lo+StackGuard normally, but can be StackPreempt to trigger a preemption.
-	// stackguard1 is the stack pointer compared in the C stack growth prologue.
+	// stackguard1 is the stack pointer compared in the //go:systemstack stack growth prologue.
 	// It is stack.lo+StackGuard on g0 and gsignal stacks.
 	// It is ~0 on other goroutine stacks, to trigger a call to morestackc (and crash).
 	stack       stack   // offset known to runtime/cgo
@@ -30,7 +30,7 @@ type g struct {
 	// param is a generic pointer parameter field used to pass
 	// values in particular contexts where other storage for the
 	// parameter would be difficult to find. It is currently used
-	// in three ways:
+	// in four ways:
 	// 1. When a channel operation wakes up a blocked goroutine, it sets param to
 	//    point to the sudog of the completed blocking operation.
 	// 2. By gcAssistAlloc1 to signal back to its caller that the goroutine completed
@@ -38,6 +38,8 @@ type g struct {
 	//    stack may have moved in the meantime.
 	// 3. By debugCallWrap to pass parameters to a new goroutine because allocating a
 	//    closure in the runtime is forbidden.
+	// 4. When a panic is recovered and control returns to the respective frame,
+	//    param may point to a savedOpenDeferState.
 	param        unsafe.Pointer
 	atomicstatus atomic.Uint32
 	stackLock    uint32 // sigprof/scang lock; TODO: fold in to atomicstatus
@@ -67,8 +69,13 @@ type g struct {
 	// park on a chansend or chanrecv. Used to signal an unsafe point
 	// for stack shrinking.
 	parkingOnChan atomic.Bool
+	// inMarkAssist indicates whether the goroutine is in mark assist.
+	// Used by the execution tracer.
+	inMarkAssist bool
+	coroexit     bool // argument to coroswitch_m

 	raceignore    int8  // ignore race detection events
+	nocgocallback bool  // whether disable callback from C
 	tracking      bool  // whether we're tracking this G for sched latency statistics
 	trackingSeq   uint8 // used to decide whether to track this G
 	trackingStamp int64 // timestamp of when the G last started being tracked
@@ -90,6 +97,8 @@ type g struct {
 	timer         *timer         // cached timer for time.Sleep
 	selectDone    atomic.Uint32  // are we participating in a select and did someone win the race?

+	coroarg *coro // argument during coroutine transfers
+
 	// goroutineProfiled indicates the status of this goroutine's stack for the
 	// current in-progress goroutine profile
 	goroutineProfiled goroutineProfileStateHolder
@@ -114,36 +123,39 @@ type _panic struct {
 	argp unsafe.Pointer // pointer to arguments of deferred call run during panic; cannot move - known to liblink
 	arg  interface{}    // argument to panic
 	link *_panic        // link to earlier panic
-	pc        uintptr        // where to return to in runtime if this panic is bypassed
-	sp        unsafe.Pointer // where to return to in runtime if this panic is bypassed
-	recovered bool           // whether this panic is over
-	aborted   bool           // the panic was aborted
+
+	// startPC and startSP track where _panic.start was called.
+	startPC uintptr
+	startSP unsafe.Pointer
+
+	// The current stack frame that we're running deferred calls for.
+	sp unsafe.Pointer
+	lr uintptr
+	fp unsafe.Pointer
+
+	// retpc stores the PC where the panic should jump back to, if the
+	// function last returned by _panic.next() recovers the panic.
+	retpc uintptr
+
+	// Extra state for handling open-coded defers.
+	deferBitsPtr *uint8
+	slotsPtr     unsafe.Pointer
+
+	recovered   bool // whether this panic has been recovered
 	goexit      bool
+	deferreturn bool
 }
 type _defer struct {
-	started bool
 	heap      bool
-	// openDefer indicates that this _defer is for a frame with open-coded
-	// defers. We have only one defer record for the entire frame (which may
-	// currently have 0, 1, or more defers active).
-	openDefer bool
+	rangefunc bool    // true for rangefunc list
 	sp        uintptr // sp at time of defer
 	pc        uintptr // pc at time of defer
 	fn        func()  // can be nil for open-coded defers
-	_panic    *_panic // panic that is running defer
 	link      *_defer // next defer on G; can point to either heap or stack!

-	// If openDefer is true, the fields below record values about the stack
-	// frame and associated function that has the open-coded defer(s). sp
-	// above will be the sp for the frame, and pc will be address of the
-	// deferreturn call in the function.
-	fd   unsafe.Pointer // funcdata for the function associated with the frame
-	varp uintptr        // value of varp for the stack frame
-	// framepc is the current pc associated with the stack frame. Together,
-	// with sp above (which is the sp associated with the stack frame),
-	// framepc/sp can be used as pc/sp pair to continue a stack trace via
-	// gentraceback().
-	framepc uintptr
+	// If rangefunc is true, *head is the head of the atomic linked list
+	// during a range-over-func execution.
+	head *atomic.Pointer[_defer]
 }
 type gobuf struct {
 	// The offsets of sp, pc, and g are known to (hard-coded in) libmach.
@@ -203,12 +215,31 @@ type ancestorInfo struct {
 }
 type goroutineProfileStateHolder atomic.Uint32
 type gTraceState struct {
-	sysExitTime        traceTime // timestamp when syscall has returned
-	tracedSyscallEnter bool      // syscall or cgo was entered while trace was enabled or StartTrace has emitted EvGoInSyscall about this goroutine
-	seq                uint64    // trace event sequencer
-	lastP              puintptr  // last P emitted an event for this goroutine
+	traceSchedResourceState
 }
 type traceTime uint64
+type coro struct {
+	gp guintptr
+	f  func(*coro)
+}
+type traceSchedResourceState struct {
+	// statusTraced indicates whether a status event was traced for this resource
+	// a particular generation.
+	//
+	// There are 3 of these because when transitioning across generations, traceAdvance
+	// needs to be able to reliably observe whether a status was traced for the previous
+	// generation, while we need to clear the value for the next generation.
+	statusTraced [3]atomic.Uint32
+
+	// seq is the sequence counter for this scheduling resource's events.
+	// The purpose of the sequence counter is to establish a partial order between
+	// events that don't obviously happen serially (same M) in the stream of events.
+	//
+	// There are two of these so that we can reset the counter on each generation.
+	// This saves space in the resulting trace by keeping the counter small and allows
+	// GoStatus and GoCreate events to omit a sequence number (implicitly 0).
+	seq [2]uint64
+}
 type uintreg uint          // FIXME wrong on amd64p32
 type m struct{}            // FIXME stub
 type sudog struct{}        // FIXME stub
parent 95433de3

g_typedef
 #!/bin/bash
 # g_typedef -- generate type definition of runtime.g
 #
-# Copyright (C) 2017-2023 Nexedi SA and Contributors.
+# Copyright (C) 2017-2024 Nexedi SA and Contributors.
 # Kirill Smelkov <kirr@nexedi.com>
 #
 # This program is free software: you can Use, Study, Modify and Redistribute
@@ -82,6 +82,11 @@ typedef_g() {
 		typedef runtime.gTraceState
 		typedef runtime.traceTime
 	fi
+	if (( $govern >= 122 )); then
+		typedef runtime.coro
+		typedef runtime.traceSchedResourceState
+	fi
 }
 # typedef_g_fixed - print adjusted <g> & friends definitions
@@ -126,7 +131,7 @@ gen_zruntime() {
 # main driver
-gov="go18 go19 go1.10 go1.11 go1.12 go1.13 go1.14 go1.15 go1.16 go1.17 go1.18 go1.19 go1.20 go1.21"
+gov="go18 go19 go1.10 go1.11 go1.12 go1.13 go1.14 go1.15 go1.16 go1.17 go1.18 go1.19 go1.20 go1.21 go1.22"
 for g in $gov; do
 	goset $g

zruntime_g_go1.22.go

// Code generated by g_typedef; DO NOT EDIT.

//go:build go1.22 && !go1.23
// +build go1.22,!go1.23

package xruntime

import "unsafe"
import "sync/atomic"

type g struct {
	// Stack parameters.
	// stack describes the actual stack memory: [stack.lo, stack.hi).
	// stackguard0 is the stack pointer compared in the Go stack growth prologue.
	// It is stack.lo+StackGuard normally, but can be StackPreempt to trigger a preemption.
	// stackguard1 is the stack pointer compared in the //go:systemstack stack growth prologue.
	// It is stack.lo+StackGuard on g0 and gsignal stacks.
	// It is ~0 on other goroutine stacks, to trigger a call to morestackc (and crash).
	stack       stack   // offset known to runtime/cgo
	stackguard0 uintptr // offset known to liblink
	stackguard1 uintptr // offset known to liblink
	_panic      *_panic // innermost panic - offset known to liblink
	_defer      *_defer // innermost defer
	m           *m      // current m; offset known to arm liblink
	sched       gobuf
	syscallsp   uintptr // if status==Gsyscall, syscallsp = sched.sp to use during gc
	syscallpc   uintptr // if status==Gsyscall, syscallpc = sched.pc to use during gc
	stktopsp    uintptr // expected sp at top of stack, to check in traceback

	// param is a generic pointer parameter field used to pass
	// values in particular contexts where other storage for the
	// parameter would be difficult to find. It is currently used
	// in four ways:
	// 1. When a channel operation wakes up a blocked goroutine, it sets param to
	//    point to the sudog of the completed blocking operation.
	// 2. By gcAssistAlloc1 to signal back to its caller that the goroutine completed
	//    the GC cycle. It is unsafe to do so in any other way, because the goroutine's
	//    stack may have moved in the meantime.
	// 3. By debugCallWrap to pass parameters to a new goroutine because allocating a
	//    closure in the runtime is forbidden.
	// 4. When a panic is recovered and control returns to the respective frame,
	//    param may point to a savedOpenDeferState.
	param        unsafe.Pointer
	atomicstatus atomic.Uint32
	stackLock    uint32 // sigprof/scang lock; TODO: fold in to atomicstatus
	goid         uint64
	schedlink    guintptr
	waitsince    int64      // approx time when the g become blocked
	waitreason   waitReason // if status==Gwaiting

	preempt       bool // preemption signal, duplicates stackguard0 = stackpreempt
	preemptStop   bool // transition to _Gpreempted on preemption; otherwise, just deschedule
	preemptShrink bool // shrink stack at synchronous safe point

	// asyncSafePoint is set if g is stopped at an asynchronous
	// safe point. This means there are frames on the stack
	// without precise pointer information.
	asyncSafePoint bool

	paniconfault bool // panic (instead of crash) on unexpected fault address
	gcscandone   bool // g has scanned stack; protected by _Gscan bit in status
	throwsplit   bool // must not split stack
	// activeStackChans indicates that there are unlocked channels
	// pointing into this goroutine's stack. If true, stack
	// copying needs to acquire channel locks to protect these
	// areas of the stack.
	activeStackChans bool
	// parkingOnChan indicates that the goroutine is about to
	// park on a chansend or chanrecv. Used to signal an unsafe point
	// for stack shrinking.
	parkingOnChan atomic.Bool
	// inMarkAssist indicates whether the goroutine is in mark assist.
	// Used by the execution tracer.
	inMarkAssist bool
	coroexit     bool // argument to coroswitch_m

	raceignore    int8  // ignore race detection events
	nocgocallback bool  // whether disable callback from C
	tracking      bool  // whether we're tracking this G for sched latency statistics
	trackingSeq   uint8 // used to decide whether to track this G
	trackingStamp int64 // timestamp of when the G last started being tracked
	runnableTime  int64 // the amount of time spent runnable, cleared when running, only used when tracking
	lockedm       muintptr
	sig           uint32
	writebuf      []byte
	sigcode0      uintptr
	sigcode1      uintptr
	sigpc         uintptr
	parentGoid    uint64          // goid of goroutine that created this goroutine
	gopc          uintptr         // pc of go statement that created this goroutine
	ancestors     *[]ancestorInfo // ancestor information goroutine(s) that created this goroutine (only used if debug.tracebackancestors)
	startpc       uintptr         // pc of goroutine function
	racectx       uintptr
	waiting       *sudog         // sudog structures this g is waiting on (that have a valid elem ptr); in lock order
	cgoCtxt       []uintptr      // cgo traceback context
	labels        unsafe.Pointer // profiler labels
	timer         *timer         // cached timer for time.Sleep
	selectDone    atomic.Uint32  // are we participating in a select and did someone win the race?

	coroarg *coro // argument during coroutine transfers

	// goroutineProfiled indicates the status of this goroutine's stack for the
	// current in-progress goroutine profile
	goroutineProfiled goroutineProfileStateHolder

	// Per-G tracer state.
	trace gTraceState

	// gcAssistBytes is this G's GC assist credit in terms of
	// bytes allocated. If this is positive, then the G has credit
	// to allocate gcAssistBytes bytes without assisting. If this
	// is negative, then the G must correct this by performing
	// scan work. We track this in bytes to make it fast to update
	// and check for debt in the malloc hot path. The assist ratio
	// determines how this corresponds to scan work debt.
	gcAssistBytes int64
}
type stack struct {
	lo uintptr
	hi uintptr
}
type _panic struct {
	argp unsafe.Pointer // pointer to arguments of deferred call run during panic; cannot move - known to liblink
	arg  interface{}    // argument to panic
	link *_panic        // link to earlier panic

	// startPC and startSP track where _panic.start was called.
	startPC uintptr
	startSP unsafe.Pointer

	// The current stack frame that we're running deferred calls for.
	sp unsafe.Pointer
	lr uintptr
	fp unsafe.Pointer

	// retpc stores the PC where the panic should jump back to, if the
	// function last returned by _panic.next() recovers the panic.
	retpc uintptr

	// Extra state for handling open-coded defers.
	deferBitsPtr *uint8
	slotsPtr     unsafe.Pointer

	recovered   bool // whether this panic has been recovered
	goexit      bool
	deferreturn bool
}
type _defer struct {
	heap      bool
	rangefunc bool    // true for rangefunc list
	sp        uintptr // sp at time of defer
	pc        uintptr // pc at time of defer
	fn        func()  // can be nil for open-coded defers
	link      *_defer // next defer on G; can point to either heap or stack!

	// If rangefunc is true, *head is the head of the atomic linked list
	// during a range-over-func execution.
	head *atomic.Pointer[_defer]
}
type gobuf struct {
	// The offsets of sp, pc, and g are known to (hard-coded in) libmach.
	//
	// ctxt is unusual with respect to GC: it may be a
	// heap-allocated funcval, so GC needs to track it, but it
	// needs to be set and cleared from assembly, where it's
	// difficult to have write barriers. However, ctxt is really a
	// saved, live register, and we only ever exchange it between
	// the real register and the gobuf. Hence, we treat it as a
	// root during stack scanning, which means assembly that saves
	// and restores it doesn't need write barriers. It's still
	// typed as a pointer so that any other writes from Go get
	// write barriers.
	sp   uintptr
	pc   uintptr
	g    guintptr
	ctxt unsafe.Pointer
	ret  uintptr
	lr   uintptr
	bp   uintptr // for framepointer-enabled architectures
}
type funcval struct {
	fn uintptr
}
type timer struct {
	// If this timer is on a heap, which P's heap it is on.
	// puintptr rather than *p to match uintptr in the versions
	// of this struct defined in other packages.
	pp puintptr

	// Timer wakes up at when, and then at when+period, ... (period > 0 only)
	// each time calling f(arg, now) in the timer goroutine, so f must be
	// a well-behaved function and not block.
	//
	// when must be positive on an active timer.
	when   int64
	period int64
	f      func(interface{}, uintptr)
	arg    interface{}
	seq    uintptr

	// What to set the when field to in timerModifiedXX status.
	nextwhen int64

	// The status field holds one of the values below.
	status atomic.Uint32
}
type guintptr uintptr
type puintptr uintptr
type muintptr uintptr
type waitReason uint8
type ancestorInfo struct {
	pcs  []uintptr // pcs from the stack of this goroutine
	goid uint64    // goroutine id of this goroutine; original goroutine possibly dead
	gopc uintptr   // pc of go statement that created this goroutine
}
type goroutineProfileStateHolder atomic.Uint32
type gTraceState struct {
	traceSchedResourceState
}
type traceTime uint64
type coro struct {
	gp guintptr
	f  func(*coro)
}
type traceSchedResourceState struct {
	// statusTraced indicates whether a status event was traced for this resource
	// a particular generation.
	//
	// There are 3 of these because when transitioning across generations, traceAdvance
	// needs to be able to reliably observe whether a status was traced for the previous
	// generation, while we need to clear the value for the next generation.
	statusTraced [3]atomic.Uint32

	// seq is the sequence counter for this scheduling resource's events.
	// The purpose of the sequence counter is to establish a partial order between
	// events that don't obviously happen serially (same M) in the stream of events.
	//
	// There are two of these so that we can reset the counter on each generation.
	// This saves space in the resulting trace by keeping the counter small and allows
	// GoStatus and GoCreate events to omit a sequence number (implicitly 0).
	seq [2]uint64
}
type uintreg uint          // FIXME wrong on amd64p32
type m struct{}            // FIXME stub
type sudog struct{}        // FIXME stub
type timersBucket struct{} // FIXME stub