Commit 484f801f authored by Russ Cox

runtime: reorganize memory code

Move code from malloc1.go, malloc2.go, mem.go, mgc0.go into
appropriate locations.

Factor mgc.go into mgc.go, mgcmark.go, mgcsweep.go, mstats.go.

A lot of this code was in certain files because the right place was in
a C file but it was written in Go, or vice versa. This is one step toward
making things actually well-organized again.

Change-Id: I6741deb88a7cfb1c17ffe0bcca3989e10207968f
Reviewed-on: https://go-review.googlesource.com/5300
Reviewed-by: Austin Clements <austin@google.com>
Reviewed-by: Rick Hudson <rlh@golang.org>
parent d384545a
......@@ -13,6 +13,24 @@ package runtime
import "unsafe"
//go:linkname runtime_debug_WriteHeapDump runtime/debug.WriteHeapDump
func runtime_debug_WriteHeapDump(fd uintptr) {
semacquire(&worldsema, false)
gp := getg()
gp.m.preemptoff = "write heap dump"
systemstack(stoptheworld)
systemstack(func() {
writeheapdump_m(fd)
})
gp.m.preemptoff = ""
gp.m.locks++
semrelease(&worldsema)
systemstack(starttheworld)
gp.m.locks--
}
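For context, a minimal sketch of how user code reaches this linknamed implementation through the public runtime/debug API (the output file name is hypothetical):

package main

import (
	"os"
	"runtime/debug"
)

func main() {
	f, err := os.Create("heapdump") // hypothetical output file
	if err != nil {
		panic(err)
	}
	defer f.Close()
	// debug.WriteHeapDump is the exported wrapper that go:linkname
	// binds to runtime_debug_WriteHeapDump above.
	debug.WriteHeapDump(f.Fd())
}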
const (
fieldKindEol = 0
fieldKindPtr = 1
......
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
......@@ -82,6 +82,12 @@ const (
typeShift = 2
)
// Information from the compiler about the layout of stack frames.
type bitvector struct {
n int32 // # of bits
bytedata *uint8
}
// addb returns the byte pointer p+n.
//go:nowritebarrier
func addb(p *byte, n uintptr) *byte {
......
......@@ -2,14 +2,63 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Per-P malloc cache for small objects.
//
// See malloc.h for an overview.
package runtime
import "unsafe"
// Per-thread (in Go, per-P) cache for small objects.
// No locking needed because it is per-thread (per-P).
type mcache struct {
// The following members are accessed on every malloc,
// so they are grouped here for better caching.
next_sample int32 // trigger heap sample after allocating this many bytes
local_cachealloc intptr // bytes allocated (or freed) from cache since last lock of heap
// Allocator cache for tiny objects w/o pointers.
// See "Tiny allocator" comment in malloc.go.
tiny unsafe.Pointer
tinyoffset uintptr
local_tinyallocs uintptr // number of tiny allocs not counted in other stats
// The rest is not accessed on every malloc.
alloc [_NumSizeClasses]*mspan // spans to allocate from
stackcache [_NumStackOrders]stackfreelist
sudogcache *sudog
// Local allocator stats, flushed during GC.
local_nlookup uintptr // number of pointer lookups
local_largefree uintptr // bytes freed for large objects (>maxsmallsize)
local_nlargefree uintptr // number of frees for large objects (>maxsmallsize)
local_nsmallfree [_NumSizeClasses]uintptr // number of frees for small objects (<=maxsmallsize)
}
// A gclink is a node in a linked list of blocks, like mlink,
// but it is opaque to the garbage collector.
// The GC does not trace the pointers during collection,
// and the compiler does not emit write barriers for assignments
// of gclinkptr values. Code should store references to gclinks
// as gclinkptr, not as *gclink.
type gclink struct {
next gclinkptr
}
// A gclinkptr is a pointer to a gclink, but it is opaque
// to the garbage collector.
type gclinkptr uintptr
// ptr returns the *gclink form of p.
// The result should be used for accessing fields, not stored
// in other data structures.
func (p gclinkptr) ptr() *gclink {
return (*gclink)(unsafe.Pointer(p))
}
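As an illustration of why gclinkptr exists, a hypothetical push helper in this style stores only uintptr values, so the compiler emits no write barrier for the list links (gclinkPush is not part of this change):

// gclinkPush prepends the block at p to the list headed by *head.
// Both the next field and *head hold gclinkptr (a uintptr), so neither
// store is treated as a pointer write by the compiler.
func gclinkPush(head *gclinkptr, p gclinkptr) {
	p.ptr().next = *head
	*head = p
}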
type stackfreelist struct {
list gclinkptr // linked list of free stacks
size uintptr // total size of stacks in list
}
// dummy MSpan that contains no free objects.
var emptymspan mspan
......
......@@ -12,6 +12,14 @@
package runtime
// Central list of free objects of a given size.
type mcentral struct {
lock mutex
sizeclass int32
nonempty mspan // list of spans with a free object
empty mspan // list of spans with no free objects (or cached in an mcache)
}
// Initialize a single central free list.
func mCentral_Init(c *mcentral, sizeclass int32) {
c.sizeclass = sizeclass
......
......@@ -8,6 +8,14 @@ package runtime
import "unsafe"
type finblock struct {
alllink *finblock
next *finblock
cnt int32
_ int32
fin [(_FinBlockSize - 2*ptrSize - 2*4) / unsafe.Sizeof(finalizer{})]finalizer
}
var finlock mutex // protects the following variables
var fing *g // goroutine that runs finalizers
var finq *finblock // list of finalizers that are to be executed
......@@ -17,6 +25,15 @@ var fingwait bool
var fingwake bool
var allfin *finblock // list of all blocks
// NOTE: Layout known to queuefinalizer.
type finalizer struct {
fn *funcval // function to call
arg unsafe.Pointer // ptr to object
nret uintptr // bytes of return values from fn
fint *_type // type of first argument of fn
ot *ptrtype // type of ptr to object
}
var finalizer1 = [...]byte{
// Each Finalizer is 5 words, ptr ptr uintptr ptr ptr.
// Each byte describes 4 words.
......
......@@ -10,6 +10,34 @@ package runtime
import "unsafe"
// FixAlloc is a simple free-list allocator for fixed size objects.
// Malloc uses a FixAlloc wrapped around sysAlloc to manage its
// MCache and MSpan objects.
//
// Memory returned by FixAlloc_Alloc is not zeroed.
// The caller is responsible for locking around FixAlloc calls.
// Callers can keep state in the object but the first word is
// smashed by freeing and reallocating.
type fixalloc struct {
size uintptr
first unsafe.Pointer // go func(unsafe.pointer, unsafe.pointer); f(arg, p) called first time p is returned
arg unsafe.Pointer
list *mlink
chunk *byte
nchunk uint32
inuse uintptr // in-use bytes now
stat *uint64
}
// A generic linked list of blocks. (Typically the block is bigger than sizeof(MLink).)
// Since assignments to mlink.next will result in a write barrier being performed,
// this cannot be used by some of the internal GC structures. For example, when
// the sweeper is placing an unmarked object on the free list it does not want the
// write barrier to be called, since that could result in the object being reachable.
type mlink struct {
next *mlink
}
// Initialize f to allocate objects of the given size,
// using the allocator to obtain chunks of memory.
func fixAlloc_Init(f *fixalloc, size uintptr, first func(unsafe.Pointer, unsafe.Pointer), arg unsafe.Pointer, stat *uint64) {
......
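To show the intended usage pattern, a rough sketch of how a fixalloc is wired up and used, mirroring the cachealloc setup in mHeap_Init further down. fixAlloc_Alloc and fixAlloc_Free are assumed to be the companion routines in the collapsed body of this file, and the caller is assumed to hold the lock that protects the fixalloc:

var cachealloc fixalloc

func exampleFixAllocUse() {
	// One fixalloc per fixed-size runtime structure; here, mcache objects.
	fixAlloc_Init(&cachealloc, unsafe.Sizeof(mcache{}), nil, nil, &memstats.mcache_sys)
	c := (*mcache)(fixAlloc_Alloc(&cachealloc)) // first word will be reused as the free-list link on free
	// ... use c ...
	fixAlloc_Free(&cachealloc, unsafe.Pointer(c))
}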
This diff is collapsed.
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package runtime
import _ "unsafe" // for go:linkname
//go:linkname runtime_debug_freeOSMemory runtime/debug.freeOSMemory
func runtime_debug_freeOSMemory() {
gogc(2) // force GC and do eager sweep
systemstack(scavenge_m)
}
var poolcleanup func()
//go:linkname sync_runtime_registerPoolCleanup sync.runtime_registerPoolCleanup
func sync_runtime_registerPoolCleanup(f func()) {
poolcleanup = f
}
func clearpools() {
// clear sync.Pools
if poolcleanup != nil {
poolcleanup()
}
for _, p := range &allp {
if p == nil {
break
}
// clear tinyalloc pool
if c := p.mcache; c != nil {
c.tiny = nil
c.tinyoffset = 0
// disconnect cached list before dropping it on the floor,
// so that a dangling ref to one entry does not pin all of them.
var sg, sgnext *sudog
for sg = c.sudogcache; sg != nil; sg = sgnext {
sgnext = sg.next
sg.next = nil
}
c.sudogcache = nil
}
// clear defer pools
for i := range p.deferpool {
// disconnect cached list before dropping it on the floor,
// so that a dangling ref to one entry does not pin all of them.
var d, dlink *_defer
for d = p.deferpool[i]; d != nil; d = dlink {
dlink = d.link
d.link = nil
}
p.deferpool[i] = nil
}
}
}
// backgroundgc runs in a goroutine and does the concurrent GC work.
// bggc holds the state of backgroundgc.
func backgroundgc() {
bggc.g = getg()
for {
gcwork(0)
lock(&bggc.lock)
bggc.working = 0
goparkunlock(&bggc.lock, "Concurrent GC wait", traceEvGoBlock)
}
}
func bgsweep() {
sweep.g = getg()
for {
for gosweepone() != ^uintptr(0) {
sweep.nbgsweep++
Gosched()
}
lock(&gclock)
if !gosweepdone() {
// This can happen if a GC runs between
// gosweepone returning ^0 above
// and the lock being acquired.
unlock(&gclock)
continue
}
sweep.parked = true
goparkunlock(&gclock, "GC sweep wait", traceEvGoBlock)
}
}
This diff is collapsed.
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Garbage collector: sweeping
package runtime
import "unsafe"
var sweep sweepdata
// State of background sweep.
// Protected by gclock.
type sweepdata struct {
g *g
parked bool
started bool
spanidx uint32 // background sweeper position
nbgsweep uint32
npausesweep uint32
}
var gclock mutex
//go:nowritebarrier
func finishsweep_m() {
// The world is stopped so we should be able to complete the sweeps
// quickly.
for sweepone() != ^uintptr(0) {
sweep.npausesweep++
}
// There may be some other spans being swept concurrently that
// we need to wait for. If finishsweep_m is done with the world stopped
// this code is not required.
sg := mheap_.sweepgen
for _, s := range work.spans {
if s.sweepgen != sg && s.state == _MSpanInUse {
mSpan_EnsureSwept(s)
}
}
}
func bgsweep() {
sweep.g = getg()
for {
for gosweepone() != ^uintptr(0) {
sweep.nbgsweep++
Gosched()
}
lock(&gclock)
if !gosweepdone() {
// This can happen if a GC runs between
// gosweepone returning ^0 above
// and the lock being acquired.
unlock(&gclock)
continue
}
sweep.parked = true
goparkunlock(&gclock, "GC sweep wait", traceEvGoBlock)
}
}
// sweeps one span
// returns number of pages returned to heap, or ^uintptr(0) if there is nothing to sweep
//go:nowritebarrier
func sweepone() uintptr {
_g_ := getg()
// increment locks to ensure that the goroutine is not preempted
// in the middle of sweep thus leaving the span in an inconsistent state for next GC
_g_.m.locks++
sg := mheap_.sweepgen
for {
idx := xadd(&sweep.spanidx, 1) - 1
if idx >= uint32(len(work.spans)) {
mheap_.sweepdone = 1
_g_.m.locks--
return ^uintptr(0)
}
s := work.spans[idx]
if s.state != mSpanInUse {
s.sweepgen = sg
continue
}
if s.sweepgen != sg-2 || !cas(&s.sweepgen, sg-2, sg-1) {
continue
}
npages := s.npages
if !mSpan_Sweep(s, false) {
npages = 0
}
_g_.m.locks--
return npages
}
}
//go:nowritebarrier
func gosweepone() uintptr {
var ret uintptr
systemstack(func() {
ret = sweepone()
})
return ret
}
//go:nowritebarrier
func gosweepdone() bool {
return mheap_.sweepdone != 0
}
// Returns only when span s has been swept.
//go:nowritebarrier
func mSpan_EnsureSwept(s *mspan) {
// Caller must disable preemption.
// Otherwise when this function returns the span can become unswept again
// (if GC is triggered on another goroutine).
_g_ := getg()
if _g_.m.locks == 0 && _g_.m.mallocing == 0 && _g_ != _g_.m.g0 {
throw("MSpan_EnsureSwept: m is not locked")
}
sg := mheap_.sweepgen
if atomicload(&s.sweepgen) == sg {
return
}
// The caller must be sure that the span is a MSpanInUse span.
if cas(&s.sweepgen, sg-2, sg-1) {
mSpan_Sweep(s, false)
return
}
// unfortunate condition, and we don't have efficient means to wait
for atomicload(&s.sweepgen) != sg {
osyield()
}
}
// Sweep frees or collects finalizers for blocks not marked in the mark phase.
// It clears the mark bits in preparation for the next GC round.
// Returns true if the span was returned to heap.
// If preserve=true, don't return it to heap nor relink in MCentral lists;
// caller takes care of it.
//TODO go:nowritebarrier
func mSpan_Sweep(s *mspan, preserve bool) bool {
if checkmarkphase {
throw("MSpan_Sweep: checkmark only runs in STW and after the sweep")
}
// It's critical that we enter this function with preemption disabled,
// GC must not start while we are in the middle of this function.
_g_ := getg()
if _g_.m.locks == 0 && _g_.m.mallocing == 0 && _g_ != _g_.m.g0 {
throw("MSpan_Sweep: m is not locked")
}
sweepgen := mheap_.sweepgen
if s.state != mSpanInUse || s.sweepgen != sweepgen-1 {
print("MSpan_Sweep: state=", s.state, " sweepgen=", s.sweepgen, " mheap.sweepgen=", sweepgen, "\n")
throw("MSpan_Sweep: bad span state")
}
if trace.enabled {
traceGCSweepStart()
}
cl := s.sizeclass
size := s.elemsize
res := false
nfree := 0
var head, end gclinkptr
c := _g_.m.mcache
sweepgenset := false
// Mark any free objects in this span so we don't collect them.
for link := s.freelist; link.ptr() != nil; link = link.ptr().next {
heapBitsForAddr(uintptr(link)).setMarkedNonAtomic()
}
// Unlink & free special records for any objects we're about to free.
specialp := &s.specials
special := *specialp
for special != nil {
// A finalizer can be set for an inner byte of an object, find object beginning.
p := uintptr(s.start<<_PageShift) + uintptr(special.offset)/size*size
hbits := heapBitsForAddr(p)
if !hbits.isMarked() {
// Find the exact byte for which the special was setup
// (as opposed to object beginning).
p := uintptr(s.start<<_PageShift) + uintptr(special.offset)
// about to free object: splice out special record
y := special
special = special.next
*specialp = special
if !freespecial(y, unsafe.Pointer(p), size, false) {
// stop freeing of object if it has a finalizer
hbits.setMarkedNonAtomic()
}
} else {
// object is still live: keep special record
specialp = &special.next
special = *specialp
}
}
// Sweep through n objects of given size starting at p.
// This thread owns the span now, so it can manipulate
// the block bitmap without atomic operations.
size, n, _ := s.layout()
heapBitsSweepSpan(s.base(), size, n, func(p uintptr) {
// At this point we know that we are looking at a garbage object
// that needs to be collected.
if debug.allocfreetrace != 0 {
tracefree(unsafe.Pointer(p), size)
}
// Reset to allocated+noscan.
if cl == 0 {
// Free large span.
if preserve {
throw("can't preserve large span")
}
heapBitsForSpan(p).clearSpan(s.layout())
s.needzero = 1
// important to set sweepgen before returning it to heap
atomicstore(&s.sweepgen, sweepgen)
sweepgenset = true
// NOTE(rsc,dvyukov): The original implementation of efence
// in CL 22060046 used SysFree instead of SysFault, so that
// the operating system would eventually give the memory
// back to us again, so that an efence program could run
// longer without running out of memory. Unfortunately,
// calling SysFree here without any kind of adjustment of the
// heap data structures means that when the memory does
// come back to us, we have the wrong metadata for it, either in
// the MSpan structures or in the garbage collection bitmap.
// Using SysFault here means that the program will run out of
// memory fairly quickly in efence mode, but at least it won't
// have mysterious crashes due to confused memory reuse.
// It should be possible to switch back to SysFree if we also
// implement and then call some kind of MHeap_DeleteSpan.
if debug.efence > 0 {
s.limit = 0 // prevent mlookup from finding this span
sysFault(unsafe.Pointer(p), size)
} else {
mHeap_Free(&mheap_, s, 1)
}
c.local_nlargefree++
c.local_largefree += size
reduction := int64(size) * int64(gcpercent+100) / 100
if int64(memstats.next_gc)-reduction > int64(heapminimum) {
xadd64(&memstats.next_gc, -reduction)
} else {
atomicstore64(&memstats.next_gc, heapminimum)
}
res = true
} else {
// Free small object.
if size > 2*ptrSize {
*(*uintptr)(unsafe.Pointer(p + ptrSize)) = uintptrMask & 0xdeaddeaddeaddead // mark as "needs to be zeroed"
} else if size > ptrSize {
*(*uintptr)(unsafe.Pointer(p + ptrSize)) = 0
}
if head.ptr() == nil {
head = gclinkptr(p)
} else {
end.ptr().next = gclinkptr(p)
}
end = gclinkptr(p)
end.ptr().next = gclinkptr(0x0bade5)
nfree++
}
})
// We need to set s.sweepgen = h.sweepgen only when all blocks are swept,
// because of the potential for a concurrent free/SetFinalizer.
// But we need to set it before we make the span available for allocation
// (return it to heap or mcentral), because allocation code assumes that a
// span is already swept if available for allocation.
if !sweepgenset && nfree == 0 {
// The span must be in our exclusive ownership until we update sweepgen,
// check for potential races.
if s.state != mSpanInUse || s.sweepgen != sweepgen-1 {
print("MSpan_Sweep: state=", s.state, " sweepgen=", s.sweepgen, " mheap.sweepgen=", sweepgen, "\n")
throw("MSpan_Sweep: bad span state after sweep")
}
atomicstore(&s.sweepgen, sweepgen)
}
if nfree > 0 {
c.local_nsmallfree[cl] += uintptr(nfree)
c.local_cachealloc -= intptr(uintptr(nfree) * size)
reduction := int64(nfree) * int64(size) * int64(gcpercent+100) / 100
if int64(memstats.next_gc)-reduction > int64(heapminimum) {
xadd64(&memstats.next_gc, -reduction)
} else {
atomicstore64(&memstats.next_gc, heapminimum)
}
res = mCentral_FreeSpan(&mheap_.central[cl].mcentral, s, int32(nfree), head, end, preserve)
// MCentral_FreeSpan updates sweepgen
}
if trace.enabled {
traceGCSweepDone()
traceNextGC()
}
return res
}
......@@ -4,7 +4,72 @@
// Page heap.
//
// See malloc.h for overview.
// See malloc.go for overview.
package runtime
import "unsafe"
// Main malloc heap.
// The heap itself is the "free[]" and "large" arrays,
// but all the other global data is here too.
type mheap struct {
lock mutex
free [_MaxMHeapList]mspan // free lists of given length
freelarge mspan // free lists length >= _MaxMHeapList
busy [_MaxMHeapList]mspan // busy lists of large objects of given length
busylarge mspan // busy lists of large objects length >= _MaxMHeapList
allspans **mspan // all spans out there
gcspans **mspan // copy of allspans referenced by gc marker or sweeper
nspan uint32
sweepgen uint32 // sweep generation, see comment in mspan
sweepdone uint32 // all spans are swept
// span lookup
spans **mspan
spans_mapped uintptr
// range of addresses we might see in the heap
bitmap uintptr
bitmap_mapped uintptr
arena_start uintptr
arena_used uintptr
arena_end uintptr
arena_reserved bool
// write barrier shadow data+heap.
// 64-bit systems only, enabled by GODEBUG=wbshadow=1.
shadow_enabled bool // shadow should be updated and checked
shadow_reserved bool // shadow memory is reserved
shadow_heap uintptr // heap-addr + shadow_heap = shadow heap addr
shadow_data uintptr // data-addr + shadow_data = shadow data addr
data_start uintptr // start of shadowed data addresses
data_end uintptr // end of shadowed data addresses
// central free lists for small size classes.
// the padding makes sure that the MCentrals are
// spaced CacheLineSize bytes apart, so that each MCentral.lock
// gets its own cache line.
central [_NumSizeClasses]struct {
mcentral mcentral
pad [_CacheLineSize]byte
}
spanalloc fixalloc // allocator for span*
cachealloc fixalloc // allocator for mcache*
specialfinalizeralloc fixalloc // allocator for specialfinalizer*
specialprofilealloc fixalloc // allocator for specialprofile*
speciallock mutex // lock for special record allocators.
// Malloc stats.
largefree uint64 // bytes freed for large objects (>maxsmallsize)
nlargefree uint64 // number of frees for large objects (>maxsmallsize)
nsmallfree [_NumSizeClasses]uint64 // number of frees for small objects (<=maxsmallsize)
}
var mheap_ mheap
// An MSpan is a run of pages.
//
// When a MSpan is in the heap free list, state == MSpanFree
// and heapmap(s->start) == span, heapmap(s->start+s->npages-1) == span.
......@@ -12,9 +77,55 @@
// When a MSpan is allocated, state == MSpanInUse or MSpanStack
// and heapmap(i) == span for all s->start <= i < s->start+s->npages.
package runtime
import "unsafe"
// Every MSpan is in one doubly-linked list,
// either one of the MHeap's free lists or one of the
// MCentral's span lists. We use empty MSpan structures as list heads.
const (
_MSpanInUse = iota // allocated for garbage collected heap
_MSpanStack // allocated for use by stack allocator
_MSpanFree
_MSpanListHead
_MSpanDead
)
type mspan struct {
next *mspan // in a span linked list
prev *mspan // in a span linked list
start pageID // starting page number
npages uintptr // number of pages in span
freelist gclinkptr // list of free objects
// sweep generation:
// if sweepgen == h->sweepgen - 2, the span needs sweeping
// if sweepgen == h->sweepgen - 1, the span is currently being swept
// if sweepgen == h->sweepgen, the span is swept and ready to use
// h->sweepgen is incremented by 2 after every GC
sweepgen uint32
ref uint16 // capacity - number of objects in freelist
sizeclass uint8 // size class
incache bool // being used by an mcache
state uint8 // mspaninuse etc
needzero uint8 // needs to be zeroed before allocation
elemsize uintptr // computed from sizeclass or from npages
unusedsince int64 // first time spotted by gc in mspanfree state
npreleased uintptr // number of pages released to the os
limit uintptr // end of data in span
speciallock mutex // guards specials list
specials *special // linked list of special records sorted by offset.
}
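For illustration only, a hypothetical helper (not part of the runtime) that spells out the three sweepgen states described in the comment above:

// sweepState classifies s relative to the heap's current sweep generation.
func sweepState(s *mspan, heapSweepgen uint32) string {
	switch s.sweepgen {
	case heapSweepgen - 2:
		return "needs sweeping"
	case heapSweepgen - 1:
		return "being swept"
	case heapSweepgen:
		return "swept and ready to use"
	}
	return "unexpected sweepgen"
}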
func (s *mspan) base() uintptr {
return uintptr(s.start << _PageShift)
}
func (s *mspan) layout() (size, n, total uintptr) {
total = s.npages << _PageShift
size = s.elemsize
if size > 0 {
n = total / size
}
return
}
var h_allspans []*mspan // TODO: make this h.allspans once mheap can be defined in Go
var h_spans []*mspan // TODO: make this h.spans once mheap can be defined in Go
......@@ -50,6 +161,73 @@ func recordspan(vh unsafe.Pointer, p unsafe.Pointer) {
h.nspan = uint32(len(h_allspans))
}
// inheap reports whether b is a pointer into a (potentially dead) heap object.
// It returns false for pointers into stack spans.
//go:nowritebarrier
func inheap(b uintptr) bool {
if b == 0 || b < mheap_.arena_start || b >= mheap_.arena_used {
return false
}
// Not a beginning of a block, consult span table to find the block beginning.
k := b >> _PageShift
x := k
x -= mheap_.arena_start >> _PageShift
s := h_spans[x]
if s == nil || pageID(k) < s.start || b >= s.limit || s.state != mSpanInUse {
return false
}
return true
}
func mlookup(v uintptr, base *uintptr, size *uintptr, sp **mspan) int32 {
_g_ := getg()
_g_.m.mcache.local_nlookup++
if ptrSize == 4 && _g_.m.mcache.local_nlookup >= 1<<30 {
// purge cache stats to prevent overflow
lock(&mheap_.lock)
purgecachedstats(_g_.m.mcache)
unlock(&mheap_.lock)
}
s := mHeap_LookupMaybe(&mheap_, unsafe.Pointer(v))
if sp != nil {
*sp = s
}
if s == nil {
if base != nil {
*base = 0
}
if size != nil {
*size = 0
}
return 0
}
p := uintptr(s.start) << _PageShift
if s.sizeclass == 0 {
// Large object.
if base != nil {
*base = p
}
if size != nil {
*size = s.npages << _PageShift
}
return 1
}
n := s.elemsize
if base != nil {
i := (uintptr(v) - uintptr(p)) / n
*base = p + i*n
}
if size != nil {
*size = n
}
return 1
}
// Initialize the heap.
func mHeap_Init(h *mheap, spans_size uintptr) {
fixAlloc_Init(&h.spanalloc, unsafe.Sizeof(mspan{}), recordspan, unsafe.Pointer(h), &memstats.mspan_sys)
......@@ -635,6 +813,21 @@ func mSpanList_InsertBack(list *mspan, span *mspan) {
span.prev.next = span
}
const (
_KindSpecialFinalizer = 1
_KindSpecialProfile = 2
// Note: The finalizer special must be first because if we're freeing
// an object, a finalizer special will cause the freeing operation
// to abort, and we want to keep the other special records around
// if that happens.
)
type special struct {
next *special // linked list in span
offset uint16 // span offset of object
kind byte // kind of special
}
// Adds the special record s to the list of special records for
// the object p. All fields of s should be filled in except for
// offset & next, which this routine will fill in.
......@@ -723,6 +916,15 @@ func removespecial(p unsafe.Pointer, kind uint8) *special {
return nil
}
// The described object has a finalizer set for it.
type specialfinalizer struct {
special special
fn *funcval
nret uintptr
fint *_type
ot *ptrtype
}
// Adds a finalizer to the object p. Returns true if it succeeded.
func addfinalizer(p unsafe.Pointer, f *funcval, nret uintptr, fint *_type, ot *ptrtype) bool {
lock(&mheap_.speciallock)
......@@ -755,6 +957,12 @@ func removefinalizer(p unsafe.Pointer) {
unlock(&mheap_.speciallock)
}
// The described object is being heap profiled.
type specialprofile struct {
special special
b *bucket
}
// Set the heap profile bucket associated with addr to b.
func setprofilebucket(p unsafe.Pointer, b *bucket) {
lock(&mheap_.speciallock)
......
......@@ -27,8 +27,15 @@
package runtime
//var class_to_size [_NumSizeClasses]int32
//var class_to_allocnpages [_NumSizeClasses]int32
// Size classes. Computed and initialized by InitSizes.
//
// SizeToClass(0 <= n <= MaxSmallSize) returns the size class,
// 1 <= sizeclass < NumSizeClasses, for n.
// Size class 0 is reserved to mean "not small".
//
// class_to_size[i] = largest size in class i
// class_to_allocnpages[i] = number of pages to allocate when
// making new objects in class i
// The SizeToClass lookup is implemented using two arrays,
// one mapping sizes <= 1024 to their class and one mapping
......@@ -38,8 +45,11 @@ package runtime
// are 128-aligned, so the second array is indexed by the
// size divided by 128 (rounded up). The arrays are filled in
// by InitSizes.
//var size_to_class8 [1024/8 + 1]int8
//var size_to_class128 [(_MaxSmallSize-1024)/128 + 1]int8
var class_to_size [_NumSizeClasses]int32
var class_to_allocnpages [_NumSizeClasses]int32
var size_to_class8 [1024/8 + 1]int8
var size_to_class128 [(_MaxSmallSize-1024)/128 + 1]int8
func sizeToClass(size int32) int32 {
if size > _MaxSmallSize {
......
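The indexing described by the comment above amounts to something like the following sketch; the exact boundary handling in the real, collapsed function body may differ slightly:

func sizeToClassSketch(size int32) int32 {
	if size <= 1024 {
		// Small sizes are 8-aligned: index by size/8, rounded up.
		return int32(size_to_class8[(size+7)>>3])
	}
	// Larger small sizes are 128-aligned: index by (size-1024)/128, rounded up.
	return int32(size_to_class128[(size-1024+127)>>7])
}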
......@@ -528,6 +528,21 @@ func quiesce(mastergp *g) {
mcall(mquiesce)
}
// Holding worldsema grants an M the right to try to stop the world.
// The procedure is:
//
// semacquire(&worldsema);
// m.preemptoff = "reason";
// stoptheworld();
//
// ... do stuff ...
//
// m.preemptoff = "";
// semrelease(&worldsema);
// starttheworld();
//
var worldsema uint32 = 1
// This is used by the GC as well as the routines that do stack dumps. In the case
// of GC all the routines can be reliably stopped. This is not always the case
// when the system is in panic or being exited.
......
......@@ -239,3 +239,13 @@ func prefetcht0(addr uintptr)
func prefetcht1(addr uintptr)
func prefetcht2(addr uintptr)
func prefetchnta(addr uintptr)
func unixnanotime() int64 {
sec, nsec := time_now()
return sec*1e9 + int64(nsec)
}
// round n up to a multiple of a. a must be a power of 2.
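// For example, round(13, 8) == 16, round(16, 8) == 16, and round(0, 8) == 0.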
func round(n, a uintptr) uintptr {
return (n + a - 1) &^ (a - 1)
}
......@@ -299,3 +299,17 @@ func readvarint(p []byte) (newp []byte, val uint32) {
}
return p, v
}
type stackmap struct {
n int32 // number of bitmaps
nbit int32 // number of bits in each bitmap
bytedata [1]byte // bitmaps, each starting on a 32-bit boundary
}
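// stackmapdata returns the n'th bitmap in stkmap. Each bitmap starts on a
// 32-bit boundary, so consecutive bitmaps are (nbit+31)/32*4 bytes apart.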
//go:nowritebarrier
func stackmapdata(stkmap *stackmap, n int32) bitvector {
if n < 0 || n >= stkmap.n {
throw("stackmapdata: index out of range")
}
return bitvector{stkmap.nbit, (*byte)(add(unsafe.Pointer(&stkmap.bytedata), uintptr(n*((stkmap.nbit+31)/32*4))))}
}