Commit 1652a2c3 authored by Matthew Dempsky

runtime: add mSpanList type to represent lists of mspans

This CL introduces a new mSpanList type to replace the empty mspan
variables that were previously used as list heads.

For type safety, the previous circular linked list data structure has
been replaced with a tail queue. One complication is that
mSpanList_Remove now needs to know which list a span is being removed
from, but this appears to be computable in all circumstances.

As a temporary sanity check, mSpanList_Insert and mSpanList_InsertBack
record the list that an mspan has been inserted into so that
mSpanList_Remove can verify that the correct list was specified.

Whereas mspan is 112 bytes on amd64, mSpanList is only 16 bytes.  This
shrinks the size of mheap from 50216 bytes to 12584 bytes.

Change-Id: I8146364753dbc3b4ab120afbb9c7b8740653c216
Reviewed-on: https://go-review.googlesource.com/15906
Run-TryBot: Matthew Dempsky <mdempsky@google.com>
Reviewed-by: Austin Clements <austin@google.com>
parent 151f4ec9
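To make the new representation concrete before the diff: below is a minimal standalone sketch of the tail-queue shape this CL adopts. The `node`/`list` types and helper names are illustrative stand-ins for `mspan`/`mSpanList`, not runtime code. The two-word list head is what shrinks mheap: list heads no longer carry a full mspan.

```go
package main

import "fmt"

// node stands in for mspan; only the linkage fields matter here.
type node struct {
	next *node  // next node in list, or nil if none
	prev **node // previous node's next field, or the head's first field
	val  int
}

// list mirrors the shape of mSpanList: a head of just two words.
type list struct {
	first *node  // first node, or nil if the list is empty
	last  **node // last node's next field, or &first if empty
}

func initList(l *list) {
	l.first = nil
	l.last = &l.first
}

// insertBack appends n in O(1) via the last pointer.
func insertBack(l *list, n *node) {
	n.next = nil
	n.prev = l.last
	*l.last = n
	l.last = &n.next
}

// remove unlinks n in O(1). It needs the list only to repair last
// when n is the final element, which is exactly why the new
// mSpanList_Remove takes a list argument.
func remove(l *list, n *node) {
	if n.next != nil {
		n.next.prev = n.prev
	} else {
		l.last = n.prev
	}
	*n.prev = n.next
	n.next, n.prev = nil, nil
}

func main() {
	var l list
	initList(&l)
	a, b := &node{val: 1}, &node{val: 2}
	insertBack(&l, a)
	insertBack(&l, b)
	remove(&l, a)
	for n := l.first; n != nil; n = n.next {
		fmt.Println(n.val) // prints 2
	}
}
```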
src/runtime/mcentral.go

@@ -16,8 +16,8 @@ package runtime
 type mcentral struct {
 	lock      mutex
 	sizeclass int32
-	nonempty  mspan // list of spans with a free object
-	empty     mspan // list of spans with no free objects (or cached in an mcache)
+	nonempty  mSpanList // list of spans with a free object
+	empty     mSpanList // list of spans with no free objects (or cached in an mcache)
 }
 
 // Initialize a single central free list.

@@ -36,9 +36,9 @@ func mCentral_CacheSpan(c *mcentral) *mspan {
 	sg := mheap_.sweepgen
 retry:
 	var s *mspan
-	for s = c.nonempty.next; s != &c.nonempty; s = s.next {
+	for s = c.nonempty.first; s != nil; s = s.next {
 		if s.sweepgen == sg-2 && cas(&s.sweepgen, sg-2, sg-1) {
-			mSpanList_Remove(s)
+			mSpanList_Remove(&c.nonempty, s)
 			mSpanList_InsertBack(&c.empty, s)
 			unlock(&c.lock)
 			mSpan_Sweep(s, true)

@@ -49,17 +49,17 @@ retry:
 			continue
 		}
 		// we have a nonempty span that does not require sweeping, allocate from it
-		mSpanList_Remove(s)
+		mSpanList_Remove(&c.nonempty, s)
 		mSpanList_InsertBack(&c.empty, s)
 		unlock(&c.lock)
 		goto havespan
 	}
-	for s = c.empty.next; s != &c.empty; s = s.next {
+	for s = c.empty.first; s != nil; s = s.next {
 		if s.sweepgen == sg-2 && cas(&s.sweepgen, sg-2, sg-1) {
 			// we have an empty span that requires sweeping,
 			// sweep it and see if we can free some space in it
-			mSpanList_Remove(s)
+			mSpanList_Remove(&c.empty, s)
 			// swept spans are at the end of the list
 			mSpanList_InsertBack(&c.empty, s)
 			unlock(&c.lock)

@@ -119,7 +119,7 @@ func mCentral_UncacheSpan(c *mcentral, s *mspan) {
 	cap := int32((s.npages << _PageShift) / s.elemsize)
 	n := cap - int32(s.ref)
 	if n > 0 {
-		mSpanList_Remove(s)
+		mSpanList_Remove(&c.empty, s)
 		mSpanList_Insert(&c.nonempty, s)
 	}
 	unlock(&c.lock)

@@ -145,7 +145,7 @@ func mCentral_FreeSpan(c *mcentral, s *mspan, n int32, start gclinkptr, end gcli
 	if preserve {
 		// preserve is set only when called from MCentral_CacheSpan above,
 		// the span must be in the empty list.
-		if s.next == nil {
+		if !mSpan_InList(s) {
 			throw("can't preserve unlinked span")
 		}
 		atomicstore(&s.sweepgen, mheap_.sweepgen)

@@ -156,7 +156,7 @@ func mCentral_FreeSpan(c *mcentral, s *mspan, n int32, start gclinkptr, end gcli
 	// Move to nonempty if necessary.
 	if wasempty {
-		mSpanList_Remove(s)
+		mSpanList_Remove(&c.empty, s)
 		mSpanList_Insert(&c.nonempty, s)
 	}

@@ -172,7 +172,7 @@ func mCentral_FreeSpan(c *mcentral, s *mspan, n int32, start gclinkptr, end gcli
 	}
 	// s is completely freed, return it to the heap.
-	mSpanList_Remove(s)
+	mSpanList_Remove(&c.nonempty, s)
 	s.needzero = 1
 	s.freelist = 0
 	unlock(&c.lock)
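A note on the mCentral_FreeSpan check above: in the old circular list, an unlinked span could be recognized by a nil next pointer, but in a nil-terminated tail queue the last linked span also has next == nil. Linkage is instead tested through prev, which is non-nil for every linked span (the first span's prev points at the list head's first field). A small runnable sketch, using hypothetical `span`/`spanList` stand-ins for the runtime types:

```go
package main

import "fmt"

// span and spanList are illustrative stand-ins for mspan/mSpanList.
type span struct {
	next *span
	prev **span
}

type spanList struct {
	first *span
	last  **span
}

func initList(l *spanList) { l.first, l.last = nil, &l.first }

func insertBack(l *spanList, s *span) {
	s.next = nil
	s.prev = l.last
	*l.last = s
	l.last = &s.next
}

// inList mirrors the new mSpan_InList: every linked span has a
// non-nil prev (at minimum it points at the head's first field),
// while next is nil for the tail element even when linked.
func inList(s *span) bool { return s.prev != nil }

func main() {
	var l spanList
	initList(&l)
	s := new(span)
	fmt.Println(inList(s), s.next == nil) // false true: unlinked
	insertBack(&l, s)
	fmt.Println(inList(s), s.next == nil) // true true: linked, but at the tail
}
```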
src/runtime/mheap.go

@@ -15,12 +15,12 @@ import "unsafe"
 // but all the other global data is here too.
 type mheap struct {
 	lock      mutex
-	free      [_MaxMHeapList]mspan // free lists of given length
-	freelarge mspan                // free lists length >= _MaxMHeapList
-	busy      [_MaxMHeapList]mspan // busy lists of large objects of given length
-	busylarge mspan                // busy lists of large objects length >= _MaxMHeapList
+	free      [_MaxMHeapList]mSpanList // free lists of given length
+	freelarge mSpanList                // free lists length >= _MaxMHeapList
+	busy      [_MaxMHeapList]mSpanList // busy lists of large objects of given length
+	busylarge mSpanList                // busy lists of large objects length >= _MaxMHeapList
 	allspans  **mspan // all spans out there
 	gcspans   **mspan // copy of allspans referenced by gc marker or sweeper
 	nspan     uint32
 	sweepgen  uint32 // sweep generation, see comment in mspan
 	sweepdone uint32 // all spans are swept

@@ -77,7 +77,7 @@ var mheap_ mheap
 // Every MSpan is in one doubly-linked list,
 // either one of the MHeap's free lists or one of the
-// MCentral's span lists. We use empty MSpan structures as list heads.
+// MCentral's span lists.
 // An MSpan representing actual memory has state _MSpanInUse,
 // _MSpanStack, or _MSpanFree. Transitions between these states are

@@ -97,13 +97,22 @@ const (
 	_MSpanInUse = iota // allocated for garbage collected heap
 	_MSpanStack        // allocated for use by stack allocator
 	_MSpanFree
-	_MSpanListHead
 	_MSpanDead
 )
 
+// mSpanList heads a linked list of spans.
+//
+// Linked list structure is based on BSD's "tail queue" data structure.
+type mSpanList struct {
+	first *mspan  // first span in list, or nil if none
+	last  **mspan // last span's next field, or first if none
+}
+
 type mspan struct {
-	next *mspan // in a span linked list
-	prev *mspan // in a span linked list
+	next *mspan     // next span in list, or nil if none
+	prev **mspan    // previous span's next field, or list head's first field if none
+	list *mSpanList // For debugging. TODO: Remove.
 	start    pageID    // starting page number
 	npages   uintptr   // number of pages in span
 	freelist gclinkptr // list of free objects

@@ -320,13 +329,13 @@ func mHeap_MapSpans(h *mheap, arena_used uintptr) {
 // Sweeps spans in list until reclaims at least npages into heap.
 // Returns the actual number of pages reclaimed.
-func mHeap_ReclaimList(h *mheap, list *mspan, npages uintptr) uintptr {
+func mHeap_ReclaimList(h *mheap, list *mSpanList, npages uintptr) uintptr {
 	n := uintptr(0)
 	sg := mheap_.sweepgen
 retry:
-	for s := list.next; s != list; s = s.next {
+	for s := list.first; s != nil; s = s.next {
 		if s.sweepgen == sg-2 && cas(&s.sweepgen, sg-2, sg-1) {
-			mSpanList_Remove(s)
+			mSpanList_Remove(list, s)
 			// swept spans are at the end of the list
 			mSpanList_InsertBack(list, s)
 			unlock(&h.lock)

@@ -523,17 +532,20 @@ func mHeap_AllocStack(h *mheap, npage uintptr) *mspan {
 // The returned span has been removed from the
 // free list, but its state is still MSpanFree.
 func mHeap_AllocSpanLocked(h *mheap, npage uintptr) *mspan {
+	var list *mSpanList
 	var s *mspan
 
 	// Try in fixed-size lists up to max.
 	for i := int(npage); i < len(h.free); i++ {
-		if !mSpanList_IsEmpty(&h.free[i]) {
-			s = h.free[i].next
+		list = &h.free[i]
+		if !mSpanList_IsEmpty(list) {
+			s = list.first
 			goto HaveSpan
 		}
 	}
 
 	// Best fit in list of large spans.
+	list = &h.freelarge
 	s = mHeap_AllocLarge(h, npage)
 	if s == nil {
 		if !mHeap_Grow(h, npage) {

@@ -553,8 +565,8 @@ HaveSpan:
 	if s.npages < npage {
 		throw("MHeap_AllocLocked - bad npages")
 	}
-	mSpanList_Remove(s)
-	if s.next != nil || s.prev != nil {
+	mSpanList_Remove(list, s)
+	if mSpan_InList(s) {
 		throw("still in list")
 	}
 	if s.npreleased > 0 {

@@ -593,7 +605,7 @@ HaveSpan:
 	memstats.heap_idle -= uint64(npage << _PageShift)
 
 	//println("spanalloc", hex(s.start<<_PageShift))
-	if s.next != nil || s.prev != nil {
+	if mSpan_InList(s) {
 		throw("still in list")
 	}
 	return s

@@ -607,8 +619,8 @@ func mHeap_AllocLarge(h *mheap, npage uintptr) *mspan {
 // Search list for smallest span with >= npage pages.
 // If there are multiple smallest spans, take the one
 // with the earliest starting address.
-func bestFit(list *mspan, npage uintptr, best *mspan) *mspan {
-	for s := list.next; s != list; s = s.next {
+func bestFit(list *mSpanList, npage uintptr, best *mspan) *mspan {
+	for s := list.first; s != nil; s = s.next {
 		if s.npages < npage {
 			continue
 		}

@@ -729,6 +741,7 @@ func mHeap_FreeStack(h *mheap, s *mspan) {
 	unlock(&h.lock)
 }
 
+// s must be on a busy list (h.busy or h.busylarge) or unlinked.
 func mHeap_FreeSpanLocked(h *mheap, s *mspan, acctinuse, acctidle bool, unusedsince int64) {
 	switch s.state {
 	case _MSpanStack:

@@ -752,7 +765,9 @@ func mHeap_FreeSpanLocked(h *mheap, s *mspan, acctinuse, acctidle bool, unusedsi
 		memstats.heap_idle += uint64(s.npages << _PageShift)
 	}
 	s.state = _MSpanFree
-	mSpanList_Remove(s)
+	if mSpan_InList(s) {
+		mSpanList_Remove(mHeap_BusyList(h, s.npages), s)
+	}
 
 	// Stamp newly unused spans. The scavenger will use that
 	// info to potentially give back some pages to the OS.

@@ -767,40 +782,50 @@ func mHeap_FreeSpanLocked(h *mheap, s *mspan, acctinuse, acctidle bool, unusedsi
 	p -= uintptr(unsafe.Pointer(h.arena_start)) >> _PageShift
 	if p > 0 {
 		t := h_spans[p-1]
-		if t != nil && t.state != _MSpanInUse && t.state != _MSpanStack {
+		if t != nil && t.state == _MSpanFree {
 			s.start = t.start
 			s.npages += t.npages
 			s.npreleased = t.npreleased // absorb released pages
 			s.needzero |= t.needzero
 			p -= t.npages
 			h_spans[p] = s
-			mSpanList_Remove(t)
+			mSpanList_Remove(mHeap_FreeList(h, t.npages), t)
 			t.state = _MSpanDead
 			fixAlloc_Free(&h.spanalloc, unsafe.Pointer(t))
 		}
 	}
 	if (p+s.npages)*ptrSize < h.spans_mapped {
 		t := h_spans[p+s.npages]
-		if t != nil && t.state != _MSpanInUse && t.state != _MSpanStack {
+		if t != nil && t.state == _MSpanFree {
 			s.npages += t.npages
 			s.npreleased += t.npreleased
 			s.needzero |= t.needzero
 			h_spans[p+s.npages-1] = s
-			mSpanList_Remove(t)
+			mSpanList_Remove(mHeap_FreeList(h, t.npages), t)
 			t.state = _MSpanDead
 			fixAlloc_Free(&h.spanalloc, unsafe.Pointer(t))
 		}
 	}
 
 	// Insert s into appropriate list.
-	if s.npages < uintptr(len(h.free)) {
-		mSpanList_Insert(&h.free[s.npages], s)
-	} else {
-		mSpanList_Insert(&h.freelarge, s)
-	}
+	mSpanList_Insert(mHeap_FreeList(h, s.npages), s)
+}
+
+func mHeap_FreeList(h *mheap, npages uintptr) *mSpanList {
+	if npages < uintptr(len(h.free)) {
+		return &h.free[npages]
+	}
+	return &h.freelarge
+}
+
+func mHeap_BusyList(h *mheap, npages uintptr) *mSpanList {
+	if npages < uintptr(len(h.free)) {
+		return &h.busy[npages]
+	}
+	return &h.busylarge
 }
 
-func scavengelist(list *mspan, now, limit uint64) uintptr {
+func scavengelist(list *mSpanList, now, limit uint64) uintptr {
 	if _PhysPageSize > _PageSize {
 		// golang.org/issue/9993
 		// If the physical page size of the machine is larger than

@@ -815,7 +840,7 @@ func scavengelist(list *mspan, now, limit uint64) uintptr {
 	}
 	var sumreleased uintptr
-	for s := list.next; s != list; s = s.next {
+	for s := list.first; s != nil; s = s.next {
 		if (now-uint64(s.unusedsince)) > limit && s.npreleased != s.npages {
 			released := (s.npages - s.npreleased) << _PageShift
 			memstats.heap_released += uint64(released)

@@ -857,6 +882,7 @@ func runtime_debug_freeOSMemory() {
 func mSpan_Init(span *mspan, start pageID, npages uintptr) {
 	span.next = nil
 	span.prev = nil
+	span.list = nil
 	span.start = start
 	span.npages = npages
 	span.freelist = 0

@@ -872,47 +898,64 @@ func mSpan_Init(span *mspan, start pageID, npages uintptr) {
 	span.needzero = 0
 }
 
+func mSpan_InList(span *mspan) bool {
+	return span.prev != nil
+}
+
 // Initialize an empty doubly-linked list.
-func mSpanList_Init(list *mspan) {
-	list.state = _MSpanListHead
-	list.next = list
-	list.prev = list
+func mSpanList_Init(list *mSpanList) {
+	list.first = nil
+	list.last = &list.first
 }
 
-func mSpanList_Remove(span *mspan) {
-	if span.prev == nil && span.next == nil {
-		return
+func mSpanList_Remove(list *mSpanList, span *mspan) {
+	if span.prev == nil || span.list != list {
+		println("failed MSpanList_Remove", span, span.prev, span.list, list)
+		throw("MSpanList_Remove")
 	}
-	span.prev.next = span.next
-	span.next.prev = span.prev
-	span.prev = nil
+	if span.next != nil {
+		span.next.prev = span.prev
+	} else {
+		// TODO: After we remove the span.list != list check above,
+		// we could at least still check list.last == &span.next here.
+		list.last = span.prev
+	}
+	*span.prev = span.next
 	span.next = nil
+	span.prev = nil
+	span.list = nil
 }
 
-func mSpanList_IsEmpty(list *mspan) bool {
-	return list.next == list
+func mSpanList_IsEmpty(list *mSpanList) bool {
+	return list.first == nil
 }
 
-func mSpanList_Insert(list *mspan, span *mspan) {
-	if span.next != nil || span.prev != nil {
-		println("failed MSpanList_Insert", span, span.next, span.prev)
+func mSpanList_Insert(list *mSpanList, span *mspan) {
+	if span.next != nil || span.prev != nil || span.list != nil {
+		println("failed MSpanList_Insert", span, span.next, span.prev, span.list)
 		throw("MSpanList_Insert")
 	}
-	span.next = list.next
-	span.prev = list
-	span.next.prev = span
-	span.prev.next = span
+	span.next = list.first
+	if list.first != nil {
+		list.first.prev = &span.next
+	} else {
+		list.last = &span.next
+	}
+	list.first = span
+	span.prev = &list.first
+	span.list = list
 }
 
-func mSpanList_InsertBack(list *mspan, span *mspan) {
-	if span.next != nil || span.prev != nil {
-		println("failed MSpanList_InsertBack", span, span.next, span.prev)
+func mSpanList_InsertBack(list *mSpanList, span *mspan) {
+	if span.next != nil || span.prev != nil || span.list != nil {
+		println("failed MSpanList_InsertBack", span, span.next, span.prev, span.list)
 		throw("MSpanList_InsertBack")
 	}
-	span.next = list
-	span.prev = list.prev
-	span.next.prev = span
-	span.prev.next = span
+	span.next = nil
+	span.prev = list.last
+	*list.last = span
+	list.last = &span.next
+	span.list = list
 }
 
 const (
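The new mHeap_FreeList and mHeap_BusyList helpers above give mHeap_FreeSpanLocked a way to name the list a span is on, which mSpanList_Remove now requires. A toy sketch of the same selection pattern (the `maxList` cutoff and the names are illustrative, not the runtime's):

```go
package main

import "fmt"

// Spans below a size cutoff get a per-size list; everything larger
// shares one "large" list. maxList stands in for len(h.free).
const maxList = 4

type spanList struct{ name string }

var (
	free      [maxList]spanList
	freelarge = spanList{name: "freelarge"}
)

// freeListFor mirrors the shape of mHeap_FreeList.
func freeListFor(npages uintptr) *spanList {
	if npages < maxList {
		return &free[npages]
	}
	return &freelarge
}

func main() {
	for i := range free {
		free[i].name = fmt.Sprintf("free[%d]", i)
	}
	fmt.Println(freeListFor(2).name) // free[2]
	fmt.Println(freeListFor(9).name) // freelarge
}
```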
...@@ -142,12 +142,12 @@ const ( ...@@ -142,12 +142,12 @@ const (
// order = log_2(size/FixedStack) // order = log_2(size/FixedStack)
// There is a free list for each order. // There is a free list for each order.
// TODO: one lock per order? // TODO: one lock per order?
var stackpool [_NumStackOrders]mspan var stackpool [_NumStackOrders]mSpanList
var stackpoolmu mutex var stackpoolmu mutex
// List of stack spans to be freed at the end of GC. Protected by // List of stack spans to be freed at the end of GC. Protected by
// stackpoolmu. // stackpoolmu.
var stackFreeQueue mspan var stackFreeQueue mSpanList
// Cached value of haveexperiment("framepointer") // Cached value of haveexperiment("framepointer")
var framepointer_enabled bool var framepointer_enabled bool
...@@ -166,8 +166,8 @@ func stackinit() { ...@@ -166,8 +166,8 @@ func stackinit() {
// stackpoolmu held. // stackpoolmu held.
func stackpoolalloc(order uint8) gclinkptr { func stackpoolalloc(order uint8) gclinkptr {
list := &stackpool[order] list := &stackpool[order]
s := list.next s := list.first
if s == list { if s == nil {
// no free stacks. Allocate another span worth. // no free stacks. Allocate another span worth.
s = mHeap_AllocStack(&mheap_, _StackCacheSize>>_PageShift) s = mHeap_AllocStack(&mheap_, _StackCacheSize>>_PageShift)
if s == nil { if s == nil {
...@@ -194,7 +194,7 @@ func stackpoolalloc(order uint8) gclinkptr { ...@@ -194,7 +194,7 @@ func stackpoolalloc(order uint8) gclinkptr {
s.ref++ s.ref++
if s.freelist.ptr() == nil { if s.freelist.ptr() == nil {
// all stacks in s are allocated. // all stacks in s are allocated.
mSpanList_Remove(s) mSpanList_Remove(list, s)
} }
return x return x
} }
...@@ -228,7 +228,7 @@ func stackpoolfree(x gclinkptr, order uint8) { ...@@ -228,7 +228,7 @@ func stackpoolfree(x gclinkptr, order uint8) {
// pointer into a free span. // pointer into a free span.
// //
// By not freeing, we prevent step #4 until GC is done. // By not freeing, we prevent step #4 until GC is done.
mSpanList_Remove(s) mSpanList_Remove(&stackpool[order], s)
s.freelist = 0 s.freelist = 0
mHeap_FreeStack(&mheap_, s) mHeap_FreeStack(&mheap_, s)
} }
...@@ -994,10 +994,10 @@ func freeStackSpans() { ...@@ -994,10 +994,10 @@ func freeStackSpans() {
// Scan stack pools for empty stack spans. // Scan stack pools for empty stack spans.
for order := range stackpool { for order := range stackpool {
list := &stackpool[order] list := &stackpool[order]
for s := list.next; s != list; { for s := list.first; s != nil; {
next := s.next next := s.next
if s.ref == 0 { if s.ref == 0 {
mSpanList_Remove(s) mSpanList_Remove(list, s)
s.freelist = 0 s.freelist = 0
mHeap_FreeStack(&mheap_, s) mHeap_FreeStack(&mheap_, s)
} }
...@@ -1006,9 +1006,9 @@ func freeStackSpans() { ...@@ -1006,9 +1006,9 @@ func freeStackSpans() {
} }
// Free queued stack spans. // Free queued stack spans.
for stackFreeQueue.next != &stackFreeQueue { for !mSpanList_IsEmpty(&stackFreeQueue) {
s := stackFreeQueue.next s := stackFreeQueue.first
mSpanList_Remove(s) mSpanList_Remove(&stackFreeQueue, s)
mHeap_FreeStack(&mheap_, s) mHeap_FreeStack(&mheap_, s)
} }
......