Commit a33ef8d1 authored by Dmitriy Vyukov, committed by Russ Cox

runtime: account for all sys memory in MemStats

Currently lots of sys allocations are not accounted in any of XxxSys,
including the GC bitmap, spans table, GC roots blocks, GC finalizer blocks,
iface table, netpoll descriptors and more. Up to ~20% of sys memory can be
unaccounted. This change introduces two new stats, GCSys and OtherSys, for
GC metadata and all other miscellaneous allocations, respectively.
It also ensures that all XxxSys stats indeed sum up to Sys: every sys memory
allocation function now requires the stat to charge, so it is impossible to
miss an allocation.
Also fixes updating of mcache_sys/inuse, which were not updated after deallocation.

test/bench/garbage/parser before:
Sys		670064344
HeapSys		610271232
StackSys	65536
MSpanSys	14204928
MCacheSys	16384
BuckHashSys	1439992

after:
Sys		670064344
HeapSys		610271232
StackSys	65536
MSpanSys	14188544
MCacheSys	16384
BuckHashSys	3194304
GCSys		39198688
OtherSys	3129656

After the change the seven XxxSys values sum exactly to Sys
(610271232 + 65536 + 14188544 + 16384 + 3194304 + 39198688 + 3129656 = 670064344);
before, they fell roughly 44 MB short of it.

Fixes #5799.

R=rsc, dave, alex.brainman
CC=golang-dev
https://golang.org/cl/12946043
parent 52f15df9
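
The whole change applies one discipline at every call site: a low-level allocation names the counter it is charged against, the same counter is debited on free, and Sys is the sum of the per-class counters. A rough standalone sketch of that contract (illustrative Go, not the runtime's internal API; all names here are invented):

	package main

	import (
		"fmt"
		"sync/atomic"
	)

	// One counter per class of system memory, in the spirit of
	// mstats.heap_sys, mstats.gc_sys and mstats.other_sys.
	var heapSys, gcSys, otherSys uint64

	// sysAlloc mimics the new runtime·SysAlloc(n, stat) contract: every
	// caller must say which counter its memory is charged to, so no
	// allocation can go unaccounted.
	func sysAlloc(n uint64, stat *uint64) []byte {
		atomic.AddUint64(stat, n)
		return make([]byte, n)
	}

	// sysFree mimics runtime·SysFree(v, n, stat): the same counter is debited.
	func sysFree(buf []byte, stat *uint64) {
		n := uint64(len(buf))
		atomic.AddUint64(stat, ^(n - 1)) // atomic subtract of n
	}

	func main() {
		heap := sysAlloc(1<<20, &heapSys) // charged to heapSys
		gcBuf := sysAlloc(1<<16, &gcSys)  // charged to gcSys
		sysFree(gcBuf, &gcSys)            // debited from the same counter
		_ = heap
		fmt.Println(heapSys+gcSys+otherSys == 1<<20) // true: counters sum to Sys
	}

Because the stat argument is mandatory, a call site that skips accounting no longer compiles; that is what makes unaccounted sys memory impossible rather than merely unlikely.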
@@ -127,7 +127,7 @@ runtime·SetCPUProfileRate(intgo hz)
 {
 	uintptr *p;
 	uintptr n;
 
 	// Clamp hz to something reasonable.
 	if(hz < 0)
 		hz = 0;
@@ -137,7 +137,7 @@ runtime·SetCPUProfileRate(intgo hz)
 	runtime·lock(&lk);
 	if(hz > 0) {
 		if(prof == nil) {
-			prof = runtime·SysAlloc(sizeof *prof);
+			prof = runtime·SysAlloc(sizeof *prof, &mstats.other_sys);
 			if(prof == nil) {
 				runtime·printf("runtime: cpu profiling cannot allocate memory\n");
 				runtime·unlock(&lk);
...
@@ -86,7 +86,7 @@ itab(InterfaceType *inter, Type *type, int32 canfail)
 	}
 
 	ni = inter->mhdr.len;
-	m = runtime·persistentalloc(sizeof(*m) + ni*sizeof m->fun[0], 0);
+	m = runtime·persistentalloc(sizeof(*m) + ni*sizeof m->fun[0], 0, &mstats.other_sys);
 	m->inter = inter;
 	m->type = type;
...
@@ -269,8 +269,6 @@ runtime·allocmcache(void)
 	runtime·lock(&runtime·mheap);
 	c = runtime·FixAlloc_Alloc(&runtime·mheap.cachealloc);
-	mstats.mcache_inuse = runtime·mheap.cachealloc.inuse;
-	mstats.mcache_sys = runtime·mheap.cachealloc.sys;
 	runtime·unlock(&runtime·mheap);
 	runtime·memclr((byte*)c, sizeof(*c));
@@ -472,7 +470,7 @@ runtime·MHeap_SysAlloc(MHeap *h, uintptr n)
 		if(n <= h->arena_end - h->arena_used) {
 			// Keep taking from our reservation.
 			p = h->arena_used;
-			runtime·SysMap(p, n);
+			runtime·SysMap(p, n, &mstats.heap_sys);
 			h->arena_used += n;
 			runtime·MHeap_MapBits(h);
 			runtime·MHeap_MapSpans(h);
@@ -488,14 +486,14 @@ runtime·MHeap_SysAlloc(MHeap *h, uintptr n)
 	// On 32-bit, once the reservation is gone we can
 	// try to get memory at a location chosen by the OS
 	// and hope that it is in the range we allocated bitmap for.
-	p = runtime·SysAlloc(n);
+	p = runtime·SysAlloc(n, &mstats.heap_sys);
 	if(p == nil)
 		return nil;
 	if(p < h->arena_start || p+n - h->arena_start >= MaxArena32) {
 		runtime·printf("runtime: memory allocated by OS (%p) not in usable range [%p,%p)\n",
 			p, h->arena_start, h->arena_start+MaxArena32);
-		runtime·SysFree(p, n);
+		runtime·SysFree(p, n, &mstats.heap_sys);
 		return nil;
 	}
@@ -530,7 +528,7 @@ enum
 // Intended for things like function/type/debug-related persistent data.
 // If align is 0, uses default align (currently 8).
 void*
-runtime·persistentalloc(uintptr size, uintptr align)
+runtime·persistentalloc(uintptr size, uintptr align, uint64 *stat)
 {
 	byte *p;
@@ -542,11 +540,11 @@ runtime·persistentalloc(uintptr size, uintptr align)
 	} else
 		align = 8;
 	if(size >= PersistentAllocMaxBlock)
-		return runtime·SysAlloc(size);
+		return runtime·SysAlloc(size, stat);
 	runtime·lock(&persistent);
 	persistent.pos = (byte*)ROUND((uintptr)persistent.pos, align);
 	if(persistent.pos + size > persistent.end) {
-		persistent.pos = runtime·SysAlloc(PersistentAllocChunk);
+		persistent.pos = runtime·SysAlloc(PersistentAllocChunk, &mstats.other_sys);
 		if(persistent.pos == nil) {
 			runtime·unlock(&persistent);
 			runtime·throw("runtime: cannot allocate memory");
@@ -556,7 +554,12 @@ runtime·persistentalloc(uintptr size, uintptr align)
 	p = persistent.pos;
 	persistent.pos += size;
 	runtime·unlock(&persistent);
-	return p;
+	if(stat != &mstats.other_sys) {
+		// reaccount the allocation against provided stat
+		runtime·xadd64(stat, size);
+		runtime·xadd64(&mstats.other_sys, -(uint64)size);
+	}
+	return p;
 }
 
 static Lock settype_lock;
...
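
Note the reaccounting at the end of persistentalloc above: chunks taken from the OS are charged to other_sys up front, so when a sub-allocation is handed out against a different stat, its bytes are moved from other_sys to that stat and the sum of the counters (and hence Sys) is conserved. A rough model of that step (illustrative Go, not runtime code):

	package main

	import (
		"fmt"
		"sync/atomic"
	)

	var otherSys, gcSys uint64

	// moveStat re-charges n bytes from one counter to another; the total
	// across all counters is unchanged.
	func moveStat(n uint64, from, to *uint64) {
		atomic.AddUint64(to, n)
		atomic.AddUint64(from, ^(n - 1)) // atomic subtract of n
	}

	func main() {
		atomic.AddUint64(&otherSys, 1<<20)   // a chunk obtained from the OS
		moveStat(4096, &otherSys, &gcSys)    // e.g. a GC finalizer block
		fmt.Println(otherSys+gcSys == 1<<20) // true: the sum is conserved
	}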
@@ -172,11 +172,11 @@ struct MLink
 //
 // SysMap maps previously reserved address space for use.
 
-void*	runtime·SysAlloc(uintptr nbytes);
-void	runtime·SysFree(void *v, uintptr nbytes);
+void*	runtime·SysAlloc(uintptr nbytes, uint64 *stat);
+void	runtime·SysFree(void *v, uintptr nbytes, uint64 *stat);
 void	runtime·SysUnused(void *v, uintptr nbytes);
 void	runtime·SysUsed(void *v, uintptr nbytes);
-void	runtime·SysMap(void *v, uintptr nbytes);
+void	runtime·SysMap(void *v, uintptr nbytes, uint64 *stat);
 void*	runtime·SysReserve(void *v, uintptr nbytes);
 
 // FixAlloc is a simple free-list allocator for fixed size objects.
@@ -189,17 +189,17 @@ void*	runtime·SysReserve(void *v, uintptr nbytes);
 // smashed by freeing and reallocating.
 struct FixAlloc
 {
 	uintptr	size;
 	void	(*first)(void *arg, byte *p);	// called first time p is returned
-	void	*arg;
-	MLink	*list;
-	byte	*chunk;
+	void*	arg;
+	MLink*	list;
+	byte*	chunk;
 	uint32	nchunk;
 	uintptr	inuse;	// in-use bytes now
-	uintptr	sys;	// bytes obtained from system
+	uint64*	stat;
 };
-void	runtime·FixAlloc_Init(FixAlloc *f, uintptr size, void (*first)(void*, byte*), void *arg);
+void	runtime·FixAlloc_Init(FixAlloc *f, uintptr size, void (*first)(void*, byte*), void *arg, uint64 *stat);
 void*	runtime·FixAlloc_Alloc(FixAlloc *f);
 void	runtime·FixAlloc_Free(FixAlloc *f, void *p);
@@ -234,6 +234,8 @@ struct MStats
 	uint64	mcache_inuse;	// MCache structures
 	uint64	mcache_sys;
 	uint64	buckhash_sys;	// profiling bucket hash table
+	uint64	gc_sys;
+	uint64	other_sys;
 
 	// Statistics about garbage collector.
 	// Protected by mheap or stopping the world during GC.
@@ -444,7 +446,7 @@ void	runtime·MHeap_MapSpans(MHeap *h);
 void	runtime·MHeap_Scavenger(void);
 
 void*	runtime·mallocgc(uintptr size, uintptr typ, uint32 flag);
-void*	runtime·persistentalloc(uintptr size, uintptr align);
+void*	runtime·persistentalloc(uintptr size, uintptr align, uint64 *stat);
 int32	runtime·mlookup(void *v, byte **base, uintptr *size, MSpan **s);
 void	runtime·gc(int32 force);
 void	runtime·markallocated(void *v, uintptr n, bool noptr);
...
@@ -5,10 +5,25 @@
 package runtime_test
 
 import (
+	. "runtime"
 	"testing"
 	"unsafe"
 )
 
+func TestMemStats(t *testing.T) {
+	// Test that MemStats has sane values.
+	st := new(MemStats)
+	ReadMemStats(st)
+	if st.HeapSys == 0 || st.StackSys == 0 || st.MSpanSys == 0 || st.MCacheSys == 0 ||
+		st.BuckHashSys == 0 || st.GCSys == 0 || st.OtherSys == 0 {
+		t.Fatalf("Zero sys value: %+v", *st)
+	}
+	if st.Sys != st.HeapSys+st.StackSys+st.MSpanSys+st.MCacheSys+
+		st.BuckHashSys+st.GCSys+st.OtherSys {
+		t.Fatalf("Bad sys value: %+v", *st)
+	}
+}
+
 var mallocSink uintptr
 
 func BenchmarkMalloc8(b *testing.B) {
...
@@ -14,7 +14,7 @@ type MemStats struct {
 	// General statistics.
 	Alloc      uint64 // bytes allocated and still in use
 	TotalAlloc uint64 // bytes allocated (even if freed)
-	Sys        uint64 // bytes obtained from system (should be sum of XxxSys below)
+	Sys        uint64 // bytes obtained from system (sum of XxxSys below)
 	Lookups    uint64 // number of pointer lookups
 	Mallocs    uint64 // number of mallocs
 	Frees      uint64 // number of frees
@@ -37,6 +37,8 @@ type MemStats struct {
 	MCacheInuse uint64 // mcache structures
 	MCacheSys   uint64
 	BuckHashSys uint64 // profiling bucket hash table
+	GCSys       uint64 // GC metadata
+	OtherSys    uint64 // other system allocations
 
 	// Garbage collector statistics.
 	NextGC       uint64 // next run in HeapAlloc time (bytes)
...
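
With GCSys and OtherSys exported, the invariant in the Sys comment above is checkable from ordinary user code through the public API (a minimal sketch):

	package main

	import (
		"fmt"
		"runtime"
	)

	func main() {
		var st runtime.MemStats
		runtime.ReadMemStats(&st)
		sum := st.HeapSys + st.StackSys + st.MSpanSys + st.MCacheSys +
			st.BuckHashSys + st.GCSys + st.OtherSys
		fmt.Println(st.Sys == sum) // expected to print true after this change
	}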
@@ -9,14 +9,14 @@
 #include "malloc.h"
 
 void*
-runtime·SysAlloc(uintptr n)
+runtime·SysAlloc(uintptr n, uint64 *stat)
 {
 	void *v;
 
-	mstats.sys += n;
 	v = runtime·mmap(nil, n, PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, -1, 0);
 	if(v < (void*)4096)
 		return nil;
+	runtime·xadd64(stat, n);
 	return v;
 }
@@ -35,9 +35,9 @@ runtime·SysUsed(void *v, uintptr n)
 }
 
 void
-runtime·SysFree(void *v, uintptr n)
+runtime·SysFree(void *v, uintptr n, uint64 *stat)
 {
-	mstats.sys -= n;
+	runtime·xadd64(stat, -(uint64)n);
 	runtime·munmap(v, n);
 }
@@ -58,11 +58,11 @@ enum
 };
 
 void
-runtime·SysMap(void *v, uintptr n)
+runtime·SysMap(void *v, uintptr n, uint64 *stat)
 {
 	void *p;
 
-	mstats.sys += n;
+	runtime·xadd64(stat, n);
 	p = runtime·mmap(v, n, PROT_READ|PROT_WRITE, MAP_ANON|MAP_FIXED|MAP_PRIVATE, -1, 0);
 	if(p == (void*)ENOMEM)
 		runtime·throw("runtime: out of memory");
...
@@ -14,14 +14,14 @@ enum
 };
 
 void*
-runtime·SysAlloc(uintptr n)
+runtime·SysAlloc(uintptr n, uint64 *stat)
 {
 	void *v;
 
-	mstats.sys += n;
 	v = runtime·mmap(nil, n, PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, -1, 0);
 	if(v < (void*)4096)
 		return nil;
+	runtime·xadd64(stat, n);
 	return v;
 }
@@ -39,9 +39,9 @@ runtime·SysUsed(void *v, uintptr n)
 }
 
 void
-runtime·SysFree(void *v, uintptr n)
+runtime·SysFree(void *v, uintptr n, uint64 *stat)
 {
-	mstats.sys -= n;
+	runtime·xadd64(stat, -(uint64)n);
 	runtime·munmap(v, n);
 }
@@ -63,11 +63,11 @@ runtime·SysReserve(void *v, uintptr n)
 }
 
 void
-runtime·SysMap(void *v, uintptr n)
+runtime·SysMap(void *v, uintptr n, uint64 *stat)
 {
 	void *p;
 
-	mstats.sys += n;
+	runtime·xadd64(stat, n);
 	// On 64-bit, we don't actually have v reserved, so tread carefully.
 	if(sizeof(void*) == 8) {
...
@@ -50,11 +50,10 @@ mmap_fixed(byte *v, uintptr n, int32 prot, int32 flags, int32 fd, uint32 offset)
 }
 
 void*
-runtime·SysAlloc(uintptr n)
+runtime·SysAlloc(uintptr n, uint64 *stat)
 {
 	void *p;
 
-	mstats.sys += n;
 	p = runtime·mmap(nil, n, PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, -1, 0);
 	if(p < (void*)4096) {
 		if(p == (void*)EACCES) {
@@ -68,6 +67,7 @@ runtime·SysAlloc(uintptr n)
 		}
 		return nil;
 	}
+	runtime·xadd64(stat, n);
 	return p;
 }
@@ -85,9 +85,9 @@ runtime·SysUsed(void *v, uintptr n)
 }
 
 void
-runtime·SysFree(void *v, uintptr n)
+runtime·SysFree(void *v, uintptr n, uint64 *stat)
 {
-	mstats.sys -= n;
+	runtime·xadd64(stat, -(uint64)n);
 	runtime·munmap(v, n);
 }
@@ -118,11 +118,11 @@ runtime·SysReserve(void *v, uintptr n)
 }
 
 void
-runtime·SysMap(void *v, uintptr n)
+runtime·SysMap(void *v, uintptr n, uint64 *stat)
 {
 	void *p;
 
-	mstats.sys += n;
+	runtime·xadd64(stat, n);
 	// On 64-bit, we don't actually have v reserved, so tread carefully.
 	if(sizeof(void*) == 8 && (uintptr)v >= 0xffffffffU) {
...
@@ -14,14 +14,14 @@ enum
 };
 
 void*
-runtime·SysAlloc(uintptr n)
+runtime·SysAlloc(uintptr n, uint64 *stat)
 {
 	void *v;
 
-	mstats.sys += n;
 	v = runtime·mmap(nil, n, PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, -1, 0);
 	if(v < (void*)4096)
 		return nil;
+	runtime·xadd64(stat, n);
 	return v;
 }
@@ -39,9 +39,9 @@ runtime·SysUsed(void *v, uintptr n)
 }
 
 void
-runtime·SysFree(void *v, uintptr n)
+runtime·SysFree(void *v, uintptr n, uint64 *stat)
 {
-	mstats.sys -= n;
+	runtime·xadd64(stat, -(uint64)n);
 	runtime·munmap(v, n);
 }
@@ -63,11 +63,11 @@ runtime·SysReserve(void *v, uintptr n)
 }
 
 void
-runtime·SysMap(void *v, uintptr n)
+runtime·SysMap(void *v, uintptr n, uint64 *stat)
 {
 	void *p;
 
-	mstats.sys += n;
+	runtime·xadd64(stat, n);
 	// On 64-bit, we don't actually have v reserved, so tread carefully.
 	if(sizeof(void*) == 8) {
...
@@ -14,14 +14,14 @@ enum
 };
 
 void*
-runtime·SysAlloc(uintptr n)
+runtime·SysAlloc(uintptr n, uint64 *stat)
 {
 	void *v;
 
-	mstats.sys += n;
 	v = runtime·mmap(nil, n, PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, -1, 0);
 	if(v < (void*)4096)
 		return nil;
+	runtime·xadd64(stat, n);
 	return v;
 }
@@ -39,9 +39,9 @@ runtime·SysUsed(void *v, uintptr n)
 }
 
 void
-runtime·SysFree(void *v, uintptr n)
+runtime·SysFree(void *v, uintptr n, uint64 *stat)
 {
-	mstats.sys -= n;
+	runtime·xadd64(stat, -(uint64)n);
 	runtime·munmap(v, n);
 }
@@ -63,11 +63,11 @@ runtime·SysReserve(void *v, uintptr n)
 }
 
 void
-runtime·SysMap(void *v, uintptr n)
+runtime·SysMap(void *v, uintptr n, uint64 *stat)
 {
 	void *p;
 
-	mstats.sys += n;
+	runtime·xadd64(stat, n);
 	// On 64-bit, we don't actually have v reserved, so tread carefully.
 	if(sizeof(void*) == 8) {
...
@@ -18,12 +18,11 @@ enum
 };
 
 void*
-runtime·SysAlloc(uintptr nbytes)
+runtime·SysAlloc(uintptr nbytes, uint64 *stat)
 {
 	uintptr bl;
 
 	runtime·lock(&memlock);
-	mstats.sys += nbytes;
 	// Plan 9 sbrk from /sys/src/libc/9sys/sbrk.c
 	bl = ((uintptr)bloc + Round) & ~Round;
 	if(runtime·brk_((void*)(bl + nbytes)) < 0) {
@@ -32,20 +31,21 @@ runtime·SysAlloc(uintptr nbytes)
 	}
 	bloc = (byte*)bl + nbytes;
 	runtime·unlock(&memlock);
+	runtime·xadd64(stat, nbytes);
 	return (void*)bl;
 }
 
 void
-runtime·SysFree(void *v, uintptr nbytes)
+runtime·SysFree(void *v, uintptr nbytes, uint64 *stat)
 {
+	runtime·xadd64(stat, -(uint64)nbytes);
 	runtime·lock(&memlock);
-	mstats.sys -= nbytes;
 	// from tiny/mem.c
 	// Push pointer back if this is a free
 	// of the most recent SysAlloc.
 	nbytes += (nbytes + Round) & ~Round;
 	if(bloc == (byte*)v+nbytes)
 		bloc -= nbytes;
 	runtime·unlock(&memlock);
 }
@@ -62,14 +62,14 @@ runtime·SysUsed(void *v, uintptr nbytes)
 }
 
 void
-runtime·SysMap(void *v, uintptr nbytes)
+runtime·SysMap(void *v, uintptr nbytes, uint64 *stat)
 {
-	USED(v, nbytes);
+	USED(v, nbytes, stat);
 }
 
 void*
 runtime·SysReserve(void *v, uintptr nbytes)
 {
 	USED(v);
-	return runtime·SysAlloc(nbytes);
+	return runtime·SysAlloc(nbytes, &mstats.heap_sys);
 }
@@ -23,9 +23,9 @@ extern void *runtime·VirtualAlloc;
 extern void *runtime·VirtualFree;
 
 void*
-runtime·SysAlloc(uintptr n)
+runtime·SysAlloc(uintptr n, uint64 *stat)
 {
-	mstats.sys += n;
+	runtime·xadd64(stat, n);
 	return runtime·stdcall(runtime·VirtualAlloc, 4, nil, n, (uintptr)(MEM_COMMIT|MEM_RESERVE), (uintptr)PAGE_READWRITE);
 }
@@ -50,11 +50,11 @@ runtime·SysUsed(void *v, uintptr n)
 }
 
 void
-runtime·SysFree(void *v, uintptr n)
+runtime·SysFree(void *v, uintptr n, uint64 *stat)
 {
 	uintptr r;
 
-	mstats.sys -= n;
+	runtime·xadd64(stat, -(uint64)n);
 	r = (uintptr)runtime·stdcall(runtime·VirtualFree, 3, v, (uintptr)0, (uintptr)MEM_RELEASE);
 	if(r == 0)
 		runtime·throw("runtime: failed to release pages");
@@ -74,11 +74,11 @@ runtime·SysReserve(void *v, uintptr n)
 }
 
 void
-runtime·SysMap(void *v, uintptr n)
+runtime·SysMap(void *v, uintptr n, uint64 *stat)
 {
 	void *p;
 
-	mstats.sys += n;
+	runtime·xadd64(stat, n);
 	p = runtime·stdcall(runtime·VirtualAlloc, 4, v, n, (uintptr)MEM_COMMIT, (uintptr)PAGE_READWRITE);
 	if(p != v)
 		runtime·throw("runtime: cannot map pages in arena address space");
...
@@ -13,7 +13,7 @@
 // Initialize f to allocate objects of the given size,
 // using the allocator to obtain chunks of memory.
 void
-runtime·FixAlloc_Init(FixAlloc *f, uintptr size, void (*first)(void*, byte*), void *arg)
+runtime·FixAlloc_Init(FixAlloc *f, uintptr size, void (*first)(void*, byte*), void *arg, uint64 *stat)
 {
 	f->size = size;
 	f->first = first;
@@ -22,7 +22,7 @@ runtime·FixAlloc_Init(FixAlloc *f, uintptr size, void (*first)(void*, byte*), v
 	f->chunk = nil;
 	f->nchunk = 0;
 	f->inuse = 0;
-	f->sys = 0;
+	f->stat = stat;
 }
 
 void*
@@ -42,8 +42,7 @@ runtime·FixAlloc_Alloc(FixAlloc *f)
 		return v;
 	}
 	if(f->nchunk < f->size) {
-		f->sys += FixAllocChunk;
-		f->chunk = runtime·persistentalloc(FixAllocChunk, 0);
+		f->chunk = runtime·persistentalloc(FixAllocChunk, 0, f->stat);
 		f->nchunk = FixAllocChunk;
 	}
 	v = f->chunk;
...
@@ -1223,7 +1223,7 @@ getempty(Workbuf *b)
 		runtime·lock(&work);
 		if(work.nchunk < sizeof *b) {
 			work.nchunk = 1<<20;
-			work.chunk = runtime·SysAlloc(work.nchunk);
+			work.chunk = runtime·SysAlloc(work.nchunk, &mstats.gc_sys);
 			if(work.chunk == nil)
 				runtime·throw("runtime: cannot allocate memory");
 		}
@@ -1314,12 +1314,12 @@ addroot(Obj obj)
 		cap = PageSize/sizeof(Obj);
 		if(cap < 2*work.rootcap)
 			cap = 2*work.rootcap;
-		new = (Obj*)runtime·SysAlloc(cap*sizeof(Obj));
+		new = (Obj*)runtime·SysAlloc(cap*sizeof(Obj), &mstats.gc_sys);
 		if(new == nil)
 			runtime·throw("runtime: cannot allocate memory");
 		if(work.roots != nil) {
 			runtime·memmove(new, work.roots, work.rootcap*sizeof(Obj));
-			runtime·SysFree(work.roots, work.rootcap*sizeof(Obj));
+			runtime·SysFree(work.roots, work.rootcap*sizeof(Obj), &mstats.gc_sys);
 		}
 		work.roots = new;
 		work.rootcap = cap;
@@ -1583,7 +1583,7 @@ handlespecial(byte *p, uintptr size)
 	runtime·lock(&finlock);
 	if(finq == nil || finq->cnt == finq->cap) {
 		if(finc == nil) {
-			finc = runtime·persistentalloc(PageSize, 0);
+			finc = runtime·persistentalloc(PageSize, 0, &mstats.gc_sys);
 			finc->cap = (PageSize - sizeof(FinBlock)) / sizeof(Finalizer) + 1;
 			finc->alllink = allfin;
 			allfin = finc;
@@ -1869,7 +1869,11 @@ updatememstats(GCStats *stats)
 		}
 	}
 	mstats.stacks_inuse = stacks_inuse;
+	mstats.mcache_inuse = runtime·mheap.cachealloc.inuse;
+	mstats.mspan_inuse = runtime·mheap.spanalloc.inuse;
+	mstats.sys = mstats.heap_sys + mstats.stacks_sys + mstats.mspan_sys +
+		mstats.mcache_sys + mstats.buckhash_sys + mstats.gc_sys + mstats.other_sys;
 
 	// Calculate memory allocator stats.
 	// During program execution we only count number of frees and amount of freed memory.
 	// Current number of alive object in the heap and amount of alive heap memory
@@ -2517,6 +2521,6 @@ runtime·MHeap_MapBits(MHeap *h)
 	if(h->bitmap_mapped >= n)
 		return;
-	runtime·SysMap(h->arena_start - n, n - h->bitmap_mapped);
+	runtime·SysMap(h->arena_start - n, n - h->bitmap_mapped, &mstats.gc_sys);
 	h->bitmap_mapped = n;
 }
@@ -36,12 +36,12 @@ RecordSpan(void *vh, byte *p)
 		cap = 64*1024/sizeof(all[0]);
 		if(cap < h->nspancap*3/2)
 			cap = h->nspancap*3/2;
-		all = (MSpan**)runtime·SysAlloc(cap*sizeof(all[0]));
+		all = (MSpan**)runtime·SysAlloc(cap*sizeof(all[0]), &mstats.other_sys);
 		if(all == nil)
 			runtime·throw("runtime: cannot allocate memory");
 		if(h->allspans) {
 			runtime·memmove(all, h->allspans, h->nspancap*sizeof(all[0]));
-			runtime·SysFree(h->allspans, h->nspancap*sizeof(all[0]));
+			runtime·SysFree(h->allspans, h->nspancap*sizeof(all[0]), &mstats.other_sys);
 		}
 		h->allspans = all;
 		h->nspancap = cap;
@@ -55,8 +55,8 @@ runtime·MHeap_Init(MHeap *h)
 {
 	uint32 i;
 
-	runtime·FixAlloc_Init(&h->spanalloc, sizeof(MSpan), RecordSpan, h);
-	runtime·FixAlloc_Init(&h->cachealloc, sizeof(MCache), nil, nil);
+	runtime·FixAlloc_Init(&h->spanalloc, sizeof(MSpan), RecordSpan, h, &mstats.mspan_sys);
+	runtime·FixAlloc_Init(&h->cachealloc, sizeof(MCache), nil, nil, &mstats.mcache_sys);
 	// h->mapcache needs no init
 	for(i=0; i<nelem(h->free); i++)
 		runtime·MSpanList_Init(&h->free[i]);
@@ -78,7 +78,7 @@ runtime·MHeap_MapSpans(MHeap *h)
 	n = ROUND(n, PageSize);
 	if(h->spans_mapped >= n)
 		return;
-	runtime·SysMap((byte*)h->spans + h->spans_mapped, n - h->spans_mapped);
+	runtime·SysMap((byte*)h->spans + h->spans_mapped, n - h->spans_mapped, &mstats.other_sys);
 	h->spans_mapped = n;
 }
@@ -164,8 +164,6 @@ HaveSpan:
 	if(s->npages > npage) {
 		// Trim extra and put it back in the heap.
 		t = runtime·FixAlloc_Alloc(&h->spanalloc);
-		mstats.mspan_inuse = h->spanalloc.inuse;
-		mstats.mspan_sys = h->spanalloc.sys;
 		runtime·MSpan_Init(t, s->start + npage, s->npages - npage);
 		s->npages = npage;
 		p = t->start;
@@ -251,13 +249,10 @@ MHeap_Grow(MHeap *h, uintptr npage)
 			return false;
 		}
 	}
-	mstats.heap_sys += ask;
 
 	// Create a fake "in use" span and free it, so that the
 	// right coalescing happens.
 	s = runtime·FixAlloc_Alloc(&h->spanalloc);
-	mstats.mspan_inuse = h->spanalloc.inuse;
-	mstats.mspan_sys = h->spanalloc.sys;
 	runtime·MSpan_Init(s, (uintptr)v>>PageShift, ask>>PageShift);
 	p = s->start;
 	if(sizeof(void*) == 8)
@@ -363,8 +358,6 @@ MHeap_FreeLocked(MHeap *h, MSpan *s)
 		runtime·MSpanList_Remove(t);
 		t->state = MSpanDead;
 		runtime·FixAlloc_Free(&h->spanalloc, t);
-		mstats.mspan_inuse = h->spanalloc.inuse;
-		mstats.mspan_sys = h->spanalloc.sys;
 	}
 	if((p+s->npages)*sizeof(h->spans[0]) < h->spans_mapped && (t = h->spans[p+s->npages]) != nil && t->state != MSpanInUse) {
 		if(t->npreleased == 0) { // cant't touch this otherwise
@@ -377,8 +370,6 @@ MHeap_FreeLocked(MHeap *h, MSpan *s)
 		runtime·MSpanList_Remove(t);
 		t->state = MSpanDead;
 		runtime·FixAlloc_Free(&h->spanalloc, t);
-		mstats.mspan_inuse = h->spanalloc.inuse;
-		mstats.mspan_sys = h->spanalloc.sys;
 	}
 
 	// Insert s into appropriate list.
...
@@ -70,10 +70,9 @@ stkbucket(int32 typ, uintptr *stk, int32 nstk, bool alloc)
 	Bucket *b;
 
 	if(buckhash == nil) {
-		buckhash = runtime·SysAlloc(BuckHashSize*sizeof buckhash[0]);
+		buckhash = runtime·SysAlloc(BuckHashSize*sizeof buckhash[0], &mstats.buckhash_sys);
 		if(buckhash == nil)
 			runtime·throw("runtime: cannot allocate memory");
-		mstats.buckhash_sys += BuckHashSize*sizeof buckhash[0];
 	}
 
 	// Hash stack.
@@ -95,7 +94,7 @@ stkbucket(int32 typ, uintptr *stk, int32 nstk, bool alloc)
 	if(!alloc)
 		return nil;
 
-	b = runtime·persistentalloc(sizeof *b + nstk*sizeof stk[0], 0);
+	b = runtime·persistentalloc(sizeof *b + nstk*sizeof stk[0], 0, &mstats.buckhash_sys);
 	bucketmem += sizeof *b + nstk*sizeof stk[0];
 	runtime·memmove(b->stk, stk, nstk*sizeof stk[0]);
 	b->typ = typ;
@@ -197,7 +196,7 @@ setaddrbucket(uintptr addr, Bucket *b)
 		if(ah->addr == (addr>>AddrHashShift))
 			goto found;
 
-	ah = runtime·persistentalloc(sizeof *ah, 0);
+	ah = runtime·persistentalloc(sizeof *ah, 0, &mstats.buckhash_sys);
 	addrmem += sizeof *ah;
 	ah->next = addrhash[h];
 	ah->addr = addr>>AddrHashShift;
@@ -205,7 +204,7 @@ setaddrbucket(uintptr addr, Bucket *b)
 found:
 	if((e = addrfree) == nil) {
-		e = runtime·persistentalloc(64*sizeof *e, 0);
+		e = runtime·persistentalloc(64*sizeof *e, 0, &mstats.buckhash_sys);
 		addrmem += 64*sizeof *e;
 		for(i=0; i+1<64; i++)
 			e[i].next = &e[i+1];
@@ -529,5 +528,5 @@ func GoroutineProfile(b Slice) (n int, ok bool) {
 void
 runtime·mprofinit(void)
 {
-	addrhash = runtime·persistentalloc((1<<AddrHashBits)*sizeof *addrhash, 0);
+	addrhash = runtime·persistentalloc((1<<AddrHashBits)*sizeof *addrhash, 0, &mstats.buckhash_sys);
 }
@@ -379,7 +379,7 @@ allocPollDesc(void)
 			n = 1;
 		// Must be in non-GC memory because can be referenced
 		// only from epoll/kqueue internals.
-		pd = runtime·persistentalloc(n*sizeof(*pd), 0);
+		pd = runtime·persistentalloc(n*sizeof(*pd), 0, &mstats.other_sys);
 		for(i = 0; i < n; i++) {
 			pd[i].link = pollcache.first;
 			pollcache.first = &pd[i];
...
@@ -36,10 +36,9 @@ stackcacherefill(void)
 		stackcache = n->next;
 	runtime·unlock(&stackcachemu);
 	if(n == nil) {
-		n = (StackCacheNode*)runtime·SysAlloc(FixedStack*StackCacheBatch);
+		n = (StackCacheNode*)runtime·SysAlloc(FixedStack*StackCacheBatch, &mstats.stacks_sys);
 		if(n == nil)
 			runtime·throw("out of memory (stackcacherefill)");
-		runtime·xadd64(&mstats.stacks_sys, FixedStack*StackCacheBatch);
 		for(i = 0; i < StackCacheBatch-1; i++)
 			n->batch[i] = (byte*)n + (i+1)*FixedStack;
 	}
...