Commit 0e840887 authored by Russ Cox

runtime: change tinyalloc, persistentalloc not to point past allocated data

During all.bash I got a crash in the GOMAXPROCS=2 runtime test reporting
that the write barrier in the assignment 'c.tiny = add(x, size)' had been
given a pointer pointing into an unexpected span. The problem is that
the tiny allocation was at the end of a span and c.tiny was now pointing
to the end of the allocation and therefore to the end of the span aka
the beginning of the next span.

Rewrite tinyalloc not to do that.
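
As a rough illustration of the two bookkeeping schemes (this is a standalone sketch, not the runtime code; tinyCache, maxTiny, and alloc are invented names, and modern unsafe.Add stands in for the runtime's add helper): the old scheme stores a "next free byte" pointer, which after a perfectly sized allocation points one past the end of the block, while the new scheme stores the block base plus an integer offset, so the saved pointer always points into memory the cache owns.

    package main

    // Illustrative sketch only; the real logic lives in runtime.mallocgc
    // and uses the mcache fields shown in the diff below.

    import (
        "fmt"
        "unsafe"
    )

    const maxTiny = 16 // mirrors the runtime's maxTinySize

    // tinyCache mimics the new bookkeeping: base pointer plus integer offset.
    // The stored pointer never moves past the start of the block; only the
    // offset, a plain integer invisible to the GC, can reach the end.
    type tinyCache struct {
        tiny       unsafe.Pointer // base of the current tiny block
        tinyoffset uintptr        // bytes already handed out
    }

    // alloc returns a pointer to size bytes inside the current block, or
    // (nil, false) if the caller must start a fresh block first.
    func (c *tinyCache) alloc(size uintptr) (unsafe.Pointer, bool) {
        if c.tiny == nil || c.tinyoffset+size > maxTiny {
            return nil, false
        }
        p := unsafe.Add(c.tiny, c.tinyoffset)
        c.tinyoffset += size
        return p, true
    }

    func main() {
        block := make([]byte, maxTiny)
        c := tinyCache{tiny: unsafe.Pointer(&block[0])}

        p, ok := c.alloc(maxTiny) // fills the block exactly
        // Even though the block is now full, c.tiny still points at block[0],
        // not one byte past the end of it as the old scheme's pointer would.
        fmt.Println(p, ok, c.tinyoffset)
    }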

More generally, it's not okay to call add(p, size) unless you know that p
points at > (not just >=) size bytes. Similarly, pretty much any call to
roundup doesn't know how much space p points at, so those are all
broken.

Rewrite persistentalloc not to use add(p, totalsize) and not to use roundup.
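
For comparison, here is a minimal sketch of the base+offset pattern (plain Go, with a heap-allocated buffer standing in for sysAlloc, no locking, and invented names arena/chunkSize): the alignment is applied to the integer offset, never to the pointer, and the returned pointer is always base plus an offset known to fit inside the chunk.

    package main

    // Illustrative sketch only; sysAlloc, the runtime lock, and the stat
    // accounting are omitted, and a Go-heap []byte stands in for the chunk.

    import (
        "fmt"
        "unsafe"
    )

    const chunkSize = 256 << 10 // same 256K chunk size the runtime uses

    // round rounds n up to a multiple of a, which must be a power of two.
    func round(n, a uintptr) uintptr {
        return (n + a - 1) &^ (a - 1)
    }

    type arena struct {
        buf  []byte         // keeps the chunk alive; stand-in for sysAlloc memory
        base unsafe.Pointer // start of the current chunk
        off  uintptr        // bytes handed out from it so far
    }

    func (a *arena) alloc(size, align uintptr) unsafe.Pointer {
        // Round the offset, an ordinary integer, rather than the pointer:
        // rounding a pointer could push it past the memory we own.
        a.off = round(a.off, align)
        if a.base == nil || a.off+size > chunkSize {
            a.buf = make([]byte, chunkSize)
            a.base = unsafe.Pointer(&a.buf[0])
            a.off = 0
        }
        p := unsafe.Add(a.base, a.off)
        a.off += size
        return p
    }

    func main() {
        var a arena
        p := a.alloc(24, 8)
        fmt.Printf("p=%p off=%d\n", p, a.off)
    }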

There is only one use of roundup left, in vprintf, which is dead code.
I will remove that code and roundup itself in a followup CL.

Change-Id: I211e307d1a656d29087b8fd40b2b71010722fb4a
Reviewed-on: https://go-review.googlesource.com/2814
Reviewed-by: Dmitry Vyukov <dvyukov@google.com>
parent bfeda918
@@ -119,40 +119,35 @@ func mallocgc(size uintptr, typ *_type, flags uint32) unsafe.Pointer {
 			// standalone escaping variables. On a json benchmark
 			// the allocator reduces number of allocations by ~12% and
 			// reduces heap size by ~20%.
-			tinysize := uintptr(c.tinysize)
-			if size <= tinysize {
-				tiny := unsafe.Pointer(c.tiny)
-				// Align tiny pointer for required (conservative) alignment.
-				if size&7 == 0 {
-					tiny = roundup(tiny, 8)
-				} else if size&3 == 0 {
-					tiny = roundup(tiny, 4)
-				} else if size&1 == 0 {
-					tiny = roundup(tiny, 2)
-				}
-				size1 := size + (uintptr(tiny) - uintptr(unsafe.Pointer(c.tiny)))
-				if size1 <= tinysize {
-					// The object fits into existing tiny block.
-					x = tiny
-					c.tiny = (*byte)(add(x, size))
-					c.tinysize -= uintptr(size1)
-					c.local_tinyallocs++
-					if debugMalloc {
-						mp := acquirem()
-						if mp.mallocing == 0 {
-							throw("bad malloc")
-						}
-						mp.mallocing = 0
-						if mp.curg != nil {
-							mp.curg.stackguard0 = mp.curg.stack.lo + _StackGuard
-						}
-						// Note: one releasem for the acquirem just above.
-						// The other for the acquirem at start of malloc.
-						releasem(mp)
-						releasem(mp)
-					}
-					return x
-				}
-			}
+			off := c.tinyoffset
+			// Align tiny pointer for required (conservative) alignment.
+			if size&7 == 0 {
+				off = round(off, 8)
+			} else if size&3 == 0 {
+				off = round(off, 4)
+			} else if size&1 == 0 {
+				off = round(off, 2)
+			}
+			if off+size <= maxTinySize && c.tiny != nil {
+				// The object fits into existing tiny block.
+				x = add(c.tiny, off)
+				c.tinyoffset = off + size
+				c.local_tinyallocs++
+				if debugMalloc {
+					mp := acquirem()
+					if mp.mallocing == 0 {
+						throw("bad malloc")
+					}
+					mp.mallocing = 0
+					if mp.curg != nil {
+						mp.curg.stackguard0 = mp.curg.stack.lo + _StackGuard
+					}
+					// Note: one releasem for the acquirem just above.
+					// The other for the acquirem at start of malloc.
+					releasem(mp)
+					releasem(mp)
+				}
+				return x
+			}
 			// Allocate a new maxTinySize block.
 			s = c.alloc[tinySizeClass]
@@ -173,9 +168,9 @@ func mallocgc(size uintptr, typ *_type, flags uint32) unsafe.Pointer {
 			(*[2]uint64)(x)[1] = 0
 			// See if we need to replace the existing tiny block with the new one
 			// based on amount of remaining free space.
-			if maxTinySize-size > tinysize {
-				c.tiny = (*byte)(add(x, size))
-				c.tinysize = uintptr(maxTinySize - size)
+			if size < c.tinyoffset || c.tiny == nil {
+				c.tiny = x
+				c.tinyoffset = size
 			}
 			size = maxTinySize
 		} else {
@@ -1013,8 +1008,8 @@ func runfinq() {
 var persistent struct {
 	lock mutex
-	pos  unsafe.Pointer
-	end  unsafe.Pointer
+	base unsafe.Pointer
+	off  uintptr
 }
 
 // Wrapper around sysAlloc that can allocate small chunks.
@@ -1027,6 +1022,9 @@ func persistentalloc(size, align uintptr, stat *uint64) unsafe.Pointer {
 		maxBlock = 64 << 10 // VM reservation granularity is 64K on windows
 	)
 
+	if size == 0 {
+		throw("persistentalloc: size == 0")
+	}
 	if align != 0 {
 		if align&(align-1) != 0 {
 			throw("persistentalloc: align is not a power of 2")
@@ -1043,17 +1041,17 @@ func persistentalloc(size, align uintptr, stat *uint64) unsafe.Pointer {
 	}
 
 	lock(&persistent.lock)
-	persistent.pos = roundup(persistent.pos, align)
-	if uintptr(persistent.pos)+size > uintptr(persistent.end) {
-		persistent.pos = sysAlloc(chunk, &memstats.other_sys)
-		if persistent.pos == nil {
+	persistent.off = round(persistent.off, align)
+	if persistent.off+size > chunk || persistent.base == nil {
+		persistent.base = sysAlloc(chunk, &memstats.other_sys)
+		if persistent.base == nil {
 			unlock(&persistent.lock)
 			throw("runtime: cannot allocate memory")
 		}
-		persistent.end = add(persistent.pos, chunk)
+		persistent.off = 0
 	}
-	p := persistent.pos
-	persistent.pos = add(persistent.pos, size)
+	p := add(persistent.base, persistent.off)
+	persistent.off += size
 	unlock(&persistent.lock)
 	if stat != &memstats.other_sys {
@@ -323,8 +323,8 @@ type mcache struct {
 	local_cachealloc intptr // bytes allocated (or freed) from cache since last lock of heap
 	// Allocator cache for tiny objects w/o pointers.
 	// See "Tiny allocator" comment in malloc.goc.
-	tiny             *byte
-	tinysize         uintptr
+	tiny             unsafe.Pointer
+	tinyoffset       uintptr
 	local_tinyallocs uintptr // number of tiny allocs not counted in other stats
 	// The rest is not accessed on every malloc.
@@ -52,7 +52,7 @@ func clearpools() {
 		// clear tinyalloc pool
 		if c := p.mcache; c != nil {
 			c.tiny = nil
-			c.tinysize = 0
+			c.tinyoffset = 0
 			// disconnect cached list before dropping it on the floor,
 			// so that a dangling ref to one entry does not pin all of them.