// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Memory allocator, based on tcmalloc.
// http://goog-perftools.sourceforge.net/doc/tcmalloc.html

// The main allocator works in runs of pages.
// Small allocation sizes (up to and including 32 kB) are
// rounded to one of about 100 size classes, each of which
// has its own free list of objects of exactly that size.
// Any free page of memory can be split into a set of objects
// of one size class, which are then managed using free list
// allocators.
//
// The allocator's data structures are:
//
//	FixAlloc: a free-list allocator for fixed-size objects,
//		used to manage storage used by the allocator.
//	MHeap: the malloc heap, managed at page (8192-byte) granularity.
//	MSpan: a run of pages managed by the MHeap.
//	MCentral: a shared free list for a given size class.
//	MCache: a per-thread (in Go, per-P) cache for small objects.
//	MStats: allocation statistics.
//
// Allocating a small object proceeds up a hierarchy of caches:
//
//	1. Round the size up to one of the small size classes
//	   and look in the corresponding MCache free list.
//	   If the list is not empty, allocate an object from it.
//	   This can all be done without acquiring a lock.
//
//	2. If the MCache free list is empty, replenish it by
//	   taking a bunch of objects from the MCentral free list.
//	   Moving a bunch amortizes the cost of acquiring the MCentral lock.
//
//	3. If the MCentral free list is empty, replenish it by
//	   allocating a run of pages from the MHeap and then
//	   chopping that memory into objects of the given size.
//	   Allocating many objects amortizes the cost of locking
//	   the heap.
//
//	4. If the MHeap is empty or has no page runs large enough,
//	   allocate a new group of pages (at least 1MB) from the
//	   operating system.  Allocating a large run of pages
//	   amortizes the cost of talking to the operating system.
//
// Freeing a small object proceeds up the same hierarchy:
//
//	1. Look up the size class for the object and add it to
//	   the MCache free list.
//
//	2. If the MCache free list is too long or the MCache has
//	   too much memory, return some to the MCentral free lists.
//
//	3. If all the objects in a given span have returned to
//	   the MCentral list, return that span to the page heap.
//
//	4. If the heap has too much memory, return some to the
//	   operating system.
//
//	TODO(rsc): Step 4 is not implemented.
//
// Allocating and freeing a large object uses the page heap
// directly, bypassing the MCache and MCentral free lists.
//
// The small objects on the MCache and MCentral free lists
// may or may not be zeroed.  They are zeroed if and only if
// the second word of the object is zero.  A span in the
// page heap is zeroed unless s->needzero is set. When a span
// is allocated to break into small objects, it is zeroed if needed
// and s->needzero is set. There are two main benefits to delaying the
// zeroing this way:
//
//	1. stack frames allocated from the small object lists
//	   or the page heap can avoid zeroing altogether.
//	2. the cost of zeroing when reusing a small object is
//	   charged to the mutator, not the garbage collector.
//
// This code was originally written (in C) with an eye toward translating
// it to Go; as a result, functions have the form Type_Method(Type *t, ...).

package runtime

import "unsafe"

const (
	debugMalloc = false

	flagNoScan = _FlagNoScan
	flagNoZero = _FlagNoZero

	maxTinySize   = _TinySize
	tinySizeClass = _TinySizeClass
	maxSmallSize  = _MaxSmallSize

	pageShift = _PageShift
	pageSize  = _PageSize
	pageMask  = _PageMask

	mSpanInUse = _MSpanInUse

	concurrentSweep = _ConcurrentSweep
)

const (
	_PageShift = 13
	_PageSize  = 1 << _PageShift
	_PageMask  = _PageSize - 1
)
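
// Since _PageSize is a power of two, page arithmetic throughout this file
// reduces to bit operations.  A minimal sketch of the identities
// (illustrative only; addr and size are arbitrary values):
//
//	off := addr & _PageMask              // offset within a page (addr % _PageSize)
//	base := addr &^ _PageMask            // start of the page containing addr
//	n := (size + _PageMask) &^ _PageMask // size rounded up to a whole page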

const (
	// _64bit = 1 on 64-bit systems, 0 on 32-bit systems
	_64bit = 1 << (^uintptr(0) >> 63) / 2
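	// Worked out: ^uintptr(0)>>63 is 1 when uintptr is 64 bits wide and 0
	// when it is 32 bits wide, so (1 << that) / 2 evaluates to 2/2 = 1 on
	// 64-bit systems and 1/2 = 0 on 32-bit systems.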

	// Computed constant.  The definition of MaxSmallSize and the
	// algorithm in msize.go produce some number of different allocation
	// size classes.  NumSizeClasses is that number.  It's needed here
	// because there are static arrays of this length; when msize runs its
	// size choosing algorithm it double-checks that NumSizeClasses agrees.
	_NumSizeClasses = 67
	_NumSizeClasses = 67

	// Tunable constants.
	_MaxSmallSize = 32 << 10

	// Tiny allocator parameters, see "Tiny allocator" comment in malloc.go.
	_TinySize      = 16
	_TinySizeClass = 2

	_FixAllocChunk  = 16 << 10               // Chunk size for FixAlloc
	_MaxMHeapList   = 1 << (20 - _PageShift) // Maximum page length for fixed-size list in MHeap.
	_HeapAllocChunk = 1 << 20                // Chunk size for heap growth

	// Per-P, per order stack segment cache size.
	_StackCacheSize = 32 * 1024

	// Number of orders that get caching.  Order 0 is FixedStack
	// and each successive order is twice as large.
	// We want to cache 2KB, 4KB, 8KB, and 16KB stacks.  Larger stacks
	// will be allocated directly.
	// Since FixedStack is different on different systems, we
	// must vary NumStackOrders to keep the same maximum cached size.
	//   OS               | FixedStack | NumStackOrders
	//   -----------------+------------+---------------
	//   linux/darwin/bsd | 2KB        | 4
	//   windows/32       | 4KB        | 3
	//   windows/64       | 8KB        | 2
	//   plan9            | 4KB        | 3
	_NumStackOrders = 4 - ptrSize/4*goos_windows - 1*goos_plan9
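	// Checking the formula against the table: ptrSize/4 is 1 on 32-bit
	// systems and 2 on 64-bit systems, so windows/32 gets 4-1 = 3 orders,
	// windows/64 gets 4-2 = 2, plan9 gets 4-0-1 = 3, and the other
	// systems keep all 4.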

	// Number of bits in page to span calculations (8k pages).
	// On Windows 64-bit we limit the arena to 32GB or 35 bits.
	// Windows counts memory used by page table into committed memory
	// of the process, so we can't reserve too much memory.
	// See http://golang.org/issue/5402 and http://golang.org/issue/5236.
	// On other 64-bit platforms, we limit the arena to 128GB, or 37 bits.
	// On 32-bit, we don't bother limiting anything, so we use the full 32-bit address.
	// On Darwin/arm64, we cannot reserve more than ~5GB of virtual memory,
	// but as most devices have less than 4GB of physical memory anyway, we
	// try to be conservative here, and only ask for a 2GB heap.
	_MHeapMap_TotalBits = (_64bit*goos_windows)*35 + (_64bit*(1-goos_windows)*(1-goos_darwin*goarch_arm64))*37 + goos_darwin*goarch_arm64*31 + (1-_64bit)*32
	_MHeapMap_Bits      = _MHeapMap_TotalBits - _PageShift

	_MaxMem = uintptr(1<<_MHeapMap_TotalBits - 1)

	// Max number of threads to run garbage collection.
	// 2, 3, and 4 are all plausible maximums depending
	// on the hardware details of the machine.  The garbage
	// collector scales well to 32 cpus.
	_MaxGcproc = 32
)
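
// Plugging in linux/amd64 as a check: _MHeapMap_TotalBits is 37, so
// _MaxMem is 1<<37 - 1 (the 128GB arena described above) and
// _MHeapMap_Bits is 37 - 13 = 24.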

// Page number (address>>pageShift)
type pageID uintptr
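
// A sketch of the conversion in both directions (illustrative only):
//
//	pg := pageID(uintptr(p) >> pageShift) // pointer to page number
//	addr := uintptr(pg) << pageShift      // page number back to a base address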

const _MaxArena32 = 2 << 30

// OS-defined helpers:
//
// sysAlloc obtains a large chunk of zeroed memory from the
// operating system, typically on the order of a hundred kilobytes
// or a megabyte.
// NOTE: sysAlloc returns OS-aligned memory, but the heap allocator
// may use larger alignment, so the caller must be careful to realign the
// memory obtained by sysAlloc.
//
// SysUnused notifies the operating system that the contents
// of the memory region are no longer needed and can be reused
// for other purposes.
// SysUsed notifies the operating system that the contents
// of the memory region are needed again.
//
// SysFree returns memory to the operating system unconditionally;
// this is only used if an out-of-memory error has been detected
// midway through an allocation.  It is okay if SysFree is a no-op.
//
// SysReserve reserves address space without allocating memory.
// If the pointer passed to it is non-nil, the caller wants the
// reservation there, but SysReserve can still choose another
// location if that one is unavailable.  On some systems and in some
// cases SysReserve will simply check that the address space is
// available and not actually reserve it.  If SysReserve returns
// non-nil, it sets *reserved to true if the address space is
// reserved, false if it has merely been checked.
// NOTE: SysReserve returns OS-aligned memory, but the heap allocator
// may use larger alignment, so the caller must be careful to realign the
// memory obtained by sysAlloc.
//
// SysMap maps previously reserved address space for use.
// The reserved argument is true if the address space was really
// reserved, not merely checked.
//
// SysFault marks an (already sysAlloc'd) region to fault
// if accessed.  Used only for debugging the runtime.

func mallocinit() {
	initSizes()

	if class_to_size[_TinySizeClass] != _TinySize {
		throw("bad TinySizeClass")
	}

	var p, bitmapSize, spansSize, pSize, limit uintptr
	var reserved bool

	// limit = runtime.memlimit();
	// See https://golang.org/issue/5049
	// TODO(rsc): Fix after 1.1.
	limit = 0

	// Set up the allocation arena, a contiguous area of memory where
	// allocated data will be found.  The arena begins with a bitmap large
	// enough to hold 4 bits per allocated word.
	if ptrSize == 8 && (limit == 0 || limit > 1<<30) {
		// On a 64-bit machine, allocate from a single contiguous reservation.
		// 128 GB (MaxMem) should be big enough for now.
		//
		// The code will work with the reservation at any address, but ask
		// SysReserve to use 0x0000XXc000000000 if possible (XX=00...7f).
		// Allocating a 128 GB region takes away 37 bits, and the amd64
		// doesn't let us choose the top 17 bits, so that leaves the 11 bits
		// in the middle of 0x00c0 for us to choose.  Choosing 0x00c0 means
		// that the valid memory addresses will begin 0x00c0, 0x00c1, ..., 0x00df.
		// In little-endian, that's c0 00, c1 00, ..., df 00. None of those are valid
		// UTF-8 sequences, and they are otherwise as far away from
		// ff (likely a common byte) as possible.  If that fails, we try other 0xXXc0
		// addresses.  An earlier attempt to use 0x11f8 caused out of memory errors
		// on OS X during thread allocations.  0x00c0 causes conflicts with
		// AddressSanitizer which reserves all memory up to 0x0100.
		// These choices are both for debuggability and to reduce the
		// odds of the conservative garbage collector not collecting memory
		// because some non-pointer block of memory had a bit pattern
		// that matched a memory address.
		//
		// Actually we reserve 136 GB (because the bitmap ends up being 8 GB)
		// but it hardly matters: e0 00 is not valid UTF-8 either.
		//
		// If this fails we fall back to the 32-bit memory mechanism.
		//
		// However, on arm64, we ignore all this advice above and slam the
		// allocation at 0x40 << 32 because when using 4k pages with 3-level
		// translation buffers, the user address space is limited to 39 bits.
		// On darwin/arm64, the address space is even smaller.
		arenaSize := round(_MaxMem, _PageSize)
		bitmapSize = arenaSize / (ptrSize * 8 / 4)
		spansSize = arenaSize / _PageSize * ptrSize
		spansSize = round(spansSize, _PageSize)
		for i := 0; i <= 0x7f; i++ {
			switch {
			case GOARCH == "arm64" && GOOS == "darwin":
				p = uintptr(i)<<40 | uintptrMask&(0x0013<<28)
			case GOARCH == "arm64":
				p = uintptr(i)<<40 | uintptrMask&(0x0040<<32)
			default:
				p = uintptr(i)<<40 | uintptrMask&(0x00c0<<32)
			}
			pSize = bitmapSize + spansSize + arenaSize + _PageSize
			p = uintptr(sysReserve(unsafe.Pointer(p), pSize, &reserved))
			if p != 0 {
				break
			}
		}
	}

	if p == 0 {
		// On a 32-bit machine, we can't typically get away
		// with a giant virtual address space reservation.
		// Instead we map the memory information bitmap
		// immediately after the data segment, large enough
		// to handle another 2GB of mappings (256 MB),
		// along with a reservation for an initial arena.
		// When that gets used up, we'll start asking the kernel
		// for any memory anywhere and hope it's in the 2GB
		// following the bitmap (presumably the executable begins
		// near the bottom of memory, so we'll have to use up
		// most of memory before the kernel resorts to giving out
		// memory before the beginning of the text segment).
		//
		// Alternatively we could reserve 512 MB bitmap, enough
		// for 4GB of mappings, and then accept any memory the
		// kernel threw at us, but normally that's a waste of 512 MB
		// of address space, which is probably too much in a 32-bit world.

		// If we fail to allocate, try again with a smaller arena.
		// This is necessary on Android L where we share a process
		// with ART, which reserves virtual memory aggressively.
		arenaSizes := []uintptr{
			512 << 20,
			256 << 20,
			128 << 20,
		}

		for _, arenaSize := range arenaSizes {
			bitmapSize = _MaxArena32 / (ptrSize * 8 / 4)
			spansSize = _MaxArena32 / _PageSize * ptrSize
			if limit > 0 && arenaSize+bitmapSize+spansSize > limit {
				bitmapSize = (limit / 9) &^ ((1 << _PageShift) - 1)
				arenaSize = bitmapSize * 8
				spansSize = arenaSize / _PageSize * ptrSize
			}
			spansSize = round(spansSize, _PageSize)

			// SysReserve treats the address we ask for, end, as a hint,
			// not as an absolute requirement.  If we ask for the end
			// of the data segment but the operating system requires
			// a little more space before we can start allocating, it will
			// give out a slightly higher pointer.  Except QEMU, which
			// is buggy, as usual: it won't adjust the pointer upward.
			// So adjust it upward a little bit ourselves: 1/4 MB to get
			// away from the running binary image and then round up
			// to a MB boundary.
			p = round(firstmoduledata.end+(1<<18), 1<<20)
			pSize = bitmapSize + spansSize + arenaSize + _PageSize
			p = uintptr(sysReserve(unsafe.Pointer(p), pSize, &reserved))
			if p != 0 {
				break
			}
		}
		if p == 0 {
			throw("runtime: cannot reserve arena virtual address space")
		}
	}

	// PageSize can be larger than the OS's definition of page size,
	// so SysReserve can give us a PageSize-unaligned pointer.
	// To overcome this we ask for PageSize more and round up the pointer.
	p1 := round(p, _PageSize)

	mheap_.spans = (**mspan)(unsafe.Pointer(p1))
	mheap_.bitmap = p1 + spansSize
	mheap_.arena_start = p1 + (spansSize + bitmapSize)
	mheap_.arena_used = mheap_.arena_start
	mheap_.arena_end = p + pSize
	mheap_.arena_reserved = reserved

	if mheap_.arena_start&(_PageSize-1) != 0 {
		println("bad pagesize", hex(p), hex(p1), hex(spansSize), hex(bitmapSize), hex(_PageSize), "start", hex(mheap_.arena_start))
		throw("misrounded allocation in mallocinit")
	}

	// Initialize the rest of the allocator.
	mHeap_Init(&mheap_, spansSize)
	_g_ := getg()
	_g_.m.mcache = allocmcache()
}
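
// The reservation carved up by mallocinit above, with p1 = round(p, _PageSize)
// (a summary of the assignments to mheap_, not additional behavior):
//
//	[p1, p1+spansSize)                       spans: one *mspan per arena page
//	[p1+spansSize, p1+spansSize+bitmapSize)  bitmap: 4 bits per heap word
//	[p1+spansSize+bitmapSize, p+pSize)       arena: arena_start up to arena_end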

// sysReserveHigh reserves space somewhere high in the address space.
// sysReserve doesn't actually reserve the full amount requested on
// 64-bit systems, because of problems with ulimit. Instead it checks
// that it can get the first 64 kB and assumes it can grab the rest as
// needed. This doesn't work well with the "let the kernel pick an address"
// mode, so don't do that. Pick a high address instead.
func sysReserveHigh(n uintptr, reserved *bool) unsafe.Pointer {
	if ptrSize == 4 {
		return sysReserve(nil, n, reserved)
	}

	for i := 0; i <= 0x7f; i++ {
		p := uintptr(i)<<40 | uintptrMask&(0x00c0<<32)
		*reserved = false
		p = uintptr(sysReserve(unsafe.Pointer(p), n, reserved))
		if p != 0 {
			return unsafe.Pointer(p)
		}
	}

	return sysReserve(nil, n, reserved)
}
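
// For example, the first candidate address (i = 0) is 0x00c000000000 and
// the last (i = 0x7f) is 0x7fc000000000, the same 0x0000XXc000000000
// pattern that mallocinit asks for on 64-bit systems.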

func mHeap_SysAlloc(h *mheap, n uintptr) unsafe.Pointer {
	if n > uintptr(h.arena_end)-uintptr(h.arena_used) {
		// We are in 32-bit mode; maybe we didn't use all of the possible address space yet.
		// Reserve some more space.
		p_size := round(n+_PageSize, 256<<20)
		new_end := h.arena_end + p_size
		if new_end <= h.arena_start+_MaxArena32 {
			// TODO: It would be bad if part of the arena
			// is reserved and part is not.
			var reserved bool
			p := uintptr(sysReserve((unsafe.Pointer)(h.arena_end), p_size, &reserved))
			if p == h.arena_end {
				h.arena_end = new_end
				h.arena_reserved = reserved
			} else if p+p_size <= h.arena_start+_MaxArena32 {
				// Keep everything page-aligned.
				// Our pages are bigger than hardware pages.
				h.arena_end = p + p_size
				h.arena_used = p + (-uintptr(p) & (_PageSize - 1))
				h.arena_reserved = reserved
			} else {
				var stat uint64
				sysFree((unsafe.Pointer)(p), p_size, &stat)
			}
		}
	}

	if n <= uintptr(h.arena_end)-uintptr(h.arena_used) {
		// Keep taking from our reservation.
		p := h.arena_used
		sysMap((unsafe.Pointer)(p), n, h.arena_reserved, &memstats.heap_sys)
		mHeap_MapBits(h, p+n)
		mHeap_MapSpans(h, p+n)
		h.arena_used = p + n
		if raceenabled {
			racemapshadow((unsafe.Pointer)(p), n)
		}

		if uintptr(p)&(_PageSize-1) != 0 {
			throw("misrounded allocation in MHeap_SysAlloc")
		}
		return (unsafe.Pointer)(p)
	}

	// If using 64-bit, our reservation is all we have.
	if uintptr(h.arena_end)-uintptr(h.arena_start) >= _MaxArena32 {
		return nil
	}

	// On 32-bit, once the reservation is gone we can
	// try to get memory at a location chosen by the OS
	// and hope that it is in the range we allocated bitmap for.
	p_size := round(n, _PageSize) + _PageSize
	p := uintptr(sysAlloc(p_size, &memstats.heap_sys))
	if p == 0 {
		return nil
	}

	if p < h.arena_start || uintptr(p)+p_size-uintptr(h.arena_start) >= _MaxArena32 {
		print("runtime: memory allocated by OS (", p, ") not in usable range [", hex(h.arena_start), ",", hex(h.arena_start+_MaxArena32), ")\n")
		sysFree((unsafe.Pointer)(p), p_size, &memstats.heap_sys)
		return nil
	}

	p_end := p + p_size
	p += -p & (_PageSize - 1)
	if uintptr(p)+n > uintptr(h.arena_used) {
		mHeap_MapBits(h, p+n)
		mHeap_MapSpans(h, p+n)
		h.arena_used = p + n
		if p_end > h.arena_end {
			h.arena_end = p_end
		}
		if raceenabled {
			racemapshadow((unsafe.Pointer)(p), n)
		}
	}

	if uintptr(p)&(_PageSize-1) != 0 {
		throw("misrounded allocation in MHeap_SysAlloc")
	}
	return (unsafe.Pointer)(p)
}
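
// The expression -p & (_PageSize-1) used above rounds p up to the next
// page boundary.  A worked example with the 8KB page size: for p = 0x2001,
// -p & 0x1fff = 0x1fff, and p + 0x1fff = 0x4000, the next boundary; for an
// already aligned p, -p & 0x1fff = 0 and p is unchanged.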

// base address for all 0-byte allocations
var zerobase uintptr

const (
	// flags to malloc
	_FlagNoScan = 1 << 0 // GC doesn't have to scan object
	_FlagNoZero = 1 << 1 // don't zero memory
)

// Allocate an object of size bytes.
// Small objects are allocated from the per-P cache's free lists.
// Large objects (> 32 kB) are allocated straight from the heap.
func mallocgc(size uintptr, typ *_type, flags uint32) unsafe.Pointer {
	if gcphase == _GCmarktermination {
		throw("mallocgc called with gcphase == _GCmarktermination")
	}

	if size == 0 {
		return unsafe.Pointer(&zerobase)
	}

	if flags&flagNoScan == 0 && typ == nil {
		throw("malloc missing type")
	}

	if debug.sbrk != 0 {
		align := uintptr(16)
		if typ != nil {
			align = uintptr(typ.align)
		}
		return persistentalloc(size, align, &memstats.other_sys)
	}

	// Set mp.mallocing to keep from being preempted by GC.
	mp := acquirem()
	if mp.mallocing != 0 {
		throw("malloc deadlock")
	}
	if mp.gsignal == getg() {
		throw("malloc during signal")
	}
	mp.mallocing = 1

	shouldhelpgc := false
	dataSize := size
	c := gomcache()
	var s *mspan
	var x unsafe.Pointer
	if size <= maxSmallSize {
		if flags&flagNoScan != 0 && size < maxTinySize {
			// Tiny allocator.
			//
			// The tiny allocator combines several tiny allocation requests
			// into a single memory block. The resulting memory block
			// is freed when all subobjects are unreachable. The subobjects
			// must be FlagNoScan (have no pointers); this ensures that
			// the amount of potentially wasted memory is bounded.
			//
			// The size of the memory block used for combining (maxTinySize)
			// is tunable. The current setting is 16 bytes, which implies
			// 2x worst-case memory wastage (when all but one of the
			// subobjects are unreachable).
			// 8 bytes would result in no wastage at all but provides fewer
			// opportunities for combining.
			// 32 bytes provides more opportunities for combining
			// but can lead to 4x worst-case wastage.
			// The best-case saving is 8x regardless of block size.
			//
			// Objects obtained from the tiny allocator must not be freed
			// explicitly, so when an object will be freed explicitly, we
			// ensure that its size >= maxTinySize.
			//
			// SetFinalizer has a special case for objects potentially coming
			// from the tiny allocator; in that case it allows setting
			// finalizers for an inner byte of a memory block.
			//
			// The main targets of the tiny allocator are small strings and
			// standalone escaping variables. On a json benchmark
			// the allocator reduces the number of allocations by ~12% and
			// the heap size by ~20%.
			off := c.tinyoffset
			// Align tiny pointer for required (conservative) alignment.
			if size&7 == 0 {
				off = round(off, 8)
			} else if size&3 == 0 {
				off = round(off, 4)
			} else if size&1 == 0 {
				off = round(off, 2)
			}
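			// For example, a 12-byte request has size&3 == 0 but
			// size&7 != 0, so off is rounded up to a multiple of 4,
			// while a 7-byte request is odd and needs no rounding.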
			if off+size <= maxTinySize && c.tiny != nil {
				// The object fits into existing tiny block.
				x = add(c.tiny, off)
				c.tinyoffset = off + size
				c.local_tinyallocs++
				mp.mallocing = 0
				releasem(mp)
				return x
			}
			// Allocate a new maxTinySize block.
			s = c.alloc[tinySizeClass]
			v := s.freelist
			if v.ptr() == nil {
				systemstack(func() {
					mCache_Refill(c, tinySizeClass)
				})
				shouldhelpgc = true
				s = c.alloc[tinySizeClass]
				v = s.freelist
			}
			s.freelist = v.ptr().next
			s.ref++
			// prefetchnta offers the best performance; see the change list message.
			prefetchnta(uintptr(v.ptr().next))
			x = unsafe.Pointer(v)
			(*[2]uint64)(x)[0] = 0
			(*[2]uint64)(x)[1] = 0
			// See if we need to replace the existing tiny block with the new one
			// based on the amount of remaining free space.
			if size < c.tinyoffset {
				c.tiny = x
				c.tinyoffset = size
			}
			size = maxTinySize
		} else {
			var sizeclass int8
			if size <= 1024-8 {
				sizeclass = size_to_class8[(size+7)>>3]
			} else {
				sizeclass = size_to_class128[(size-1024+127)>>7]
			}
			size = uintptr(class_to_size[sizeclass])
			s = c.alloc[sizeclass]
			v := s.freelist
			if v.ptr() == nil {
				systemstack(func() {
					mCache_Refill(c, int32(sizeclass))
				})
				shouldhelpgc = true
				s = c.alloc[sizeclass]
				v = s.freelist
			}
			s.freelist = v.ptr().next
			s.ref++
			// prefetchnta offers the best performance; see the change list message.
			prefetchnta(uintptr(v.ptr().next))
			x = unsafe.Pointer(v)
			if flags&flagNoZero == 0 {
				v.ptr().next = 0
				if size > 2*ptrSize && ((*[2]uintptr)(x))[1] != 0 {
					memclr(unsafe.Pointer(v), size)
				}
			}
		}
		c.local_cachealloc += size
	} else {
		var s *mspan
		shouldhelpgc = true
		systemstack(func() {
			s = largeAlloc(size, uint32(flags))
		})
		x = unsafe.Pointer(uintptr(s.start << pageShift))
		size = uintptr(s.elemsize)
	}

	if flags&flagNoScan != 0 {
		// All objects are pre-marked as noscan. Nothing to do.
	} else {
		// If allocating a defer+arg block, now that we've picked a malloc size
		// large enough to hold everything, cut the "asked for" size down to
		// just the defer header, so that the GC bitmap will record the arg block
		// as containing nothing at all (as if it were unused space at the end of
		// a malloc block caused by size rounding).
		// The defer arg areas are scanned as part of scanstack.
		if typ == deferType {
			dataSize = unsafe.Sizeof(_defer{})
		}
		heapBitsSetType(uintptr(x), size, dataSize, typ)
		if dataSize > typ.size {
			// Array allocation. If there are any
			// pointers, GC has to scan to the last
			// element.
			if typ.ptrdata != 0 {
				c.local_scan += dataSize - typ.size + typ.ptrdata
			}
		} else {
			c.local_scan += typ.ptrdata
		}
	}

	// GCmarktermination allocates black.
	// All slots hold nil, so no scanning is needed.
	// This may be racing with GC so do it atomically if there can be
	// a race marking the bit.
	if gcphase == _GCmarktermination {
		systemstack(func() {
			gcmarknewobject_m(uintptr(x), size)
		})
	}

	if raceenabled {
		racemalloc(x, size)
	}

	mp.mallocing = 0
	releasem(mp)

	if debug.allocfreetrace != 0 {
		tracealloc(x, size, typ)
	}

	if rate := MemProfileRate; rate > 0 {
		if size < uintptr(rate) && int32(size) < c.next_sample {
			c.next_sample -= int32(size)
		} else {
			mp := acquirem()
			profilealloc(mp, x, size)
			releasem(mp)
		}
	}

	if shouldhelpgc && shouldtriggergc() {
		startGC(gcBackgroundMode)
	} else if gcBlackenEnabled != 0 {
		// Assist garbage collector. We delay this until the
		// epilogue so that it doesn't interfere with the
		// inner workings of malloc, such as mcache refills that
		// might happen while doing the gcAssistAlloc.
		gcAssistAlloc(size, shouldhelpgc)
	}

	return x
}

func largeAlloc(size uintptr, flag uint32) *mspan {
	// print("largeAlloc size=", size, "\n")

	if size+_PageSize < size {
		throw("out of memory")
	}
	npages := size >> _PageShift
	if size&_PageMask != 0 {
		npages++
	}
	s := mHeap_Alloc(&mheap_, npages, 0, true, flag&_FlagNoZero == 0)
	if s == nil {
		throw("out of memory")
	}
	s.limit = uintptr(s.start)<<_PageShift + size
	heapBitsForSpan(s.base()).initSpan(s.layout())
	return s
}
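
// A worked example of the page rounding above, using the 8KB pages defined
// earlier: a 40960-byte request is exactly 5 pages (40960>>13 = 5 and
// 40960&_PageMask == 0), while 40961 bytes leaves a remainder and is
// rounded up to 6 pages.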

// implementation of new builtin
func newobject(typ *_type) unsafe.Pointer {
	flags := uint32(0)
	if typ.kind&kindNoPointers != 0 {
		flags |= flagNoScan
	}
	return mallocgc(uintptr(typ.size), typ, flags)
}
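
// A plain use of the new builtin compiles into a call of newobject,
// e.g. (sketch; T stands for any concrete type):
//
//	p := new(T) // lowered to newobject(<runtime type descriptor of T>)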

//go:linkname reflect_unsafe_New reflect.unsafe_New
func reflect_unsafe_New(typ *_type) unsafe.Pointer {
	return newobject(typ)
}

// implementation of make builtin for slices
func newarray(typ *_type, n uintptr) unsafe.Pointer {
	flags := uint32(0)
	if typ.kind&kindNoPointers != 0 {
		flags |= flagNoScan
	}
	if int(n) < 0 || (typ.size > 0 && n > _MaxMem/uintptr(typ.size)) {
		panic("runtime: allocation size out of range")
	}
	return mallocgc(uintptr(typ.size)*n, typ, flags)
}
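
// The backing array of a slice made with the make builtin comes from
// newarray, e.g. (sketch; T stands for any concrete element type):
//
//	s := make([]T, n) // backing array from newarray(<type descriptor of T>, n)
//
// The n > _MaxMem/typ.size check above rejects any count whose product with
// the element size would wrap around or exceed the maximum heap size before
// the multiplication in the mallocgc call is performed.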

//go:linkname reflect_unsafe_NewArray reflect.unsafe_NewArray
func reflect_unsafe_NewArray(typ *_type, n uintptr) unsafe.Pointer {
	return newarray(typ, n)
}

// rawmem returns a chunk of pointerless memory.  It is
// not zeroed.
func rawmem(size uintptr) unsafe.Pointer {
	return mallocgc(size, nil, flagNoScan|flagNoZero)
}

func profilealloc(mp *m, x unsafe.Pointer, size uintptr) {
	c := mp.mcache
	rate := MemProfileRate
	if size < uintptr(rate) {
		// pick next profile time
		// If you change this, also change allocmcache.
		if rate > 0x3fffffff { // make 2*rate not overflow
			rate = 0x3fffffff
		}
		next := int32(fastrand1()) % (2 * int32(rate))
		// Subtract the "remainder" of the current allocation.
		// Otherwise objects that are close in size to sampling rate
		// will be under-sampled, because we consistently discard this remainder.
		next -= (int32(size) - c.next_sample)
		if next < 0 {
			next = 0
		}
		c.next_sample = next
	}

	mProf_Malloc(x, size)
}
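
// Because next_sample is drawn uniformly from [0, 2*rate), its mean is rate,
// so in expectation one allocation per MemProfileRate bytes is sampled, while
// the randomization keeps the profiler from locking onto a periodic
// allocation pattern.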

type persistentAlloc struct {
	base unsafe.Pointer
	off  uintptr
}

var globalAlloc struct {
	mutex
	persistentAlloc
}

// Wrapper around sysAlloc that can allocate small chunks.
// There is no associated free operation.
// Intended for things like function/type/debug-related persistent data.
// If align is 0, uses default align (currently 8).
func persistentalloc(size, align uintptr, sysStat *uint64) unsafe.Pointer {
	const (
		chunk    = 256 << 10
		maxBlock = 64 << 10 // VM reservation granularity is 64K on windows
	)

	if size == 0 {
		throw("persistentalloc: size == 0")
	}
	if align != 0 {
		if align&(align-1) != 0 {
			throw("persistentalloc: align is not a power of 2")
		}
		if align > _PageSize {
			throw("persistentalloc: align is too large")
		}
	} else {
		align = 8
	}

	if size >= maxBlock {
		return sysAlloc(size, sysStat)
	}

	mp := acquirem()
	var persistent *persistentAlloc
	if mp != nil && mp.p != 0 {
		persistent = &mp.p.ptr().palloc
	} else {
		lock(&globalAlloc.mutex)
		persistent = &globalAlloc.persistentAlloc
	}
	persistent.off = round(persistent.off, align)
	if persistent.off+size > chunk || persistent.base == nil {
		persistent.base = sysAlloc(chunk, &memstats.other_sys)
		if persistent.base == nil {
			if persistent == &globalAlloc.persistentAlloc {
				unlock(&globalAlloc.mutex)
			}
			throw("runtime: cannot allocate memory")
		}
		persistent.off = 0
	}
	p := add(persistent.base, persistent.off)
	persistent.off += size
	releasem(mp)
	if persistent == &globalAlloc.persistentAlloc {
		unlock(&globalAlloc.mutex)
	}

	if sysStat != &memstats.other_sys {
		mSysStatInc(sysStat, size)
		mSysStatDec(&memstats.other_sys, size)
	}
	return p
}
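
// A typical call allocates a small permanent structure, e.g. (sketch; the
// particular arguments are illustrative only):
//
//	p := persistentalloc(unsafe.Sizeof(mspan{}), 0, &memstats.other_sys)
//
// Chunks are bump-allocated: off is rounded up to align, the object is carved
// out at base+off, and off advances by size; there is no corresponding free.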