Commit 6dda7b2f authored by Austin Clements

runtime: don't hard-code physical page size

Now that the runtime fetches the true physical page size from the OS,
make the physical page size used by heap growth a variable instead of
a constant. The page size isn't consulted on any performance-critical
path, so making it a variable shouldn't be an issue.
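
For context: outside the runtime, the OS-reported page size is available
via os.Getpagesize. A minimal, hypothetical sketch (not part of this
change) of querying it and applying the same power-of-2 sanity check
mallocinit uses below:

    package main

    import (
        "fmt"
        "os"
    )

    func main() {
        // Ask the OS for the page size rather than assuming 4096.
        pageSize := os.Getpagesize()
        fmt.Println("system page size:", pageSize)

        // The same sanity check mallocinit applies to physPageSize.
        if pageSize&(pageSize-1) != 0 {
            fmt.Println("bad system page size")
        }
    }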

sys.PhysPageSize is also renamed to sys.DefaultPhysPageSize to make it
clear that it's not necessarily the true page size. There are no uses
of this constant any more, but we'll keep it around for now.

Updates #12480 and #10180.

Change-Id: I6c23b9df860db309c38c8287a703c53817754f03
Reviewed-on: https://go-review.googlesource.com/25022
Run-TryBot: Austin Clements <austin@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Rick Hudson <rlh@golang.org>
parent 276a52de
@@ -8,7 +8,7 @@ const (
 	ArchFamily = I386
 	BigEndian = 0
 	CacheLineSize = 64
-	PhysPageSize = GoosNacl*65536 + (1-GoosNacl)*4096 // 4k normally; 64k on NaCl
+	DefaultPhysPageSize = GoosNacl*65536 + (1-GoosNacl)*4096 // 4k normally; 64k on NaCl
 	PCQuantum = 1
 	Int64Align = 4
 	HugePageSize = 1 << 21

@@ -8,7 +8,7 @@ const (
 	ArchFamily = AMD64
 	BigEndian = 0
 	CacheLineSize = 64
-	PhysPageSize = 4096
+	DefaultPhysPageSize = 4096
 	PCQuantum = 1
 	Int64Align = 8
 	HugePageSize = 1 << 21

@@ -8,7 +8,7 @@ const (
 	ArchFamily = AMD64
 	BigEndian = 0
 	CacheLineSize = 64
-	PhysPageSize = 65536*GoosNacl + 4096*(1-GoosNacl)
+	DefaultPhysPageSize = 65536*GoosNacl + 4096*(1-GoosNacl)
 	PCQuantum = 1
 	Int64Align = 8
 	HugePageSize = 1 << 21

@@ -8,7 +8,7 @@ const (
 	ArchFamily = ARM
 	BigEndian = 0
 	CacheLineSize = 32
-	PhysPageSize = 65536
+	DefaultPhysPageSize = 65536
 	PCQuantum = 4
 	Int64Align = 4
 	HugePageSize = 0

@@ -8,7 +8,7 @@ const (
 	ArchFamily = ARM64
 	BigEndian = 0
 	CacheLineSize = 32
-	PhysPageSize = 65536
+	DefaultPhysPageSize = 65536
 	PCQuantum = 4
 	Int64Align = 8
 	HugePageSize = 0

@@ -8,7 +8,7 @@ const (
 	ArchFamily = MIPS64
 	BigEndian = 1
 	CacheLineSize = 32
-	PhysPageSize = 16384
+	DefaultPhysPageSize = 16384
 	PCQuantum = 4
 	Int64Align = 8
 	HugePageSize = 0

@@ -8,7 +8,7 @@ const (
 	ArchFamily = MIPS64
 	BigEndian = 0
 	CacheLineSize = 32
-	PhysPageSize = 16384
+	DefaultPhysPageSize = 16384
 	PCQuantum = 4
 	Int64Align = 8
 	HugePageSize = 0

@@ -8,7 +8,7 @@ const (
 	ArchFamily = PPC64
 	BigEndian = 1
 	CacheLineSize = 128
-	PhysPageSize = 65536
+	DefaultPhysPageSize = 65536
 	PCQuantum = 4
 	Int64Align = 8
 	HugePageSize = 0

@@ -8,7 +8,7 @@ const (
 	ArchFamily = PPC64
 	BigEndian = 0
 	CacheLineSize = 128
-	PhysPageSize = 65536
+	DefaultPhysPageSize = 65536
 	PCQuantum = 4
 	Int64Align = 8
 	HugePageSize = 0

@@ -8,7 +8,7 @@ const (
 	ArchFamily = S390X
 	BigEndian = 1
 	CacheLineSize = 256
-	PhysPageSize = 4096
+	DefaultPhysPageSize = 4096
 	PCQuantum = 2
 	Int64Align = 8
 	HugePageSize = 0

@@ -230,13 +230,13 @@ func mallocinit() {
 		// The OS init code failed to fetch the physical page size.
 		throw("failed to get system page size")
 	}
-	if sys.PhysPageSize < physPageSize {
-		print("runtime: kernel page size (", physPageSize, ") is larger than runtime page size (", sys.PhysPageSize, ")\n")
-		throw("bad kernel page size")
+	if physPageSize < minPhysPageSize {
+		print("system page size (", physPageSize, ") is smaller than minimum page size (", minPhysPageSize, ")\n")
+		throw("bad system page size")
 	}
-	if sys.PhysPageSize%physPageSize != 0 {
-		print("runtime: runtime page size (", sys.PhysPageSize, ") is not a multiple of kernel page size (", physPageSize, ")\n")
-		throw("bad kernel page size")
+	if physPageSize&(physPageSize-1) != 0 {
+		print("system page size (", physPageSize, ") must be a power of 2\n")
+		throw("bad system page size")
 	}
 	var p, bitmapSize, spansSize, pSize, limit uintptr

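The new check relies on the standard power-of-two bit trick: for p > 0,
p&(p-1) clears the lowest set bit, so the result is zero exactly when a
single bit is set. A standalone sketch (hypothetical values, not part of
the change):

    package main

    import "fmt"

    // isPowerOfTwo applies the same test mallocinit uses on physPageSize.
    func isPowerOfTwo(p uintptr) bool {
        return p != 0 && p&(p-1) == 0
    }

    func main() {
        for _, p := range []uintptr{4096, 16384, 65536, 12288} {
            fmt.Println(p, isPowerOfTwo(p)) // 12288 = 3*4096: not a power of 2
        }
    }
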
@@ -151,7 +151,7 @@ func (h *mheap) mapBits(arena_used uintptr) {
 	n := (arena_used - mheap_.arena_start) / heapBitmapScale
 	n = round(n, bitmapChunk)
-	n = round(n, sys.PhysPageSize)
+	n = round(n, physPageSize)
 	if h.bitmap_mapped >= n {
 		return
 	}

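mapBits (like mapSpans below) rounds the mapped size up to a page
boundary before asking the OS for memory. A sketch of the rounding
idiom; roundUp is a hypothetical stand-in for the runtime helper and
assumes a power-of-2 alignment, which mallocinit now guarantees for
physPageSize:

    package main

    import "fmt"

    // roundUp returns n rounded up to a multiple of a (a must be a power of 2).
    func roundUp(n, a uintptr) uintptr {
        return (n + a - 1) &^ (a - 1)
    }

    func main() {
        fmt.Println(roundUp(5000, 4096))  // 8192
        fmt.Println(roundUp(5000, 65536)) // 65536: larger pages map more at once
    }
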
@@ -22,17 +22,14 @@ const (
 var addrspace_vec [1]byte

 func addrspace_free(v unsafe.Pointer, n uintptr) bool {
-	// Step by the minimum possible physical page size. This is
-	// safe even if we have the wrong physical page size; mincore
-	// will just return EINVAL for unaligned addresses.
-	for off := uintptr(0); off < n; off += minPhysPageSize {
+	for off := uintptr(0); off < n; off += physPageSize {
 		// Use a length of 1 byte, which the kernel will round
 		// up to one physical page regardless of the true
 		// physical page size.
 		errval := mincore(unsafe.Pointer(uintptr(v)+off), 1, &addrspace_vec[0])
 		if errval == -_EINVAL {
 			// Address is not a multiple of the physical
-			// page size. That's fine.
+			// page size. Shouldn't happen, but just ignore it.
 			continue
 		}
 		// ENOMEM means unmapped, which is what we want.
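
For readers unfamiliar with the trick: mincore with a length of 1 asks
about the single page containing the address, and ENOMEM for that page
means it is unmapped. A Linux-only sketch of the same probe from user
code (pageMapped is hypothetical, not the runtime's helper):

    package main

    import (
        "fmt"
        "os"
        "syscall"
        "unsafe"
    )

    // pageMapped reports whether the page containing addr is mapped,
    // using the mincore probe addrspace_free relies on.
    func pageMapped(addr uintptr) bool {
        addr &^= uintptr(os.Getpagesize() - 1) // mincore wants an aligned address
        var vec [1]byte
        _, _, errno := syscall.Syscall(syscall.SYS_MINCORE,
            addr, 1, uintptr(unsafe.Pointer(&vec[0])))
        return errno != syscall.ENOMEM // ENOMEM means "unmapped"
    }

    func main() {
        x := 42
        fmt.Println(pageMapped(uintptr(unsafe.Pointer(&x)))) // true
    }
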
@@ -138,7 +135,7 @@ func sysUnused(v unsafe.Pointer, n uintptr) {
 		}
 	}

-	if uintptr(v)&(sys.PhysPageSize-1) != 0 || n&(sys.PhysPageSize-1) != 0 {
+	if uintptr(v)&(physPageSize-1) != 0 || n&(physPageSize-1) != 0 {
 		// madvise will round this to any physical page
 		// *covered* by this range, so an unaligned madvise
 		// will release more memory than intended.

@@ -401,7 +401,7 @@ func (h *mheap) mapSpans(arena_used uintptr) {
 	n := arena_used
 	n -= h.arena_start
 	n = n / _PageSize * sys.PtrSize
-	n = round(n, sys.PhysPageSize)
+	n = round(n, physPageSize)
 	if h.spans_mapped >= n {
 		return
 	}

@@ -909,14 +909,14 @@ func scavengelist(list *mSpanList, now, limit uint64) uintptr {
 		if (now-uint64(s.unusedsince)) > limit && s.npreleased != s.npages {
 			start := s.base()
 			end := start + s.npages<<_PageShift
-			if sys.PhysPageSize > _PageSize {
+			if physPageSize > _PageSize {
 				// We can only release pages in
-				// PhysPageSize blocks, so round start
+				// physPageSize blocks, so round start
 				// and end in. (Otherwise, madvise
 				// will round them *out* and release
 				// more memory than we want.)
-				start = (start + sys.PhysPageSize - 1) &^ (sys.PhysPageSize - 1)
-				end &^= sys.PhysPageSize - 1
+				start = (start + physPageSize - 1) &^ (physPageSize - 1)
+				end &^= physPageSize - 1
 				if end <= start {
 					// start and end don't span a
 					// whole physical page.

@@ -926,7 +926,7 @@ func scavengelist(list *mSpanList, now, limit uint64) uintptr {
 			len := end - start
 			released := len - (s.npreleased << _PageShift)
-			if sys.PhysPageSize > _PageSize && released == 0 {
+			if physPageSize > _PageSize && released == 0 {
 				continue
 			}
 			memstats.heap_released += uint64(released)

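The &^ (AND NOT) operator clears the low alignment bits, so start is
rounded up and end is rounded down to physical-page boundaries. A worked
sketch with hypothetical addresses and a 64 KB physical page:

    package main

    import "fmt"

    func main() {
        const physPageSize uintptr = 65536 // hypothetical 64 KB physical page

        // A span covering [0x1a000, 0x3c000); neither end is 64 KB-aligned.
        start, end := uintptr(0x1a000), uintptr(0x3c000)

        // Round inward, as scavengelist does, so madvise only ever sees
        // pages lying wholly inside the span.
        start = (start + physPageSize - 1) &^ (physPageSize - 1)
        end &^= physPageSize - 1

        fmt.Printf("release [%#x, %#x)\n", start, end) // release [0x20000, 0x30000)
    }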