Commit 4c2465d4 authored by Matthew Dempsky

runtime: use unsafe.Pointer(x) instead of (unsafe.Pointer)(x)

This isn't C anymore.  No binary change to pkg/linux_amd64/runtime.a.

Change-Id: I24d66b0f5ac888f432b874aac684b1395e7c8345
Reviewed-on: https://go-review.googlesource.com/15903
Reviewed-by: Brad Fitzpatrick <bradfitz@golang.org>
parent 67722fea
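For context on the change described above: both spellings are valid Go conversion syntax and compile to the same code (hence no binary change). Go only requires parentheses around the type in a conversion when the type literal starts with * or <- and would otherwise be ambiguous; a named type like unsafe.Pointer never needs them, so the C-style parentheses are just noise. A minimal sketch (hypothetical variable names, not from the diff) showing the two forms are equivalent:

	package main

	import (
		"fmt"
		"unsafe"
	)

	func main() {
		x := 42 // hypothetical value, used only for illustration

		// C-style spelling: parentheses around the type name are legal Go,
		// but redundant for a named type such as unsafe.Pointer.
		p1 := (unsafe.Pointer)(&x)

		// Idiomatic spelling adopted by this commit.
		p2 := unsafe.Pointer(&x)

		fmt.Println(p1 == p2) // true: both conversions yield the same pointer
	}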
@@ -70,7 +70,7 @@ func dwrite(data unsafe.Pointer, len uintptr) {
 		return
 	}
-	write(dumpfd, (unsafe.Pointer)(&buf), int32(nbuf))
+	write(dumpfd, unsafe.Pointer(&buf), int32(nbuf))
 	if len >= bufSize {
 		write(dumpfd, data, int32(len))
 		nbuf = 0
@@ -85,7 +85,7 @@ func dwritebyte(b byte) {
 }
 func flush() {
-	write(dumpfd, (unsafe.Pointer)(&buf), int32(nbuf))
+	write(dumpfd, unsafe.Pointer(&buf), int32(nbuf))
 	nbuf = 0
 }
...
@@ -104,7 +104,7 @@ func unlock(l *mutex) {
 		} else {
 			// Other M's are waiting for the lock.
 			// Dequeue an M.
-			mp = (*m)((unsafe.Pointer)(v &^ locked))
+			mp = (*m)(unsafe.Pointer(v &^ locked))
 			if casuintptr(&l.key, v, mp.nextwaitm) {
 				// Dequeued an M. Wake it.
 				semawakeup(mp)
...
@@ -397,7 +397,7 @@ func mHeap_SysAlloc(h *mheap, n uintptr) unsafe.Pointer {
 			// TODO: It would be bad if part of the arena
 			// is reserved and part is not.
 			var reserved bool
-			p := uintptr(sysReserve((unsafe.Pointer)(h.arena_end), p_size, &reserved))
+			p := uintptr(sysReserve(unsafe.Pointer(h.arena_end), p_size, &reserved))
 			if p == 0 {
 				return nil
 			}
@@ -415,7 +415,7 @@ func mHeap_SysAlloc(h *mheap, n uintptr) unsafe.Pointer {
 				h.arena_reserved = reserved
 			} else {
 				var stat uint64
-				sysFree((unsafe.Pointer)(p), p_size, &stat)
+				sysFree(unsafe.Pointer(p), p_size, &stat)
 			}
 		}
 	}
@@ -423,18 +423,18 @@ func mHeap_SysAlloc(h *mheap, n uintptr) unsafe.Pointer {
 	if n <= uintptr(h.arena_end)-uintptr(h.arena_used) {
 		// Keep taking from our reservation.
 		p := h.arena_used
-		sysMap((unsafe.Pointer)(p), n, h.arena_reserved, &memstats.heap_sys)
+		sysMap(unsafe.Pointer(p), n, h.arena_reserved, &memstats.heap_sys)
 		mHeap_MapBits(h, p+n)
 		mHeap_MapSpans(h, p+n)
 		h.arena_used = p + n
 		if raceenabled {
-			racemapshadow((unsafe.Pointer)(p), n)
+			racemapshadow(unsafe.Pointer(p), n)
 		}
 		if uintptr(p)&(_PageSize-1) != 0 {
 			throw("misrounded allocation in MHeap_SysAlloc")
 		}
-		return (unsafe.Pointer)(p)
+		return unsafe.Pointer(p)
 	}
 	// If using 64-bit, our reservation is all we have.
@@ -453,7 +453,7 @@ func mHeap_SysAlloc(h *mheap, n uintptr) unsafe.Pointer {
 	if p < h.arena_start || uintptr(p)+p_size-uintptr(h.arena_start) >= _MaxArena32 {
 		print("runtime: memory allocated by OS (", p, ") not in usable range [", hex(h.arena_start), ",", hex(h.arena_start+_MaxArena32), ")\n")
-		sysFree((unsafe.Pointer)(p), p_size, &memstats.heap_sys)
+		sysFree(unsafe.Pointer(p), p_size, &memstats.heap_sys)
 		return nil
 	}
@@ -467,14 +467,14 @@ func mHeap_SysAlloc(h *mheap, n uintptr) unsafe.Pointer {
 			h.arena_end = p_end
 		}
 		if raceenabled {
-			racemapshadow((unsafe.Pointer)(p), n)
+			racemapshadow(unsafe.Pointer(p), n)
 		}
 	}
 	if uintptr(p)&(_PageSize-1) != 0 {
 		throw("misrounded allocation in MHeap_SysAlloc")
 	}
-	return (unsafe.Pointer)(p)
+	return unsafe.Pointer(p)
 }
 // base address for all 0-byte allocations
...
@@ -10,7 +10,7 @@ import "unsafe"
 // which prevents us from allocating more stack.
 //go:nosplit
 func sysAlloc(n uintptr, sysStat *uint64) unsafe.Pointer {
-	v := (unsafe.Pointer)(mmap(nil, n, _PROT_READ|_PROT_WRITE, _MAP_ANON|_MAP_PRIVATE, -1, 0))
+	v := unsafe.Pointer(mmap(nil, n, _PROT_READ|_PROT_WRITE, _MAP_ANON|_MAP_PRIVATE, -1, 0))
 	if uintptr(v) < 4096 {
 		return nil
 	}
@@ -40,7 +40,7 @@ func sysFault(v unsafe.Pointer, n uintptr) {
 func sysReserve(v unsafe.Pointer, n uintptr, reserved *bool) unsafe.Pointer {
 	*reserved = true
-	p := (unsafe.Pointer)(mmap(v, n, _PROT_NONE, _MAP_ANON|_MAP_PRIVATE, -1, 0))
+	p := unsafe.Pointer(mmap(v, n, _PROT_NONE, _MAP_ANON|_MAP_PRIVATE, -1, 0))
 	if uintptr(p) < 4096 {
 		return nil
 	}
@@ -53,7 +53,7 @@ const (
 func sysMap(v unsafe.Pointer, n uintptr, reserved bool, sysStat *uint64) {
 	mSysStatInc(sysStat, n)
-	p := (unsafe.Pointer)(mmap(v, n, _PROT_READ|_PROT_WRITE, _MAP_ANON|_MAP_FIXED|_MAP_PRIVATE, -1, 0))
+	p := unsafe.Pointer(mmap(v, n, _PROT_READ|_PROT_WRITE, _MAP_ANON|_MAP_FIXED|_MAP_PRIVATE, -1, 0))
 	if uintptr(p) == _ENOMEM {
 		throw("runtime: out of memory")
 	}
...
@@ -68,7 +68,7 @@ func fixAlloc_Alloc(f *fixalloc) unsafe.Pointer {
 		f.nchunk = _FixAllocChunk
 	}
-	v := (unsafe.Pointer)(f.chunk)
+	v := unsafe.Pointer(f.chunk)
 	if f.first != nil {
 		fn := *(*func(unsafe.Pointer, unsafe.Pointer))(unsafe.Pointer(&f.first))
 		fn(f.arg, v)
...
@@ -558,7 +558,7 @@ HaveSpan:
 		throw("still in list")
 	}
 	if s.npreleased > 0 {
-		sysUsed((unsafe.Pointer)(s.start<<_PageShift), s.npages<<_PageShift)
+		sysUsed(unsafe.Pointer(s.start<<_PageShift), s.npages<<_PageShift)
 		memstats.heap_released -= uint64(s.npreleased << _PageShift)
 		s.npreleased = 0
 	}
@@ -776,7 +776,7 @@ func mHeap_FreeSpanLocked(h *mheap, s *mspan, acctinuse, acctidle bool, unusedsi
 			h_spans[p] = s
 			mSpanList_Remove(t)
 			t.state = _MSpanDead
-			fixAlloc_Free(&h.spanalloc, (unsafe.Pointer)(t))
+			fixAlloc_Free(&h.spanalloc, unsafe.Pointer(t))
 		}
 	}
 	if (p+s.npages)*ptrSize < h.spans_mapped {
@@ -788,7 +788,7 @@ func mHeap_FreeSpanLocked(h *mheap, s *mspan, acctinuse, acctidle bool, unusedsi
 			h_spans[p+s.npages-1] = s
 			mSpanList_Remove(t)
 			t.state = _MSpanDead
-			fixAlloc_Free(&h.spanalloc, (unsafe.Pointer)(t))
+			fixAlloc_Free(&h.spanalloc, unsafe.Pointer(t))
 		}
 	}
@@ -821,7 +821,7 @@ func scavengelist(list *mspan, now, limit uint64) uintptr {
 			memstats.heap_released += uint64(released)
 			sumreleased += released
 			s.npreleased = s.npages
-			sysUnused((unsafe.Pointer)(s.start<<_PageShift), s.npages<<_PageShift)
+			sysUnused(unsafe.Pointer(s.start<<_PageShift), s.npages<<_PageShift)
 		}
 	}
 	return sumreleased
@@ -1064,7 +1064,7 @@ func addfinalizer(p unsafe.Pointer, f *funcval, nret uintptr, fint *_type, ot *p
 		// There was an old finalizer
 		lock(&mheap_.speciallock)
-		fixAlloc_Free(&mheap_.specialfinalizeralloc, (unsafe.Pointer)(s))
+		fixAlloc_Free(&mheap_.specialfinalizeralloc, unsafe.Pointer(s))
 		unlock(&mheap_.speciallock)
 		return false
 	}
@@ -1076,7 +1076,7 @@ func removefinalizer(p unsafe.Pointer) {
 		return // there wasn't a finalizer to remove
 	}
 	lock(&mheap_.speciallock)
-	fixAlloc_Free(&mheap_.specialfinalizeralloc, (unsafe.Pointer)(s))
+	fixAlloc_Free(&mheap_.specialfinalizeralloc, unsafe.Pointer(s))
 	unlock(&mheap_.speciallock)
 }
@@ -1107,14 +1107,14 @@ func freespecial(s *special, p unsafe.Pointer, size uintptr, freed bool) bool {
 		sf := (*specialfinalizer)(unsafe.Pointer(s))
 		queuefinalizer(p, sf.fn, sf.nret, sf.fint, sf.ot)
 		lock(&mheap_.speciallock)
-		fixAlloc_Free(&mheap_.specialfinalizeralloc, (unsafe.Pointer)(sf))
+		fixAlloc_Free(&mheap_.specialfinalizeralloc, unsafe.Pointer(sf))
 		unlock(&mheap_.speciallock)
 		return false // don't free p until finalizer is done
 	case _KindSpecialProfile:
 		sp := (*specialprofile)(unsafe.Pointer(s))
 		mProf_Free(sp.b, size, freed)
 		lock(&mheap_.speciallock)
-		fixAlloc_Free(&mheap_.specialprofilealloc, (unsafe.Pointer)(sp))
+		fixAlloc_Free(&mheap_.specialprofilealloc, unsafe.Pointer(sp))
 		unlock(&mheap_.speciallock)
 		return true
 	default:
...
@@ -164,7 +164,7 @@ func postnote(pid uint64, msg []byte) int {
 		return -1
 	}
 	len := findnull(&msg[0])
-	if write(uintptr(fd), (unsafe.Pointer)(&msg[0]), int32(len)) != int64(len) {
+	if write(uintptr(fd), unsafe.Pointer(&msg[0]), int32(len)) != int64(len) {
 		closefd(fd)
 		return -1
 	}
...
@@ -417,7 +417,7 @@ func gopanic(e interface{}) {
 		// Record the panic that is running the defer.
 		// If there is a new panic during the deferred call, that panic
 		// will find d in the list and will mark d._panic (this panic) aborted.
-		d._panic = (*_panic)(noescape((unsafe.Pointer)(&p)))
+		d._panic = (*_panic)(noescape(unsafe.Pointer(&p)))
 		p.argp = unsafe.Pointer(getargp(0))
 		reflectcall(nil, unsafe.Pointer(d.fn), deferArgs(d), uint32(d.siz), uint32(d.siz))
...
@@ -100,7 +100,7 @@ func stackpoolalloc(order uint8) gclinkptr {
 // Adds stack x to the free pool. Must be called with stackpoolmu held.
 func stackpoolfree(x gclinkptr, order uint8) {
-	s := mHeap_Lookup(&mheap_, (unsafe.Pointer)(x))
+	s := mHeap_Lookup(&mheap_, unsafe.Pointer(x))
 	if s.state != _MSpanStack {
 		throw("freeing stack not in a stack span")
 	}
@@ -251,13 +251,13 @@ func stackalloc(n uint32) (stack, []stkbar) {
 			c.stackcache[order].list = x.ptr().next
 			c.stackcache[order].size -= uintptr(n)
 		}
-		v = (unsafe.Pointer)(x)
+		v = unsafe.Pointer(x)
 	} else {
 		s := mHeap_AllocStack(&mheap_, round(uintptr(n), _PageSize)>>_PageShift)
 		if s == nil {
 			throw("out of memory")
 		}
-		v = (unsafe.Pointer)(s.start << _PageShift)
+		v = unsafe.Pointer(s.start << _PageShift)
 	}
 	if raceenabled {
@@ -273,7 +273,7 @@ func stackalloc(n uint32) (stack, []stkbar) {
 func stackfree(stk stack, n uintptr) {
 	gp := getg()
-	v := (unsafe.Pointer)(stk.lo)
+	v := unsafe.Pointer(stk.lo)
 	if n&(n-1) != 0 {
 		throw("stack not a power of 2")
 	}
@@ -545,7 +545,7 @@ func adjustframe(frame *stkframe, arg unsafe.Pointer) bool {
 }
 func adjustctxt(gp *g, adjinfo *adjustinfo) {
-	adjustpointer(adjinfo, (unsafe.Pointer)(&gp.sched.ctxt))
+	adjustpointer(adjinfo, unsafe.Pointer(&gp.sched.ctxt))
 }
 func adjustdefers(gp *g, adjinfo *adjustinfo) {
@@ -555,30 +555,30 @@ func adjustdefers(gp *g, adjinfo *adjustinfo) {
 	// Adjust pointers in the Defer structs.
 	// Defer structs themselves are never on the stack.
 	for d := gp._defer; d != nil; d = d.link {
-		adjustpointer(adjinfo, (unsafe.Pointer)(&d.fn))
-		adjustpointer(adjinfo, (unsafe.Pointer)(&d.sp))
-		adjustpointer(adjinfo, (unsafe.Pointer)(&d._panic))
+		adjustpointer(adjinfo, unsafe.Pointer(&d.fn))
+		adjustpointer(adjinfo, unsafe.Pointer(&d.sp))
+		adjustpointer(adjinfo, unsafe.Pointer(&d._panic))
 	}
 }
 func adjustpanics(gp *g, adjinfo *adjustinfo) {
 	// Panics are on stack and already adjusted.
 	// Update pointer to head of list in G.
-	adjustpointer(adjinfo, (unsafe.Pointer)(&gp._panic))
+	adjustpointer(adjinfo, unsafe.Pointer(&gp._panic))
 }
 func adjustsudogs(gp *g, adjinfo *adjustinfo) {
 	// the data elements pointed to by a SudoG structure
 	// might be in the stack.
 	for s := gp.waiting; s != nil; s = s.waitlink {
-		adjustpointer(adjinfo, (unsafe.Pointer)(&s.elem))
-		adjustpointer(adjinfo, (unsafe.Pointer)(&s.selectdone))
+		adjustpointer(adjinfo, unsafe.Pointer(&s.elem))
+		adjustpointer(adjinfo, unsafe.Pointer(&s.selectdone))
 	}
 }
 func adjuststkbar(gp *g, adjinfo *adjustinfo) {
 	for i := int(gp.stkbarPos); i < len(gp.stkbar); i++ {
-		adjustpointer(adjinfo, (unsafe.Pointer)(&gp.stkbar[i].savedLRPtr))
+		adjustpointer(adjinfo, unsafe.Pointer(&gp.stkbar[i].savedLRPtr))
 	}
 }
@@ -817,11 +817,11 @@ func nilfunc() {
 func gostartcallfn(gobuf *gobuf, fv *funcval) {
 	var fn unsafe.Pointer
 	if fv != nil {
-		fn = (unsafe.Pointer)(fv.fn)
+		fn = unsafe.Pointer(fv.fn)
 	} else {
 		fn = unsafe.Pointer(funcPC(nilfunc))
 	}
-	gostartcall(gobuf, fn, (unsafe.Pointer)(fv))
+	gostartcall(gobuf, fn, unsafe.Pointer(fv))
 }
 // Maybe shrink the stack being used by gp.
...