Commit d6098e42 authored by Keith Randall

cmd/compile: intrinsify sync/atomic for amd64

Uses the same implementation as runtime/internal/atomic.

Reorganize the intrinsic detector to make it more table-driven.

Also works on amd64p32.

Change-Id: I7a5238951d6018d7d5d1bc01f339f6ee9282b2d0
Reviewed-on: https://go-review.googlesource.com/28076Reviewed-by: default avatarCherry Zhang <cherryyz@google.com>
parent adb1e67f
This diff is collapsed.
...@@ -2,6 +2,9 @@ ...@@ -2,6 +2,9 @@
// Use of this source code is governed by a BSD-style // Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file. // license that can be found in the LICENSE file.
// Note: some of these functions are semantically inlined
// by the compiler (in src/cmd/compile/internal/gc/ssa.go).
// +build !race // +build !race
#include "textflag.h" #include "textflag.h"
......
...@@ -2,6 +2,9 @@ ...@@ -2,6 +2,9 @@
// Use of this source code is governed by a BSD-style // Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file. // license that can be found in the LICENSE file.
// Note: some of these functions are semantically inlined
// by the compiler (in src/cmd/compile/internal/gc/ssa.go).
#include "textflag.h" #include "textflag.h"
TEXT ·SwapInt32(SB),NOSPLIT,$0-12 TEXT ·SwapInt32(SB),NOSPLIT,$0-12
...@@ -50,9 +53,6 @@ TEXT ·CompareAndSwapInt64(SB),NOSPLIT,$0-25 ...@@ -50,9 +53,6 @@ TEXT ·CompareAndSwapInt64(SB),NOSPLIT,$0-25
TEXT ·CompareAndSwapUint64(SB),NOSPLIT,$0-25 TEXT ·CompareAndSwapUint64(SB),NOSPLIT,$0-25
MOVL addr+0(FP), BX MOVL addr+0(FP), BX
TESTL $7, BX
JZ 2(PC)
MOVL 0, BX // crash with nil ptr deref
MOVQ old+8(FP), AX MOVQ old+8(FP), AX
MOVQ new+16(FP), CX MOVQ new+16(FP), CX
LOCK LOCK
...@@ -81,9 +81,6 @@ TEXT ·AddInt64(SB),NOSPLIT,$0-24 ...@@ -81,9 +81,6 @@ TEXT ·AddInt64(SB),NOSPLIT,$0-24
TEXT ·AddUint64(SB),NOSPLIT,$0-24 TEXT ·AddUint64(SB),NOSPLIT,$0-24
MOVL addr+0(FP), BX MOVL addr+0(FP), BX
TESTL $7, BX
JZ 2(PC)
MOVL 0, BX // crash with nil ptr deref
MOVQ delta+8(FP), AX MOVQ delta+8(FP), AX
MOVQ AX, CX MOVQ AX, CX
LOCK LOCK
...@@ -106,9 +103,6 @@ TEXT ·LoadInt64(SB),NOSPLIT,$0-16 ...@@ -106,9 +103,6 @@ TEXT ·LoadInt64(SB),NOSPLIT,$0-16
TEXT ·LoadUint64(SB),NOSPLIT,$0-16 TEXT ·LoadUint64(SB),NOSPLIT,$0-16
MOVL addr+0(FP), AX MOVL addr+0(FP), AX
TESTL $7, AX
JZ 2(PC)
MOVL 0, AX // crash with nil ptr deref
MOVQ 0(AX), AX MOVQ 0(AX), AX
MOVQ AX, val+8(FP) MOVQ AX, val+8(FP)
RET RET
...@@ -136,9 +130,6 @@ TEXT ·StoreInt64(SB),NOSPLIT,$0-16 ...@@ -136,9 +130,6 @@ TEXT ·StoreInt64(SB),NOSPLIT,$0-16
TEXT ·StoreUint64(SB),NOSPLIT,$0-16 TEXT ·StoreUint64(SB),NOSPLIT,$0-16
MOVL addr+0(FP), BX MOVL addr+0(FP), BX
TESTL $7, BX
JZ 2(PC)
MOVL 0, BX // crash with nil ptr deref
MOVQ val+8(FP), AX MOVQ val+8(FP), AX
XCHGQ AX, 0(BX) XCHGQ AX, 0(BX)
RET RET
......
...@@ -1392,6 +1392,10 @@ func TestUnaligned64(t *testing.T) { ...@@ -1392,6 +1392,10 @@ func TestUnaligned64(t *testing.T) {
if unsafe.Sizeof(int(0)) != 4 { if unsafe.Sizeof(int(0)) != 4 {
t.Skip("test only runs on 32-bit systems") t.Skip("test only runs on 32-bit systems")
} }
if runtime.GOARCH == "amd64p32" {
// amd64p32 can handle unaligned atomics.
t.Skip("test not needed on amd64p32")
}
x := make([]uint32, 4) x := make([]uint32, 4)
p := (*uint64)(unsafe.Pointer(&x[1])) // misaligned p := (*uint64)(unsafe.Pointer(&x[1])) // misaligned
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment