Commit 7a71726b authored by Austin Clements

runtime: check alignment of 8-byte atomic loads and stores on 386

Currently, if we do an atomic{load,store}64 of an unaligned address on
386, we'll simply get a non-atomic load/store.  This has been the
source of myriad bugs, so add alignment checks to these two
operations.  These checks parallel the equivalent checks in
sync/atomic.
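
As a rough illustration of that pattern (not part of this change), a minimal
Go sketch: a 64-bit atomic operation is only valid when the address has its
low three bits clear. The helper name checkedLoad64 and the panic are
illustrative; the runtime's assembly performs the same test and crashes with
a fault instead.

package main

import (
	"fmt"
	"sync/atomic"
	"unsafe"
)

// A package-level uint64 is the first word of its variable, so it can be
// relied on to be 8-byte aligned even on 386.
var counter uint64

// checkedLoad64 is a hypothetical wrapper showing the shape of the new
// check: reject any address that is not 8-byte aligned before doing the
// 64-bit atomic load.
func checkedLoad64(addr *uint64) uint64 {
	if uintptr(unsafe.Pointer(addr))&7 != 0 {
		panic("unaligned 64-bit atomic operation")
	}
	return atomic.LoadUint64(addr)
}

func main() {
	atomic.StoreUint64(&counter, 1<<40+1)
	fmt.Println(checkedLoad64(&counter))
}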

The alignment check is not necessary in cas64 because it uses a locked
instruction.  The CPU will either execute this atomically or raise an
alignment fault (#AC)---depending on the alignment check flag---either
of which is fine.

This also fixes the two places in the runtime that trip the new
checks.  One is in the runtime self-test and shouldn't have caused
real problems.  The other is in tickspersecond and could, in
principle, have caused a misread of the ticks per second during
initialization.
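
A minimal sketch of that layout trick (the ticksT type and the uint32
stand-in for the runtime's mutex are illustrative only): on 386 a uint64
struct field is only guaranteed 4-byte alignment, so an explicit pad field
is what pushes val onto an 8-byte boundary.

package main

import (
	"fmt"
	"unsafe"
)

type ticksT struct {
	lock uint32 // stand-in for the runtime's mutex, one word on 386
	pad  uint32 // without this, val would sit at offset 4 on 386
	val  uint64
}

// Declared at package scope, mirroring the runtime's global ticks
// variable, so the struct itself starts on an 8-byte boundary.
var t ticksT

func main() {
	// With the pad, val lands at offset 8 and is safe to pass to
	// atomicload64/atomicstore64 on 386.
	fmt.Println(unsafe.Offsetof(t.val)) // 8
}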

Change-Id: If1796667012a6154f64f5e71d043c7f5fb3dd050
Reviewed-on: https://go-review.googlesource.com/3521
Reviewed-by: Russ Cox <rsc@golang.org>
parent 83c10b20
@@ -555,6 +555,9 @@ TEXT runtime·atomicstore(SB), NOSPLIT, $0-8
 // uint64 atomicload64(uint64 volatile* addr);
 TEXT runtime·atomicload64(SB), NOSPLIT, $0-12
 	MOVL	ptr+0(FP), AX
+	TESTL	$7, AX
+	JZ	2(PC)
+	MOVL	0, AX // crash with nil ptr deref
 	LEAL	ret_lo+4(FP), BX
 	// MOVQ (%EAX), %MM0
 	BYTE $0x0f; BYTE $0x6f; BYTE $0x00
@@ -567,6 +570,9 @@ TEXT runtime·atomicload64(SB), NOSPLIT, $0-12
 // void runtime·atomicstore64(uint64 volatile* addr, uint64 v);
 TEXT runtime·atomicstore64(SB), NOSPLIT, $0-12
 	MOVL	ptr+0(FP), AX
+	TESTL	$7, AX
+	JZ	2(PC)
+	MOVL	0, AX // crash with nil ptr deref
 	// MOVQ and EMMS were introduced on the Pentium MMX.
 	// MOVQ 0x8(%ESP), %MM0
 	BYTE $0x0f; BYTE $0x6f; BYTE $0x44; BYTE $0x24; BYTE $0x08
...
@@ -10,6 +10,7 @@ import _ "unsafe" // for go:linkname
 var ticks struct {
 	lock mutex
+	pad  uint32 // ensure 8-byte alignment of val on 386
 	val  uint64
 }
...
@@ -95,45 +95,47 @@ func environ() []string {
 	return envs
 }
+// TODO: These should be locals in testAtomic64, but we don't 8-byte
+// align stack variables on 386.
+var test_z64, test_x64 uint64
 func testAtomic64() {
-	var z64, x64 uint64
-	z64 = 42
-	x64 = 0
-	prefetcht0(uintptr(unsafe.Pointer(&z64)))
-	prefetcht1(uintptr(unsafe.Pointer(&z64)))
-	prefetcht2(uintptr(unsafe.Pointer(&z64)))
-	prefetchnta(uintptr(unsafe.Pointer(&z64)))
-	if cas64(&z64, x64, 1) {
+	test_z64 = 42
+	test_x64 = 0
+	prefetcht0(uintptr(unsafe.Pointer(&test_z64)))
+	prefetcht1(uintptr(unsafe.Pointer(&test_z64)))
+	prefetcht2(uintptr(unsafe.Pointer(&test_z64)))
+	prefetchnta(uintptr(unsafe.Pointer(&test_z64)))
+	if cas64(&test_z64, test_x64, 1) {
 		throw("cas64 failed")
 	}
-	if x64 != 0 {
+	if test_x64 != 0 {
 		throw("cas64 failed")
 	}
-	x64 = 42
-	if !cas64(&z64, x64, 1) {
+	test_x64 = 42
+	if !cas64(&test_z64, test_x64, 1) {
 		throw("cas64 failed")
 	}
-	if x64 != 42 || z64 != 1 {
+	if test_x64 != 42 || test_z64 != 1 {
 		throw("cas64 failed")
 	}
-	if atomicload64(&z64) != 1 {
+	if atomicload64(&test_z64) != 1 {
 		throw("load64 failed")
 	}
-	atomicstore64(&z64, (1<<40)+1)
-	if atomicload64(&z64) != (1<<40)+1 {
+	atomicstore64(&test_z64, (1<<40)+1)
+	if atomicload64(&test_z64) != (1<<40)+1 {
 		throw("store64 failed")
 	}
-	if xadd64(&z64, (1<<40)+1) != (2<<40)+2 {
+	if xadd64(&test_z64, (1<<40)+1) != (2<<40)+2 {
 		throw("xadd64 failed")
 	}
-	if atomicload64(&z64) != (2<<40)+2 {
+	if atomicload64(&test_z64) != (2<<40)+2 {
 		throw("xadd64 failed")
 	}
-	if xchg64(&z64, (3<<40)+3) != (2<<40)+2 {
+	if xchg64(&test_z64, (3<<40)+3) != (2<<40)+2 {
 		throw("xchg64 failed")
 	}
-	if atomicload64(&z64) != (3<<40)+3 {
+	if atomicload64(&test_z64) != (3<<40)+3 {
 		throw("xchg64 failed")
 	}
 }
...