Commit 72180c3b authored by Michael Hudson-Doyle's avatar Michael Hudson-Doyle

cmd/internal/obj, cmd/link, runtime: native-ish support for tls on arm64

Fixes #10560

Change-Id: Iedffd9c236c4fbb386c3afc52c5a1457f96ef122
Reviewed-on: https://go-review.googlesource.com/13991
Reviewed-by: David Crawshaw <crawshaw@golang.org>
parent 492a62e9
...@@ -321,6 +321,7 @@ const ( ...@@ -321,6 +321,7 @@ const (
C_LOREG C_LOREG
C_ADDR // TODO(aram): explain difference from C_VCONADDR C_ADDR // TODO(aram): explain difference from C_VCONADDR
C_TLS // TLS var, i.e. memory address containing offset for the var
C_ROFF // register offset (including register extended) C_ROFF // register offset (including register extended)
C_GOK C_GOK
......
...@@ -55,6 +55,7 @@ var cnames7 = []string{ ...@@ -55,6 +55,7 @@ var cnames7 = []string{
"UOREG64K", "UOREG64K",
"LOREG", "LOREG",
"ADDR", "ADDR",
"TLS",
"ROFF", "ROFF",
"GOK", "GOK",
"TEXTSIZE", "TEXTSIZE",
......
...@@ -270,6 +270,7 @@ var optab = []Optab{ ...@@ -270,6 +270,7 @@ var optab = []Optab{
{AMOVH, C_ADDR, C_NONE, C_REG, 65, 12, 0, 0, 0}, {AMOVH, C_ADDR, C_NONE, C_REG, 65, 12, 0, 0, 0},
{AMOVW, C_ADDR, C_NONE, C_REG, 65, 12, 0, 0, 0}, {AMOVW, C_ADDR, C_NONE, C_REG, 65, 12, 0, 0, 0},
{AMOVD, C_ADDR, C_NONE, C_REG, 65, 12, 0, 0, 0}, {AMOVD, C_ADDR, C_NONE, C_REG, 65, 12, 0, 0, 0},
{AMOVD, C_TLS, C_NONE, C_REG, 69, 4, 0, 0, 0},
{AMUL, C_REG, C_REG, C_REG, 15, 4, 0, 0, 0}, {AMUL, C_REG, C_REG, C_REG, 15, 4, 0, 0, 0},
{AMUL, C_REG, C_NONE, C_REG, 15, 4, 0, 0, 0}, {AMUL, C_REG, C_NONE, C_REG, 15, 4, 0, 0, 0},
{AMADD, C_REG, C_REG, C_REG, 15, 4, 0, 0, 0}, {AMADD, C_REG, C_REG, C_REG, 15, 4, 0, 0, 0},
...@@ -968,6 +969,9 @@ func aclass(ctxt *obj.Link, a *obj.Addr) int { ...@@ -968,6 +969,9 @@ func aclass(ctxt *obj.Link, a *obj.Addr) int {
} }
ctxt.Instoffset = a.Offset ctxt.Instoffset = a.Offset
if a.Sym != nil { // use relocation if a.Sym != nil { // use relocation
if a.Sym.Type == obj.STLSBSS {
return C_TLS
}
return C_ADDR return C_ADDR
} }
return C_LEXT return C_LEXT
...@@ -2753,6 +2757,18 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) { ...@@ -2753,6 +2757,18 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
rel.Add = p.From.Offset rel.Add = p.From.Offset
rel.Type = obj.R_ADDRARM64 rel.Type = obj.R_ADDRARM64
case 69: /* movd $tlsvar, reg -> movz reg, 0 + reloc */
o1 = opirr(ctxt, AMOVZ)
o1 |= uint32(p.To.Reg & 31)
rel := obj.Addrel(ctxt.Cursym)
rel.Off = int32(ctxt.Pc)
rel.Siz = 4
rel.Sym = p.From.Sym
rel.Type = obj.R_ARM64_TLS_LE
if p.From.Offset != 0 {
ctxt.Diag("invalid offset on MOVW $tlsvar")
}
// This is supposed to be something that stops execution. // This is supposed to be something that stops execution.
// It's not supposed to be reached, ever, but if it is, we'd // It's not supposed to be reached, ever, but if it is, we'd
// like to be able to tell how we got there. Assemble as // like to be able to tell how we got there. Assemble as
......
...@@ -432,6 +432,20 @@ const ( ...@@ -432,6 +432,20 @@ const (
R_USEFIELD R_USEFIELD
R_POWER_TOC R_POWER_TOC
R_GOTPCREL R_GOTPCREL
// Platform dependent relocations. Architectures with fixed width instructions
// have the inherent issue that a 32-bit (or 64-bit!) displacement cannot be
// stuffed into a 32-bit instruction, so an address needs to be spread across
// several instructions, and in turn this requires a sequence of relocations, each
// updating a part of an instruction. This leads to relocation codes that are
// inherently processor specific.
// Arm64.
// Set a MOV[NZ] immediate field to bits [15:0] of the offset from the thread
// local base to the thread local variable defined by the referenced (thread
// local) symbol. Error if the offset does not fit into 16 bits.
R_ARM64_TLS_LE
) )
type Auto struct { type Auto struct {
......
...@@ -73,6 +73,9 @@ func elfreloc1(r *ld.Reloc, sectoff int64) int { ...@@ -73,6 +73,9 @@ func elfreloc1(r *ld.Reloc, sectoff int64) int {
ld.Thearch.Vput(uint64(sectoff + 4)) ld.Thearch.Vput(uint64(sectoff + 4))
ld.Thearch.Vput(ld.R_AARCH64_ADD_ABS_LO12_NC | uint64(elfsym)<<32) ld.Thearch.Vput(ld.R_AARCH64_ADD_ABS_LO12_NC | uint64(elfsym)<<32)
case obj.R_ARM64_TLS_LE:
ld.Thearch.Vput(ld.R_AARCH64_TLSLE_MOVW_TPREL_G0 | uint64(elfsym)<<32)
case obj.R_CALLARM64: case obj.R_CALLARM64:
if r.Siz != 4 { if r.Siz != 4 {
return -1 return -1
...@@ -225,7 +228,8 @@ func archreloc(r *ld.Reloc, s *ld.LSym, val *int64) int { ...@@ -225,7 +228,8 @@ func archreloc(r *ld.Reloc, s *ld.LSym, val *int64) int {
return 0 return 0
case obj.R_CALLARM64: case obj.R_CALLARM64,
obj.R_ARM64_TLS_LE:
r.Done = 0 r.Done = 0
r.Xsym = r.Sym r.Xsym = r.Sym
r.Xadd = r.Add r.Xadd = r.Add
...@@ -269,6 +273,20 @@ func archreloc(r *ld.Reloc, s *ld.LSym, val *int64) int { ...@@ -269,6 +273,20 @@ func archreloc(r *ld.Reloc, s *ld.LSym, val *int64) int {
} }
return 0 return 0
case obj.R_ARM64_TLS_LE:
r.Done = 0
if ld.HEADTYPE != obj.Hlinux {
ld.Diag("TLS reloc on unsupported OS %s", ld.Headstr(int(ld.HEADTYPE)))
}
// The TCB is two pointers. This is not documented anywhere, but is
// de facto part of the ABI.
v := r.Sym.Value + int64(2*ld.Thearch.Ptrsize)
if v < 0 || v >= 32678 {
ld.Diag("TLS offset out of range %d", v)
}
*val |= v << 5
return 0
case obj.R_CALLARM64: case obj.R_CALLARM64:
t := (ld.Symaddr(r.Sym) + r.Add) - (s.Value + int64(r.Off)) t := (ld.Symaddr(r.Sym) + r.Add) - (s.Value + int64(r.Off))
if t >= 1<<27 || t < -1<<27 { if t >= 1<<27 || t < -1<<27 {
......
This diff is collapsed.
...@@ -38,7 +38,7 @@ TEXT runtime·rt0_go(SB),NOSPLIT,$0 ...@@ -38,7 +38,7 @@ TEXT runtime·rt0_go(SB),NOSPLIT,$0
#ifdef TLSG_IS_VARIABLE #ifdef TLSG_IS_VARIABLE
MOVD $runtime·tls_g(SB), R2 // arg 2: &tls_g MOVD $runtime·tls_g(SB), R2 // arg 2: &tls_g
#else #else
MOVD 0, R2 // arg 2: not used when using platform's TLS MOVD $0, R2 // arg 2: not used when using platform's TLS
#endif #endif
MOVD $setg_gcc<>(SB), R1 // arg 1: setg MOVD $setg_gcc<>(SB), R1 // arg 1: setg
MOVD g, R0 // arg 0: G MOVD g, R0 // arg 0: G
......
...@@ -18,13 +18,8 @@ TEXT runtime·load_g(SB),NOSPLIT,$0 ...@@ -18,13 +18,8 @@ TEXT runtime·load_g(SB),NOSPLIT,$0
// Darwin sometimes returns unaligned pointers // Darwin sometimes returns unaligned pointers
AND $0xfffffffffffffff8, R0 AND $0xfffffffffffffff8, R0
#endif #endif
#ifdef TLSG_IS_VARIABLE
MOVD runtime·tls_g(SB), R27 MOVD runtime·tls_g(SB), R27
ADD R27, R0 ADD R27, R0
#else
// TODO(minux): use real TLS relocation, instead of hard-code for Linux
ADD $0x10, R0
#endif
MOVD 0(R0), g MOVD 0(R0), g
nocgo: nocgo:
...@@ -40,13 +35,8 @@ TEXT runtime·save_g(SB),NOSPLIT,$0 ...@@ -40,13 +35,8 @@ TEXT runtime·save_g(SB),NOSPLIT,$0
// Darwin sometimes returns unaligned pointers // Darwin sometimes returns unaligned pointers
AND $0xfffffffffffffff8, R0 AND $0xfffffffffffffff8, R0
#endif #endif
#ifdef TLSG_IS_VARIABLE
MOVD runtime·tls_g(SB), R27 MOVD runtime·tls_g(SB), R27
ADD R27, R0 ADD R27, R0
#else
// TODO(minux): use real TLS relocation, instead of hard-code for Linux
ADD $0x10, R0
#endif
MOVD g, 0(R0) MOVD g, 0(R0)
nocgo: nocgo:
...@@ -54,4 +44,6 @@ nocgo: ...@@ -54,4 +44,6 @@ nocgo:
#ifdef TLSG_IS_VARIABLE #ifdef TLSG_IS_VARIABLE
GLOBL runtime·tls_g+0(SB), NOPTR, $8 GLOBL runtime·tls_g+0(SB), NOPTR, $8
#else
GLOBL runtime·tls_g+0(SB), TLSBSS, $8
#endif #endif
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment