// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package gc

import (
	"cmd/compile/internal/types"
	"cmd/internal/gcprog"
	"cmd/internal/obj"
	"cmd/internal/objabi"
	"cmd/internal/src"
	"fmt"
	"os"
	"sort"
	"strings"
	"sync"
)

type itabEntry struct {
	t, itype *types.Type
	lsym     *obj.LSym // symbol of the itab itself

	// symbols of each method in
	// the itab, sorted by byte offset;
	// filled in by peekitabs
	entries []*obj.LSym
}

type ptabEntry struct {
	s *types.Sym
	t *types.Type
}

// runtime interface and reflection data structures
var (
	signatmu    sync.Mutex // protects signatset and signatslice
	signatset   = make(map[*types.Type]struct{})
	signatslice []*types.Type

	itabs []itabEntry
	ptabs []ptabEntry
)

type Sig struct {
	name  *types.Sym
	isym  *types.Sym
	tsym  *types.Sym
	type_ *types.Type
	mtype *types.Type
}

// Builds a type representing a Bucket structure for
// the given map type. This type is not visible to users -
// we include only enough information to generate a correct GC
// program for it.
// Make sure this stays in sync with runtime/map.go.
const (
	BUCKETSIZE  = 8
	MAXKEYSIZE  = 128
	MAXELEMSIZE = 128
)

func structfieldSize() int { return 3 * Widthptr } // Sizeof(runtime.structfield{})
func imethodSize() int     { return 4 + 4 }        // Sizeof(runtime.imethod{})

func uncommonSize(t *types.Type) int { // Sizeof(runtime.uncommontype{})
	if t.Sym == nil && len(methods(t)) == 0 {
		return 0
	}
	return 4 + 2 + 2 + 4 + 4
}

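// For illustration (an editorial note): the 4 + 2 + 2 + 4 + 4 above is
// assumed to correspond to the runtime.uncommontype fields pkgpath
// (nameOff), mcount and xcount (uint16 each), moff (uint32), and one
// unused uint32, 16 bytes in total.
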
func makefield(name string, t *types.Type) *types.Field {
	f := types.NewField()
	f.Type = t
	f.Sym = (*types.Pkg)(nil).Lookup(name)
	return f
}

// bmap makes the map bucket type given the type of the map.
func bmap(t *types.Type) *types.Type {
	if t.MapType().Bucket != nil {
		return t.MapType().Bucket
	}

	bucket := types.New(TSTRUCT)
	keytype := t.Key()
	elemtype := t.Elem()
	dowidth(keytype)
	dowidth(elemtype)
	if keytype.Width > MAXKEYSIZE {
		keytype = types.NewPtr(keytype)
	}
	if elemtype.Width > MAXELEMSIZE {
		elemtype = types.NewPtr(elemtype)
	}

	field := make([]*types.Field, 0, 5)

	// The first field is: uint8 topbits[BUCKETSIZE].
	arr := types.NewArray(types.Types[TUINT8], BUCKETSIZE)
	field = append(field, makefield("topbits", arr))

	arr = types.NewArray(keytype, BUCKETSIZE)
	arr.SetNoalg(true)
	keys := makefield("keys", arr)
	field = append(field, keys)

	arr = types.NewArray(elemtype, BUCKETSIZE)
	arr.SetNoalg(true)
	elems := makefield("elems", arr)
	field = append(field, elems)

	// Make sure the overflow pointer is the last memory in the struct,
	// because the runtime assumes it can use size-ptrSize as the
	// offset of the overflow pointer. We double-check that property
	// below once the offsets and size are computed.
	//
	// BUCKETSIZE is 8, so the struct is aligned to 64 bits to this point.
	// On 32-bit systems, the max alignment is 32-bit, and the
	// overflow pointer will add another 32-bit field, and the struct
	// will end with no padding.
	// On 64-bit systems, the max alignment is 64-bit, and the
	// overflow pointer will add another 64-bit field, and the struct
	// will end with no padding.
	// On nacl/amd64p32, however, the max alignment is 64-bit,
	// but the overflow pointer will add only a 32-bit field,
	// so if the struct needs 64-bit padding (because a key or elem does)
	// then it would end with an extra 32-bit padding field.
	// Preempt that by emitting the padding here.
	if int(elemtype.Align) > Widthptr || int(keytype.Align) > Widthptr {
		field = append(field, makefield("pad", types.Types[TUINTPTR]))
	}

	// If keys and elems have no pointers, the map implementation
	// can keep a list of overflow pointers on the side so that
	// buckets can be marked as having no pointers.
	// Arrange for the bucket to have no pointers by changing
	// the type of the overflow field to uintptr in this case.
	// See comment on hmap.overflow in runtime/map.go.
	otyp := types.NewPtr(bucket)
	if !types.Haspointers(elemtype) && !types.Haspointers(keytype) {
		otyp = types.Types[TUINTPTR]
	}
	overflow := makefield("overflow", otyp)
	field = append(field, overflow)

	// link up fields
	bucket.SetNoalg(true)
	bucket.SetFields(field[:])
	dowidth(bucket)

	// Check invariants that map code depends on.
	if !IsComparable(t.Key()) {
		Fatalf("unsupported map key type for %v", t)
	}
	if BUCKETSIZE < 8 {
		Fatalf("bucket size too small for proper alignment")
	}
	if keytype.Align > BUCKETSIZE {
		Fatalf("key align too big for %v", t)
	}
	if elemtype.Align > BUCKETSIZE {
		Fatalf("elem align too big for %v", t)
	}
	if keytype.Width > MAXKEYSIZE {
		Fatalf("key size too large for %v", t)
	}
	if elemtype.Width > MAXELEMSIZE {
		Fatalf("elem size too large for %v", t)
	}
	if t.Key().Width > MAXKEYSIZE && !keytype.IsPtr() {
		Fatalf("key indirect incorrect for %v", t)
	}
	if t.Elem().Width > MAXELEMSIZE && !elemtype.IsPtr() {
		Fatalf("elem indirect incorrect for %v", t)
	}
	if keytype.Width%int64(keytype.Align) != 0 {
		Fatalf("key size not a multiple of key align for %v", t)
	}
	if elemtype.Width%int64(elemtype.Align) != 0 {
		Fatalf("elem size not a multiple of elem align for %v", t)
	}
	if bucket.Align%keytype.Align != 0 {
		Fatalf("bucket align not multiple of key align %v", t)
	}
	if bucket.Align%elemtype.Align != 0 {
		Fatalf("bucket align not multiple of elem align %v", t)
	}
	if keys.Offset%int64(keytype.Align) != 0 {
		Fatalf("bad alignment of keys in bmap for %v", t)
	}
	if elems.Offset%int64(elemtype.Align) != 0 {
		Fatalf("bad alignment of elems in bmap for %v", t)
	}

	// Double-check that overflow field is final memory in struct,
	// with no padding at end. See comment above.
	if overflow.Offset != bucket.Width-int64(Widthptr) {
		Fatalf("bad offset of overflow in bmap for %v", t)
	}

	t.MapType().Bucket = bucket

	bucket.StructType().Map = t
	return bucket
}

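// For illustration, a sketch of what bmap produces (not compiled here):
// for map[string]uint16, neither key nor elem exceeds the size limits,
// strings contain pointers, and no extra padding is needed, so the
// generated bucket is roughly
//
//	type bucket struct {
//		topbits  [8]uint8
//		keys     [8]string
//		elems    [8]uint16
//		overflow *bucket // would be uintptr if key and elem were pointer-free
//	}
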
// hmap builds a type representing a Hmap structure for the given map type.
// Make sure this stays in sync with runtime/map.go.
func hmap(t *types.Type) *types.Type {
	if t.MapType().Hmap != nil {
		return t.MapType().Hmap
	}

	bmap := bmap(t)

	// build a struct:
	// type hmap struct {
	//    count      int
	//    flags      uint8
	//    B          uint8
	//    noverflow  uint16
	//    hash0      uint32
	//    buckets    *bmap
	//    oldbuckets *bmap
	//    nevacuate  uintptr
	//    extra      unsafe.Pointer // *mapextra
	// }
	// must match runtime/map.go:hmap.
	fields := []*types.Field{
		makefield("count", types.Types[TINT]),
		makefield("flags", types.Types[TUINT8]),
		makefield("B", types.Types[TUINT8]),
		makefield("noverflow", types.Types[TUINT16]),
		makefield("hash0", types.Types[TUINT32]), // Used in walk.go for OMAKEMAP.
		makefield("buckets", types.NewPtr(bmap)), // Used in walk.go for OMAKEMAP.
		makefield("oldbuckets", types.NewPtr(bmap)),
		makefield("nevacuate", types.Types[TUINTPTR]),
		makefield("extra", types.Types[TUNSAFEPTR]),
	}

	hmap := types.New(TSTRUCT)
	hmap.SetNoalg(true)
	hmap.SetFields(fields)
	dowidth(hmap)

	// The size of hmap should be 48 bytes on 64 bit
	// and 28 bytes on 32 bit platforms.
	if size := int64(8 + 5*Widthptr); hmap.Width != size {
		Fatalf("hmap size not correct: got %d, want %d", hmap.Width, size)
	}

	t.MapType().Hmap = hmap
	hmap.StructType().Map = t
	return hmap
}

// hiter builds a type representing an Hiter structure for the given map type.
// Make sure this stays in sync with runtime/map.go.
func hiter(t *types.Type) *types.Type {
	if t.MapType().Hiter != nil {
		return t.MapType().Hiter
	}

	hmap := hmap(t)
	bmap := bmap(t)

	// build a struct:
	// type hiter struct {
	//    key         *Key
	//    elem        *Elem
	//    t           unsafe.Pointer // *MapType
	//    h           *hmap
	//    buckets     *bmap
	//    bptr        *bmap
	//    overflow    unsafe.Pointer // *[]*bmap
	//    oldoverflow unsafe.Pointer // *[]*bmap
	//    startBucket uintptr
	//    offset      uint8
	//    wrapped     bool
	//    B           uint8
	//    i           uint8
	//    bucket      uintptr
	//    checkBucket uintptr
	// }
	// must match runtime/map.go:hiter.
	fields := []*types.Field{
		makefield("key", types.NewPtr(t.Key())),   // Used in range.go for TMAP.
		makefield("elem", types.NewPtr(t.Elem())), // Used in range.go for TMAP.
		makefield("t", types.Types[TUNSAFEPTR]),
		makefield("h", types.NewPtr(hmap)),
		makefield("buckets", types.NewPtr(bmap)),
		makefield("bptr", types.NewPtr(bmap)),
		makefield("overflow", types.Types[TUNSAFEPTR]),
		makefield("oldoverflow", types.Types[TUNSAFEPTR]),
		makefield("startBucket", types.Types[TUINTPTR]),
		makefield("offset", types.Types[TUINT8]),
		makefield("wrapped", types.Types[TBOOL]),
		makefield("B", types.Types[TUINT8]),
		makefield("i", types.Types[TUINT8]),
		makefield("bucket", types.Types[TUINTPTR]),
		makefield("checkBucket", types.Types[TUINTPTR]),
	}

	// build iterator struct holding the above fields
	hiter := types.New(TSTRUCT)
	hiter.SetNoalg(true)
	hiter.SetFields(fields)
	dowidth(hiter)
	if hiter.Width != int64(12*Widthptr) {
		Fatalf("hash_iter size not correct %d %d", hiter.Width, 12*Widthptr)
	}
	t.MapType().Hiter = hiter
	hiter.StructType().Map = t
	return hiter
}

// deferstruct makes a runtime._defer structure, with additional space for
// stksize bytes of args.
func deferstruct(stksize int64) *types.Type {
	makefield := func(name string, typ *types.Type) *types.Field {
		f := types.NewField()
		f.Type = typ
		// Unlike the global makefield function, this one needs to set Pkg
		// because these types might be compared (in SSA CSE sorting).
		// TODO: unify this makefield and the global one above.
		f.Sym = &types.Sym{Name: name, Pkg: localpkg}
		return f
	}
	argtype := types.NewArray(types.Types[TUINT8], stksize)
	argtype.SetNoalg(true)
	argtype.Width = stksize
	argtype.Align = 1
	// These fields must match the ones in runtime/runtime2.go:_defer and
	// cmd/compile/internal/gc/ssa.go:(*state).call.
	fields := []*types.Field{
		makefield("siz", types.Types[TUINT32]),
		makefield("started", types.Types[TBOOL]),
		makefield("heap", types.Types[TBOOL]),
		makefield("sp", types.Types[TUINTPTR]),
		makefield("pc", types.Types[TUINTPTR]),
		// Note: the types here don't really matter. Defer structures
		// are always scanned explicitly during stack copying and GC,
		// so we make them uintptr type even though they are real pointers.
		makefield("fn", types.Types[TUINTPTR]),
		makefield("_panic", types.Types[TUINTPTR]),
		makefield("link", types.Types[TUINTPTR]),
		makefield("args", argtype),
	}

	// build struct holding the above fields
	s := types.New(TSTRUCT)
	s.SetNoalg(true)
	s.SetFields(fields)
	s.Width = widstruct(s, s, 0, 1)
	s.Align = uint8(Widthptr)
	return s
}

// f is method type, with receiver.
// return function type, receiver as first argument (or not).
func methodfunc(f *types.Type, receiver *types.Type) *types.Type {
	inLen := f.Params().Fields().Len()
	if receiver != nil {
		inLen++
	}
	in := make([]*Node, 0, inLen)

	if receiver != nil {
		d := anonfield(receiver)
		in = append(in, d)
	}

	for _, t := range f.Params().Fields().Slice() {
		d := anonfield(t.Type)
		d.SetIsDDD(t.IsDDD())
		in = append(in, d)
	}

	outLen := f.Results().Fields().Len()
	out := make([]*Node, 0, outLen)
	for _, t := range f.Results().Fields().Slice() {
		d := anonfield(t.Type)
		out = append(out, d)
	}

	t := functype(nil, in, out)
	if f.Nname() != nil {
		// Link to name of original method function.
		t.SetNname(f.Nname())
	}

	return t
}

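// For illustration: given a method with full type
// func (r *T) M(x int, ys ...string) error, methodfunc returns
// func(*T, int, ...string) error when receiver is non-nil, and
// func(int, ...string) error when receiver is nil.
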
// methods returns the methods of the non-interface type t, sorted by name.
// Generates stub functions as needed.
func methods(t *types.Type) []*Sig {
	// method type
	mt := methtype(t)

	if mt == nil {
		return nil
	}
	expandmeth(mt)

	// type stored in interface word
	it := t

	if !isdirectiface(it) {
		it = types.NewPtr(t)
	}

	// make list of methods for t,
	// generating code if necessary.
	var ms []*Sig
	for _, f := range mt.AllMethods().Slice() {
		if !f.IsMethod() {
			Fatalf("non-method on %v method %v %v\n", mt, f.Sym, f)
		}
		if f.Type.Recv() == nil {
			Fatalf("receiver with no type on %v method %v %v\n", mt, f.Sym, f)
		}
		if f.Nointerface() {
			continue
		}

		method := f.Sym
		if method == nil {
			break
		}

		// get receiver type for this particular method.
		// if pointer receiver but non-pointer t and
		// this is not an embedded pointer inside a struct,
		// method does not apply.
		if !isMethodApplicable(t, f) {
			continue
		}

		sig := &Sig{
			name:  method,
			isym:  methodSym(it, method),
			tsym:  methodSym(t, method),
			type_: methodfunc(f.Type, t),
			mtype: methodfunc(f.Type, nil),
		}
		ms = append(ms, sig)

		this := f.Type.Recv().Type

		if !sig.isym.Siggen() {
			sig.isym.SetSiggen(true)
			if !types.Identical(this, it) {
				genwrapper(it, f, sig.isym)
			}
		}

		if !sig.tsym.Siggen() {
			sig.tsym.SetSiggen(true)
			if !types.Identical(this, t) {
				genwrapper(t, f, sig.tsym)
			}
		}
	}

	return ms
}

// imethods returns the methods of the interface type t, sorted by name.
func imethods(t *types.Type) []*Sig {
	var methods []*Sig
	for _, f := range t.Fields().Slice() {
		if f.Type.Etype != TFUNC || f.Sym == nil {
			continue
		}
		if f.Sym.IsBlank() {
			Fatalf("unexpected blank symbol in interface method set")
		}
		if n := len(methods); n > 0 {
			last := methods[n-1]
			if !last.name.Less(f.Sym) {
				Fatalf("sigcmp vs sortinter %v %v", last.name, f.Sym)
			}
		}

		sig := &Sig{
			name:  f.Sym,
			mtype: f.Type,
			type_: methodfunc(f.Type, nil),
		}
		methods = append(methods, sig)

		// NOTE(rsc): Perhaps an oversight that
		// IfaceType.Method is not in the reflect data.
		// Generate the method body, so that compiled
		// code can refer to it.
		isym := methodSym(t, f.Sym)
		if !isym.Siggen() {
			isym.SetSiggen(true)
			genwrapper(t, f, isym)
		}
	}

	return methods
}

func dimportpath(p *types.Pkg) {
	if p.Pathsym != nil {
		return
	}

	// If we are compiling the runtime package, there are two runtime packages around
	// -- localpkg and Runtimepkg. We don't want to produce import path symbols for
	// both of them, so just produce one for localpkg.
	if myimportpath == "runtime" && p == Runtimepkg {
		return
	}

	str := p.Path
	if p == localpkg {
		// Note: myimportpath != "", or else dgopkgpath won't call dimportpath.
		str = myimportpath
	}

	s := Ctxt.Lookup("type..importpath." + p.Prefix + ".")
	ot := dnameData(s, 0, str, "", nil, false)
	ggloblsym(s, int32(ot), obj.DUPOK|obj.RODATA)
	p.Pathsym = s
}

func dgopkgpath(s *obj.LSym, ot int, pkg *types.Pkg) int {
	if pkg == nil {
		return duintptr(s, ot, 0)
	}

	if pkg == localpkg && myimportpath == "" {
		// If we don't know the full import path of the package being compiled
		// (i.e. -p was not passed on the compiler command line), emit a reference to
		// type..importpath.""., which the linker will rewrite using the correct import path.
		// Every package that imports this one directly defines the symbol.
		// See also https://groups.google.com/forum/#!topic/golang-dev/myb9s53HxGQ.
		ns := Ctxt.Lookup(`type..importpath."".`)
		return dsymptr(s, ot, ns, 0)
	}

	dimportpath(pkg)
	return dsymptr(s, ot, pkg.Pathsym, 0)
}

// dgopkgpathOff writes an offset relocation in s at offset ot to the pkg path symbol.
func dgopkgpathOff(s *obj.LSym, ot int, pkg *types.Pkg) int {
	if pkg == nil {
		return duint32(s, ot, 0)
	}
	if pkg == localpkg && myimportpath == "" {
		// If we don't know the full import path of the package being compiled
		// (i.e. -p was not passed on the compiler command line), emit a reference to
		// type..importpath.""., which the linker will rewrite using the correct import path.
		// Every package that imports this one directly defines the symbol.
		// See also https://groups.google.com/forum/#!topic/golang-dev/myb9s53HxGQ.
		ns := Ctxt.Lookup(`type..importpath."".`)
		return dsymptrOff(s, ot, ns)
	}

	dimportpath(pkg)
	return dsymptrOff(s, ot, pkg.Pathsym)
}

// dnameField dumps a reflect.name for a struct field.
func dnameField(lsym *obj.LSym, ot int, spkg *types.Pkg, ft *types.Field) int {
	if !types.IsExported(ft.Sym.Name) && ft.Sym.Pkg != spkg {
		Fatalf("package mismatch for %v", ft.Sym)
	}
	nsym := dname(ft.Sym.Name, ft.Note, nil, types.IsExported(ft.Sym.Name))
	return dsymptr(lsym, ot, nsym, 0)
}

// dnameData writes the contents of a reflect.name into s at offset ot.
func dnameData(s *obj.LSym, ot int, name, tag string, pkg *types.Pkg, exported bool) int {
	if len(name) > 1<<16-1 {
		Fatalf("name too long: %s", name)
	}
	if len(tag) > 1<<16-1 {
		Fatalf("tag too long: %s", tag)
	}

	// Encode name and tag. See reflect/type.go for details.
	var bits byte
	l := 1 + 2 + len(name)
	if exported {
		bits |= 1 << 0
	}
	if len(tag) > 0 {
		l += 2 + len(tag)
		bits |= 1 << 1
	}
	if pkg != nil {
		bits |= 1 << 2
	}
	b := make([]byte, l)
	b[0] = bits
	b[1] = uint8(len(name) >> 8)
	b[2] = uint8(len(name))
	copy(b[3:], name)
	if len(tag) > 0 {
		tb := b[3+len(name):]
		tb[0] = uint8(len(tag) >> 8)
		tb[1] = uint8(len(tag))
		copy(tb[2:], tag)
	}

	ot = int(s.WriteBytes(Ctxt, int64(ot), b))

	if pkg != nil {
		ot = dgopkgpathOff(s, ot, pkg)
	}

	return ot
}

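// A worked example of the encoding above: an exported name "Foo" with
// tag `json:"foo"` and nil pkg encodes as
//
//	0x03                      // bits: exported | has-tag
//	0x00 0x03 'F' 'o' 'o'     // big-endian 2-byte name length, then the name
//	0x00 0x0a 'j' 's' 'o' ... // big-endian 2-byte tag length, then the tag
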
var dnameCount int

// dname creates a reflect.name for a struct field or method.
func dname(name, tag string, pkg *types.Pkg, exported bool) *obj.LSym {
	// Write out data as "type.." to signal two things to the
	// linker, first that when dynamically linking, the symbol
	// should be moved to a relro section, and second that the
	// contents should not be decoded as a type.
	sname := "type..namedata."
	if pkg == nil {
		// In the common case, share data with other packages.
		if name == "" {
			if exported {
				sname += "-noname-exported." + tag
			} else {
				sname += "-noname-unexported." + tag
			}
		} else {
			if exported {
				sname += name + "." + tag
			} else {
				sname += name + "-" + tag
			}
		}
	} else {
		sname = fmt.Sprintf(`%s"".%d`, sname, dnameCount)
		dnameCount++
	}
	s := Ctxt.Lookup(sname)
	if len(s.P) > 0 {
		return s
	}
	ot := dnameData(s, 0, name, tag, pkg, exported)
	ggloblsym(s, int32(ot), obj.DUPOK|obj.RODATA)
	return s
}

// dextratype dumps the fields of a runtime.uncommontype.
// dataAdd is the offset in bytes after the header where the
// backing array of the []method field is written (by dextratypeData).
func dextratype(lsym *obj.LSym, ot int, t *types.Type, dataAdd int) int {
	m := methods(t)
	if t.Sym == nil && len(m) == 0 {
		return ot
	}
	noff := int(Rnd(int64(ot), int64(Widthptr)))
	if noff != ot {
		Fatalf("unexpected alignment in dextratype for %v", t)
	}

	for _, a := range m {
		dtypesym(a.type_)
	}

	ot = dgopkgpathOff(lsym, ot, typePkg(t))

	dataAdd += uncommonSize(t)
	mcount := len(m)
	if mcount != int(uint16(mcount)) {
		Fatalf("too many methods on %v: %d", t, mcount)
	}
	xcount := sort.Search(mcount, func(i int) bool { return !types.IsExported(m[i].name.Name) })
	if dataAdd != int(uint32(dataAdd)) {
		Fatalf("methods are too far away on %v: %d", t, dataAdd)
	}

	ot = duint16(lsym, ot, uint16(mcount))
	ot = duint16(lsym, ot, uint16(xcount))
	ot = duint32(lsym, ot, uint32(dataAdd))
	ot = duint32(lsym, ot, 0)
	return ot
}

func typePkg(t *types.Type) *types.Pkg {
	tsym := t.Sym
	if tsym == nil {
		switch t.Etype {
		case TARRAY, TSLICE, TPTR, TCHAN:
			if t.Elem() != nil {
				tsym = t.Elem().Sym
			}
		}
	}
	if tsym != nil && t != types.Types[t.Etype] && t != types.Errortype {
		return tsym.Pkg
	}
	return nil
}

// dextratypeData dumps the backing array for the []method field of
// runtime.uncommontype.
func dextratypeData(lsym *obj.LSym, ot int, t *types.Type) int {
	for _, a := range methods(t) {
		// ../../../../runtime/type.go:/method
		exported := types.IsExported(a.name.Name)
		var pkg *types.Pkg
		if !exported && a.name.Pkg != typePkg(t) {
			pkg = a.name.Pkg
		}
		nsym := dname(a.name.Name, "", pkg, exported)

		ot = dsymptrOff(lsym, ot, nsym)
		ot = dmethodptrOff(lsym, ot, dtypesym(a.mtype))
		ot = dmethodptrOff(lsym, ot, a.isym.Linksym())
		ot = dmethodptrOff(lsym, ot, a.tsym.Linksym())
	}
	return ot
}

func dmethodptrOff(s *obj.LSym, ot int, x *obj.LSym) int {
	duint32(s, ot, 0)
	r := obj.Addrel(s)
	r.Off = int32(ot)
	r.Siz = 4
	r.Sym = x
	r.Type = objabi.R_METHODOFF
	return ot + 4
}

var kinds = []int{
	TINT:        objabi.KindInt,
	TUINT:       objabi.KindUint,
	TINT8:       objabi.KindInt8,
	TUINT8:      objabi.KindUint8,
	TINT16:      objabi.KindInt16,
	TUINT16:     objabi.KindUint16,
	TINT32:      objabi.KindInt32,
	TUINT32:     objabi.KindUint32,
	TINT64:      objabi.KindInt64,
	TUINT64:     objabi.KindUint64,
	TUINTPTR:    objabi.KindUintptr,
	TFLOAT32:    objabi.KindFloat32,
	TFLOAT64:    objabi.KindFloat64,
	TBOOL:       objabi.KindBool,
	TSTRING:     objabi.KindString,
	TPTR:        objabi.KindPtr,
	TSTRUCT:     objabi.KindStruct,
	TINTER:      objabi.KindInterface,
	TCHAN:       objabi.KindChan,
	TMAP:        objabi.KindMap,
	TARRAY:      objabi.KindArray,
	TSLICE:      objabi.KindSlice,
	TFUNC:       objabi.KindFunc,
	TCOMPLEX64:  objabi.KindComplex64,
	TCOMPLEX128: objabi.KindComplex128,
	TUNSAFEPTR:  objabi.KindUnsafePointer,
}

// typeptrdata returns the length in bytes of the prefix of t
// containing pointer data. Anything after this offset is scalar data.
func typeptrdata(t *types.Type) int64 {
	if !types.Haspointers(t) {
		return 0
	}

	switch t.Etype {
	case TPTR,
		TUNSAFEPTR,
		TFUNC,
		TCHAN,
		TMAP:
		return int64(Widthptr)

	case TSTRING:
		// struct { byte *str; intgo len; }
		return int64(Widthptr)

	case TINTER:
		// struct { Itab *tab;	void *data; } or
		// struct { Type *type; void *data; }
		// Note: see comment in plive.go:onebitwalktype1.
		return 2 * int64(Widthptr)

	case TSLICE:
		// struct { byte *array; uintgo len; uintgo cap; }
		return int64(Widthptr)

	case TARRAY:
		// haspointers already eliminated t.NumElem() == 0.
		return (t.NumElem()-1)*t.Elem().Width + typeptrdata(t.Elem())

	case TSTRUCT:
		// Find the last field that has pointers.
		var lastPtrField *types.Field
		for _, t1 := range t.Fields().Slice() {
			if types.Haspointers(t1.Type) {
				lastPtrField = t1
			}
		}
		return lastPtrField.Offset + typeptrdata(lastPtrField.Type)

	default:
		Fatalf("typeptrdata: unexpected type, %v", t)
		return 0
	}
}

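// For illustration: on a 64-bit system, typeptrdata(struct{ p *int; x int })
// is 8, since only the first word can hold a pointer, while
// typeptrdata([4]*int) is (4-1)*8 + 8 = 32, the whole array.
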
// tflag is documented in reflect/type.go.
//
// tflag values must be kept in sync with copies in:
//	cmd/compile/internal/gc/reflect.go
//	cmd/link/internal/ld/decodesym.go
//	reflect/type.go
//	runtime/type.go
const (
	tflagUncommon  = 1 << 0
	tflagExtraStar = 1 << 1
	tflagNamed     = 1 << 2
)

var (
	algarray       *obj.LSym
	memhashvarlen  *obj.LSym
	memequalvarlen *obj.LSym
)

// dcommontype dumps the contents of a reflect.rtype (runtime._type).
func dcommontype(lsym *obj.LSym, t *types.Type) int {
	sizeofAlg := 2 * Widthptr
	if algarray == nil {
		algarray = sysvar("algarray")
	}
	dowidth(t)
	alg := algtype(t)
	var algsym *obj.LSym
	if alg == ASPECIAL || alg == AMEM {
		algsym = dalgsym(t)
	}

	sptrWeak := true
	var sptr *obj.LSym
	if !t.IsPtr() || t.IsPtrElem() {
		tptr := types.NewPtr(t)
		if t.Sym != nil || methods(tptr) != nil {
			sptrWeak = false
		}
		sptr = dtypesym(tptr)
	}

	gcsym, useGCProg, ptrdata := dgcsym(t)

	// ../../../../reflect/type.go:/^type.rtype
	// actual type structure
	//	type rtype struct {
	//		size          uintptr
	//		ptrdata       uintptr
	//		hash          uint32
	//		tflag         tflag
	//		align         uint8
	//		fieldAlign    uint8
	//		kind          uint8
	//		alg           *typeAlg
	//		gcdata        *byte
	//		str           nameOff
	//		ptrToThis     typeOff
	//	}
	ot := 0
	ot = duintptr(lsym, ot, uint64(t.Width))
	ot = duintptr(lsym, ot, uint64(ptrdata))
	ot = duint32(lsym, ot, typehash(t))

	var tflag uint8
	if uncommonSize(t) != 0 {
		tflag |= tflagUncommon
	}
	if t.Sym != nil && t.Sym.Name != "" {
		tflag |= tflagNamed
	}

	exported := false
	p := t.LongString()
	// If we're writing out type T,
	// we are very likely to write out type *T as well.
	// Use the string "*T"[1:] for "T", so that the two
	// share storage. This is a cheap way to reduce the
	// amount of space taken up by reflect strings.
	if !strings.HasPrefix(p, "*") {
		p = "*" + p
		tflag |= tflagExtraStar
		if t.Sym != nil {
			exported = types.IsExported(t.Sym.Name)
		}
	} else {
		if t.Elem() != nil && t.Elem().Sym != nil {
			exported = types.IsExported(t.Elem().Sym.Name)
		}
	}

	ot = duint8(lsym, ot, tflag)

	// runtime (and common sense) expects alignment to be a power of two.
	i := int(t.Align)

	if i == 0 {
		i = 1
	}
	if i&(i-1) != 0 {
		Fatalf("invalid alignment %d for %v", t.Align, t)
	}
	ot = duint8(lsym, ot, t.Align) // align
	ot = duint8(lsym, ot, t.Align) // fieldAlign

	i = kinds[t.Etype]
	if isdirectiface(t) {
		i |= objabi.KindDirectIface
	}
	if useGCProg {
		i |= objabi.KindGCProg
	}
	ot = duint8(lsym, ot, uint8(i)) // kind
	if algsym == nil {
		ot = dsymptr(lsym, ot, algarray, int(alg)*sizeofAlg)
	} else {
		ot = dsymptr(lsym, ot, algsym, 0)
	}
	ot = dsymptr(lsym, ot, gcsym, 0) // gcdata

	nsym := dname(p, "", nil, exported)
	ot = dsymptrOff(lsym, ot, nsym) // str
	// ptrToThis
	if sptr == nil {
		ot = duint32(lsym, ot, 0)
	} else if sptrWeak {
		ot = dsymptrWeakOff(lsym, ot, sptr)
	} else {
		ot = dsymptrOff(lsym, ot, sptr)
	}

	return ot
}

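// For illustration: a named type Foo is recorded with the reflect string
// "*Foo" and tflagExtraStar set; reflect slices off the leading star when
// reporting the name, so Foo and *Foo can share a single name symbol.
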
// typeHasNoAlg reports whether t does not have any associated hash/eq
// algorithms because t, or some component of t, is marked Noalg.
func typeHasNoAlg(t *types.Type) bool {
	a, bad := algtype1(t)
	return a == ANOEQ && bad.Noalg()
}

func typesymname(t *types.Type) string {
	name := t.ShortString()
	// Use a separate symbol name for Noalg types for #17752.
	if typeHasNoAlg(t) {
		name = "noalg." + name
	}
	return name
}

// Fake package for runtime type info (headers)
// Don't access directly, use typeLookup below.
var (
	typepkgmu sync.Mutex // protects typepkg lookups
	typepkg   = types.NewPkg("type", "type")
)

func typeLookup(name string) *types.Sym {
	typepkgmu.Lock()
	s := typepkg.Lookup(name)
	typepkgmu.Unlock()
	return s
}

func typesym(t *types.Type) *types.Sym {
	return typeLookup(typesymname(t))
}

// tracksym returns the symbol for tracking use of field/method f, assumed
// to be a member of struct/interface type t.
func tracksym(t *types.Type, f *types.Field) *types.Sym {
	return trackpkg.Lookup(t.ShortString() + "." + f.Sym.Name)
}

func typesymprefix(prefix string, t *types.Type) *types.Sym {
	p := prefix + "." + t.ShortString()
	s := typeLookup(p)

	// This function is for looking up type-related generated functions
	// (e.g. eq and hash). Make sure they are indeed generated.
	signatmu.Lock()
	addsignat(t)
	signatmu.Unlock()

	//print("algsym: %s -> %+S\n", p, s);

	return s
}

func typenamesym(t *types.Type) *types.Sym {
	if t == nil || (t.IsPtr() && t.Elem() == nil) || t.IsUntyped() {
		Fatalf("typenamesym %v", t)
	}
	s := typesym(t)
	signatmu.Lock()
	addsignat(t)
	signatmu.Unlock()
	return s
}

func typename(t *types.Type) *Node {
	s := typenamesym(t)
	if s.Def == nil {
		n := newnamel(src.NoXPos, s)
		n.Type = types.Types[TUINT8]
		n.SetClass(PEXTERN)
		n.SetTypecheck(1)
		s.Def = asTypesNode(n)
	}

	n := nod(OADDR, asNode(s.Def), nil)
	n.Type = types.NewPtr(asNode(s.Def).Type)
	n.SetAddable(true)
	n.SetTypecheck(1)
	return n
}

func itabname(t, itype *types.Type) *Node {
	if t == nil || (t.IsPtr() && t.Elem() == nil) || t.IsUntyped() || !itype.IsInterface() || itype.IsEmptyInterface() {
		Fatalf("itabname(%v, %v)", t, itype)
	}
	s := itabpkg.Lookup(t.ShortString() + "," + itype.ShortString())
	if s.Def == nil {
		n := newname(s)
		n.Type = types.Types[TUINT8]
		n.SetClass(PEXTERN)
		n.SetTypecheck(1)
		s.Def = asTypesNode(n)
		itabs = append(itabs, itabEntry{t: t, itype: itype, lsym: s.Linksym()})
	}

	n := nod(OADDR, asNode(s.Def), nil)
	n.Type = types.NewPtr(asNode(s.Def).Type)
	n.SetAddable(true)
	n.SetTypecheck(1)
	return n
}

// isreflexive reports whether t has a reflexive equality operator.
// That is, whether x == x is true for all x of type t.
func isreflexive(t *types.Type) bool {
	switch t.Etype {
	case TBOOL,
		TINT,
		TUINT,
		TINT8,
		TUINT8,
		TINT16,
		TUINT16,
		TINT32,
		TUINT32,
		TINT64,
		TUINT64,
		TUINTPTR,
		TPTR,
		TUNSAFEPTR,
		TSTRING,
		TCHAN:
		return true

	case TFLOAT32,
		TFLOAT64,
		TCOMPLEX64,
		TCOMPLEX128,
		TINTER:
		return false

	case TARRAY:
		return isreflexive(t.Elem())

	case TSTRUCT:
		for _, t1 := range t.Fields().Slice() {
			if !isreflexive(t1.Type) {
				return false
			}
		}
		return true

	default:
		Fatalf("bad type for map key: %v", t)
		return false
	}
}

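// For illustration: float64 is not reflexive because NaN != NaN, so map
// code cannot assume a stored key equals itself; this feeds the
// "reflexive key" flag written in dtypesym's TMAP case.
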
// needkeyupdate reports whether map updates with t as a key
// need the key to be updated.
func needkeyupdate(t *types.Type) bool {
	switch t.Etype {
	case TBOOL, TINT, TUINT, TINT8, TUINT8, TINT16, TUINT16, TINT32, TUINT32,
		TINT64, TUINT64, TUINTPTR, TPTR, TUNSAFEPTR, TCHAN:
		return false

	case TFLOAT32, TFLOAT64, TCOMPLEX64, TCOMPLEX128, // floats and complex can be +0/-0
		TINTER,
		TSTRING: // strings might have smaller backing stores
		return true

	case TARRAY:
		return needkeyupdate(t.Elem())

	case TSTRUCT:
		for _, t1 := range t.Fields().Slice() {
			if needkeyupdate(t1.Type) {
				return true
			}
		}
		return false

	default:
		Fatalf("bad type for map key: %v", t)
		return true
	}
}

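// For illustration: two equal strings can have different backing arrays
// (e.g. "xy" and "xxy"[1:]), so m[k] = v rewrites the stored key to the
// one that may retain less memory; for integer keys, equal means
// identical, and no update is needed.
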
// hashMightPanic reports whether the hash of a map key of type t might panic.
func hashMightPanic(t *types.Type) bool {
	switch t.Etype {
	case TINTER:
		return true

	case TARRAY:
		return hashMightPanic(t.Elem())

	case TSTRUCT:
		for _, t1 := range t.Fields().Slice() {
			if hashMightPanic(t1.Type) {
				return true
			}
		}
		return false

	default:
		return false
	}
}

// formalType replaces byte and rune aliases with real types.
// They've been separate internally to make error messages
// better, but we have to merge them in the reflect tables.
func formalType(t *types.Type) *types.Type {
	if t == types.Bytetype || t == types.Runetype {
		return types.Types[t.Etype]
	}
	return t
}

func dtypesym(t *types.Type) *obj.LSym {
	t = formalType(t)
	if t.IsUntyped() {
		Fatalf("dtypesym %v", t)
	}

	s := typesym(t)
	lsym := s.Linksym()
	if s.Siggen() {
		return lsym
	}
	s.SetSiggen(true)

	// special case (look for runtime below):
	// when compiling package runtime,
	// emit the type structures for int, float, etc.
	tbase := t

	if t.IsPtr() && t.Sym == nil && t.Elem().Sym != nil {
		tbase = t.Elem()
	}
	dupok := 0
	if tbase.Sym == nil {
		dupok = obj.DUPOK
	}

	if myimportpath != "runtime" || (tbase != types.Types[tbase.Etype] && tbase != types.Bytetype && tbase != types.Runetype && tbase != types.Errortype) { // int, float, etc
		// named types from other files are defined only by those files
		if tbase.Sym != nil && tbase.Sym.Pkg != localpkg {
			return lsym
		}
		// TODO(mdempsky): Investigate whether this can happen.
		if tbase.Etype == TFORW {
			return lsym
		}
	}

	ot := 0
	switch t.Etype {
	default:
		ot = dcommontype(lsym, t)
		ot = dextratype(lsym, ot, t, 0)

	case TARRAY:
		// ../../../../runtime/type.go:/arrayType
		s1 := dtypesym(t.Elem())
		t2 := types.NewSlice(t.Elem())
		s2 := dtypesym(t2)
		ot = dcommontype(lsym, t)
		ot = dsymptr(lsym, ot, s1, 0)
		ot = dsymptr(lsym, ot, s2, 0)
		ot = duintptr(lsym, ot, uint64(t.NumElem()))
		ot = dextratype(lsym, ot, t, 0)

	case TSLICE:
		// ../../../../runtime/type.go:/sliceType
		s1 := dtypesym(t.Elem())
		ot = dcommontype(lsym, t)
		ot = dsymptr(lsym, ot, s1, 0)
		ot = dextratype(lsym, ot, t, 0)

	case TCHAN:
		// ../../../../runtime/type.go:/chanType
		s1 := dtypesym(t.Elem())
		ot = dcommontype(lsym, t)
		ot = dsymptr(lsym, ot, s1, 0)
		ot = duintptr(lsym, ot, uint64(t.ChanDir()))
		ot = dextratype(lsym, ot, t, 0)

	case TFUNC:
		for _, t1 := range t.Recvs().Fields().Slice() {
			dtypesym(t1.Type)
		}
		isddd := false
		for _, t1 := range t.Params().Fields().Slice() {
			isddd = t1.IsDDD()
			dtypesym(t1.Type)
		}
		for _, t1 := range t.Results().Fields().Slice() {
			dtypesym(t1.Type)
		}

		ot = dcommontype(lsym, t)
		inCount := t.NumRecvs() + t.NumParams()
		outCount := t.NumResults()
		if isddd {
			outCount |= 1 << 15
		}
		ot = duint16(lsym, ot, uint16(inCount))
		ot = duint16(lsym, ot, uint16(outCount))
		if Widthptr == 8 {
			ot += 4 // align for *rtype
		}

		dataAdd := (inCount + t.NumResults()) * Widthptr
		ot = dextratype(lsym, ot, t, dataAdd)

		// Array of rtype pointers follows funcType.
		for _, t1 := range t.Recvs().Fields().Slice() {
			ot = dsymptr(lsym, ot, dtypesym(t1.Type), 0)
		}
		for _, t1 := range t.Params().Fields().Slice() {
			ot = dsymptr(lsym, ot, dtypesym(t1.Type), 0)
		}
		for _, t1 := range t.Results().Fields().Slice() {
			ot = dsymptr(lsym, ot, dtypesym(t1.Type), 0)
		}

	case TINTER:
		m := imethods(t)
		n := len(m)
		for _, a := range m {
			dtypesym(a.type_)
		}

		// ../../../../runtime/type.go:/interfaceType
		ot = dcommontype(lsym, t)

		var tpkg *types.Pkg
		if t.Sym != nil && t != types.Types[t.Etype] && t != types.Errortype {
			tpkg = t.Sym.Pkg
		}
		ot = dgopkgpath(lsym, ot, tpkg)

		ot = dsymptr(lsym, ot, lsym, ot+3*Widthptr+uncommonSize(t))
		ot = duintptr(lsym, ot, uint64(n))
		ot = duintptr(lsym, ot, uint64(n))
		dataAdd := imethodSize() * n
		ot = dextratype(lsym, ot, t, dataAdd)

		for _, a := range m {
			// ../../../../runtime/type.go:/imethod
			exported := types.IsExported(a.name.Name)
			var pkg *types.Pkg
			if !exported && a.name.Pkg != tpkg {
				pkg = a.name.Pkg
			}
			nsym := dname(a.name.Name, "", pkg, exported)

			ot = dsymptrOff(lsym, ot, nsym)
			ot = dsymptrOff(lsym, ot, dtypesym(a.type_))
		}

	// ../../../../runtime/type.go:/mapType
	case TMAP:
		s1 := dtypesym(t.Key())
		s2 := dtypesym(t.Elem())
		s3 := dtypesym(bmap(t))
		ot = dcommontype(lsym, t)
		ot = dsymptr(lsym, ot, s1, 0)
		ot = dsymptr(lsym, ot, s2, 0)
		ot = dsymptr(lsym, ot, s3, 0)
		var flags uint32
		// Note: flags must match maptype accessors in ../../../../runtime/type.go
		// and maptype builder in ../../../../reflect/type.go:MapOf.
		if t.Key().Width > MAXKEYSIZE {
			ot = duint8(lsym, ot, uint8(Widthptr))
			flags |= 1 // indirect key
		} else {
			ot = duint8(lsym, ot, uint8(t.Key().Width))
		}

		if t.Elem().Width > MAXELEMSIZE {
			ot = duint8(lsym, ot, uint8(Widthptr))
			flags |= 2 // indirect value
		} else {
			ot = duint8(lsym, ot, uint8(t.Elem().Width))
		}
		ot = duint16(lsym, ot, uint16(bmap(t).Width))
		if isreflexive(t.Key()) {
			flags |= 4 // reflexive key
		}
		if needkeyupdate(t.Key()) {
			flags |= 8 // need key update
		}
		if hashMightPanic(t.Key()) {
			flags |= 16 // hash might panic
		}
		ot = duint32(lsym, ot, flags)
		ot = dextratype(lsym, ot, t, 0)

	case TPTR:
		if t.Elem().Etype == TANY {
			// ../../../../runtime/type.go:/UnsafePointerType
			ot = dcommontype(lsym, t)
			ot = dextratype(lsym, ot, t, 0)

			break
		}

		// ../../../../runtime/type.go:/ptrType
		s1 := dtypesym(t.Elem())

		ot = dcommontype(lsym, t)
		ot = dsymptr(lsym, ot, s1, 0)
		ot = dextratype(lsym, ot, t, 0)

	// ../../../../runtime/type.go:/structType
	// for security, only the exported fields.
	case TSTRUCT:
		fields := t.Fields().Slice()
		for _, t1 := range fields {
			dtypesym(t1.Type)
		}

		// All non-exported struct field names within a struct
		// type must originate from a single package. By
		// identifying and recording that package within the
		// struct type descriptor, we can omit that
		// information from the field descriptors.
		var spkg *types.Pkg
		for _, f := range fields {
			if !types.IsExported(f.Sym.Name) {
				spkg = f.Sym.Pkg
				break
			}
		}

		ot = dcommontype(lsym, t)
		ot = dgopkgpath(lsym, ot, spkg)
		ot = dsymptr(lsym, ot, lsym, ot+3*Widthptr+uncommonSize(t))
		ot = duintptr(lsym, ot, uint64(len(fields)))
		ot = duintptr(lsym, ot, uint64(len(fields)))

		dataAdd := len(fields) * structfieldSize()
		ot = dextratype(lsym, ot, t, dataAdd)

		for _, f := range fields {
			// ../../../../runtime/type.go:/structField
			ot = dnameField(lsym, ot, spkg, f)
			ot = dsymptr(lsym, ot, dtypesym(f.Type), 0)
			offsetAnon := uint64(f.Offset) << 1
			if offsetAnon>>1 != uint64(f.Offset) {
				Fatalf("%v: bad field offset for %s", t, f.Sym.Name)
			}
			if f.Embedded != 0 {
				offsetAnon |= 1
			}
			ot = duintptr(lsym, ot, offsetAnon)
		}
	}

	ot = dextratypeData(lsym, ot, t)
	ggloblsym(lsym, int32(ot), int16(dupok|obj.RODATA))

	// The linker will leave a table of all the typelinks for
	// types in the binary, so the runtime can find them.
	//
	// When buildmode=shared, all types are in typelinks so the
	// runtime can deduplicate type pointers.
	keep := Ctxt.Flag_dynlink
	if !keep && t.Sym == nil {
		// For an unnamed type, we only need the link if the type can
		// be created at run time by reflect.PtrTo and similar
		// functions. If the type exists in the program, those
		// functions must return the existing type structure rather
		// than creating a new one.
		switch t.Etype {
		case TPTR, TARRAY, TCHAN, TFUNC, TMAP, TSLICE, TSTRUCT:
			keep = true
		}
	}
	// Do not put Noalg types in typelinks.  See issue #22605.
	if typeHasNoAlg(t) {
		keep = false
	}
	lsym.Set(obj.AttrMakeTypelink, keep)

	return lsym
}

// for each itabEntry, gather the methods on
// the concrete type that implement the interface
func peekitabs() {
	for i := range itabs {
		tab := &itabs[i]
		methods := genfun(tab.t, tab.itype)
		if len(methods) == 0 {
			continue
		}
		tab.entries = methods
	}
}

// for the given concrete type and interface
// type, return the (sorted) set of methods
// on the concrete type that implement the interface
func genfun(t, it *types.Type) []*obj.LSym {
	if t == nil || it == nil {
		return nil
	}
	sigs := imethods(it)
	methods := methods(t)
	out := make([]*obj.LSym, 0, len(sigs))
	// TODO(mdempsky): Short circuit before calling methods(t)?
	// See discussion on CL 105039.
	if len(sigs) == 0 {
		return nil
	}

	// both sigs and methods are sorted by name,
	// so we can find the intersection in a single pass
	for _, m := range methods {
		if m.name == sigs[0].name {
			out = append(out, m.isym.Linksym())
			sigs = sigs[1:]
			if len(sigs) == 0 {
				break
			}
		}
	}

	if len(sigs) != 0 {
		Fatalf("incomplete itab")
	}

	return out
}

// itabsym uses the information gathered in
// peekitabs to de-virtualize interface methods.
// Since this is called by the SSA backend, it shouldn't
// generate additional Nodes, Syms, etc.
func itabsym(it *obj.LSym, offset int64) *obj.LSym {
	var syms []*obj.LSym
	if it == nil {
		return nil
	}

	for i := range itabs {
		e := &itabs[i]
		if e.lsym == it {
			syms = e.entries
			break
		}
	}
	if syms == nil {
		return nil
	}

	// keep this arithmetic in sync with *itab layout
	methodnum := int((offset - 2*int64(Widthptr) - 8) / int64(Widthptr))
	if methodnum >= len(syms) {
		return nil
	}
	return syms[methodnum]
}

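// A worked example of the arithmetic above: on a 64-bit system the itab
// header is inter (8 bytes) + _type (8) + hash plus padding (8) = 24
// bytes, so an offset of 24 selects fun[0], 32 selects fun[1], and so on.
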
// addsignat ensures that a runtime type descriptor is emitted for t.
func addsignat(t *types.Type) {
	if _, ok := signatset[t]; !ok {
		signatset[t] = struct{}{}
		signatslice = append(signatslice, t)
	}
}

func addsignats(dcls []*Node) {
	// copy types from dcl list to signatset
	for _, n := range dcls {
		if n.Op == OTYPE {
			addsignat(n.Type)
		}
	}
}

func dumpsignats() {
	// Process signatset. Use a loop, as dtypesym adds
	// entries to signatset while it is being processed.
	signats := make([]typeAndStr, len(signatslice))
	for len(signatslice) > 0 {
		signats = signats[:0]
		// Transfer entries to a slice and sort, for reproducible builds.
		for _, t := range signatslice {
			signats = append(signats, typeAndStr{t: t, short: typesymname(t), regular: t.String()})
			delete(signatset, t)
		}
		signatslice = signatslice[:0]
		sort.Sort(typesByString(signats))
		for _, ts := range signats {
			t := ts.t
			dtypesym(t)
			if t.Sym != nil {
				dtypesym(types.NewPtr(t))
			}
		}
	}
}

func dumptabs() {
	// process itabs
	for _, i := range itabs {
		// dump empty itab symbol into i.sym
		// type itab struct {
		//   inter  *interfacetype
		//   _type  *_type
		//   hash   uint32
		//   _      [4]byte
		//   fun    [1]uintptr // variable sized
		// }
		o := dsymptr(i.lsym, 0, dtypesym(i.itype), 0)
		o = dsymptr(i.lsym, o, dtypesym(i.t), 0)
		o = duint32(i.lsym, o, typehash(i.t)) // copy of type hash
		o += 4                                // skip unused field
		for _, fn := range genfun(i.t, i.itype) {
			o = dsymptr(i.lsym, o, fn, 0) // method pointer for each method
		}
		// Nothing writes static itabs, so they are read only.
		ggloblsym(i.lsym, int32(o), int16(obj.DUPOK|obj.RODATA))
		ilink := itablinkpkg.Lookup(i.t.ShortString() + "," + i.itype.ShortString()).Linksym()
		dsymptr(ilink, 0, i.lsym, 0)
		ggloblsym(ilink, int32(Widthptr), int16(obj.DUPOK|obj.RODATA))
	}

	// process ptabs
	if localpkg.Name == "main" && len(ptabs) > 0 {
		ot := 0
		s := Ctxt.Lookup("go.plugin.tabs")
		for _, p := range ptabs {
			// Dump ptab symbol into go.pluginsym package.
			//
			// type ptab struct {
			//	name nameOff
			//	typ  typeOff // pointer to symbol
			// }
			nsym := dname(p.s.Name, "", nil, true)
			ot = dsymptrOff(s, ot, nsym)
			ot = dsymptrOff(s, ot, dtypesym(p.t))
		}
		ggloblsym(s, int32(ot), int16(obj.RODATA))

		ot = 0
		s = Ctxt.Lookup("go.plugin.exports")
		for _, p := range ptabs {
			ot = dsymptr(s, ot, p.s.Linksym(), 0)
		}
		ggloblsym(s, int32(ot), int16(obj.RODATA))
	}
}

func dumpimportstrings() {
	// generate import strings for imported packages
	for _, p := range types.ImportedPkgList() {
		dimportpath(p)
	}
}

func dumpbasictypes() {
	// do basic types if compiling package runtime.
	// they have to be in at least one package,
	// and runtime is always loaded implicitly,
	// so this is as good as any.
	// another possible choice would be package main,
	// but using runtime means fewer copies in object files.
	if myimportpath == "runtime" {
		for i := types.EType(1); i <= TBOOL; i++ {
			dtypesym(types.NewPtr(types.Types[i]))
		}
		dtypesym(types.NewPtr(types.Types[TSTRING]))
		dtypesym(types.NewPtr(types.Types[TUNSAFEPTR]))

		// emit type structs for error and func(error) string.
		// The latter is the type of an auto-generated wrapper.
		dtypesym(types.NewPtr(types.Errortype))

		dtypesym(functype(nil, []*Node{anonfield(types.Errortype)}, []*Node{anonfield(types.Types[TSTRING])}))

		// add paths for runtime and main, which the linker imports implicitly.
		dimportpath(Runtimepkg)

		if flag_race {
			dimportpath(racepkg)
		}
		if flag_msan {
			dimportpath(msanpkg)
		}
		dimportpath(types.NewPkg("main", ""))
	}
}

type typeAndStr struct {
	t       *types.Type
	short   string
	regular string
}

type typesByString []typeAndStr

func (a typesByString) Len() int { return len(a) }
func (a typesByString) Less(i, j int) bool {
	if a[i].short != a[j].short {
		return a[i].short < a[j].short
	}
	// When the only difference between the types is whether
	// they refer to byte or uint8, such as **byte vs **uint8,
	// the types' ShortStrings can be identical.
	// To preserve deterministic sort ordering, sort these by String().
	return a[i].regular < a[j].regular
}
func (a typesByString) Swap(i, j int) { a[i], a[j] = a[j], a[i] }

func dalgsym(t *types.Type) *obj.LSym {
	var lsym *obj.LSym
	var hashfunc *obj.LSym
	var eqfunc *obj.LSym

	// dalgsym is only called for a type that needs an algorithm table,
	// which implies that the type is comparable (or else it would use ANOEQ).

	if algtype(t) == AMEM {
		// we use one algorithm table for all AMEM types of a given size
		p := fmt.Sprintf(".alg%d", t.Width)

		s := typeLookup(p)
		lsym = s.Linksym()
		if s.AlgGen() {
			return lsym
		}
		s.SetAlgGen(true)

		if memhashvarlen == nil {
			memhashvarlen = sysfunc("memhash_varlen")
			memequalvarlen = sysvar("memequal_varlen") // asm func
		}

		// make hash closure
		p = fmt.Sprintf(".hashfunc%d", t.Width)

		hashfunc = typeLookup(p).Linksym()

		ot := 0
		ot = dsymptr(hashfunc, ot, memhashvarlen, 0)
		ot = duintptr(hashfunc, ot, uint64(t.Width)) // size encoded in closure
		ggloblsym(hashfunc, int32(ot), obj.DUPOK|obj.RODATA)

		// make equality closure
		p = fmt.Sprintf(".eqfunc%d", t.Width)

		eqfunc = typeLookup(p).Linksym()

		ot = 0
		ot = dsymptr(eqfunc, ot, memequalvarlen, 0)
		ot = duintptr(eqfunc, ot, uint64(t.Width))
		ggloblsym(eqfunc, int32(ot), obj.DUPOK|obj.RODATA)
	} else {
		// generate an alg table specific to this type
		s := typesymprefix(".alg", t)
		lsym = s.Linksym()

		hash := typesymprefix(".hash", t)
		eq := typesymprefix(".eq", t)
		hashfunc = typesymprefix(".hashfunc", t).Linksym()
		eqfunc = typesymprefix(".eqfunc", t).Linksym()

		genhash(hash, t)
		geneq(eq, t)

		// make Go funcs (closures) for calling hash and equal from Go
		dsymptr(hashfunc, 0, hash.Linksym(), 0)
		ggloblsym(hashfunc, int32(Widthptr), obj.DUPOK|obj.RODATA)
		dsymptr(eqfunc, 0, eq.Linksym(), 0)
		ggloblsym(eqfunc, int32(Widthptr), obj.DUPOK|obj.RODATA)
	}

	// ../../../../runtime/alg.go:/typeAlg
	ot := 0

	ot = dsymptr(lsym, ot, hashfunc, 0)
	ot = dsymptr(lsym, ot, eqfunc, 0)
	ggloblsym(lsym, int32(ot), obj.DUPOK|obj.RODATA)
	return lsym
}

// maxPtrmaskBytes is the maximum length of a GC ptrmask bitmap,
// which holds 1-bit entries describing where pointers are in a given type.
// Above this length, the GC information is recorded as a GC program,
// which can express repetition compactly. In either form, the
// information is used by the runtime to initialize the heap bitmap,
// and for large types (like 128 or more words), they are roughly the
// same speed. GC programs are never much larger and often more
// compact. (If large arrays are involved, they can be arbitrarily
// more compact.)
//
// The cutoff must be large enough that any allocation large enough to
// use a GC program is large enough that it does not share heap bitmap
// bytes with any other objects, allowing the GC program execution to
// assume an aligned start and not use atomic operations. In the current
// runtime, this means all malloc size classes larger than the cutoff must
// be multiples of four words. On 32-bit systems that's 16 bytes, and
// all size classes >= 16 bytes are 16-byte aligned, so no real constraint.
// On 64-bit systems, that's 32 bytes, and 32-byte alignment is guaranteed
// for size classes >= 256 bytes. On a 64-bit system, 256 bytes allocated
// is 32 pointers, the bits for which fit in 4 bytes. So maxPtrmaskBytes
// must be >= 4.
//
// We used to use 16 because the GC programs do have some constant overhead
// to get started, and processing 128 pointers seems to be enough to
// amortize that overhead well.
//
// To make sure that the runtime's chansend can call typeBitsBulkBarrier,
// we raised the limit to 2048, so that even 32-bit systems are guaranteed to
// use bitmaps for objects up to 64 kB in size.
//
// Also known to reflect/type.go.
//
const maxPtrmaskBytes = 2048

// dgcsym emits and returns a data symbol containing GC information for type t,
// along with a boolean reporting whether the UseGCProg bit should be set in
// the type kind, and the ptrdata field to record in the reflect type information.
func dgcsym(t *types.Type) (lsym *obj.LSym, useGCProg bool, ptrdata int64) {
	ptrdata = typeptrdata(t)
	if ptrdata/int64(Widthptr) <= maxPtrmaskBytes*8 {
		lsym = dgcptrmask(t)
		return
	}

	useGCProg = true
	lsym, ptrdata = dgcprog(t)
	return
}

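// A worked example of the cutoff above: maxPtrmaskBytes*8 = 16384 one-bit
// entries, one per pointer-sized word, so on a 64-bit system a ptrmask can
// cover up to 16384*8 = 128 kB of pointer data before a GC program is used.
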
// dgcptrmask emits and returns the symbol containing a pointer mask for type t.
func dgcptrmask(t *types.Type) *obj.LSym {
	ptrmask := make([]byte, (typeptrdata(t)/int64(Widthptr)+7)/8)
	fillptrmask(t, ptrmask)
	p := fmt.Sprintf("gcbits.%x", ptrmask)

	sym := Runtimepkg.Lookup(p)
	lsym := sym.Linksym()
	if !sym.Uniq() {
		sym.SetUniq(true)
		for i, x := range ptrmask {
			duint8(lsym, i, x)
		}
		ggloblsym(lsym, int32(len(ptrmask)), obj.DUPOK|obj.RODATA|obj.LOCAL)
	}
	return lsym
}

// fillptrmask fills in ptrmask with 1s corresponding to the
// word offsets in t that hold pointers.
// ptrmask is assumed to fit at least typeptrdata(t)/Widthptr bits.
func fillptrmask(t *types.Type, ptrmask []byte) {
	for i := range ptrmask {
		ptrmask[i] = 0
	}
	if !types.Haspointers(t) {
		return
	}

	vec := bvalloc(8 * int32(len(ptrmask)))
	onebitwalktype1(t, 0, vec)

	nptr := typeptrdata(t) / int64(Widthptr)
	for i := int64(0); i < nptr; i++ {
		if vec.Get(int32(i)) {
			ptrmask[i/8] |= 1 << (uint(i) % 8)
		}
	}
}

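// For illustration: for struct{ a *int; b uintptr; c *int } on a 64-bit
// system, words 0 and 2 hold pointers, so fillptrmask sets bits 0 and 2
// and the mask is the single byte 0x05.
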
// dgcprog emits and returns the symbol containing a GC program for type t
// along with the size of the data described by the program (in the range [typeptrdata(t), t.Width]).
// In practice, the size is typeptrdata(t) except for non-trivial arrays.
// For non-trivial arrays, the program describes the full t.Width size.
func dgcprog(t *types.Type) (*obj.LSym, int64) {
	dowidth(t)
	if t.Width == BADWIDTH {
		Fatalf("dgcprog: %v badwidth", t)
	}
	lsym := typesymprefix(".gcprog", t).Linksym()
	var p GCProg
	p.init(lsym)
	p.emit(t, 0)
	offset := p.w.BitIndex() * int64(Widthptr)
	p.end()
	if ptrdata := typeptrdata(t); offset < ptrdata || offset > t.Width {
		Fatalf("dgcprog: %v: offset=%d but ptrdata=%d size=%d", t, offset, ptrdata, t.Width)
	}
	return lsym, offset
}

type GCProg struct {
	lsym   *obj.LSym
	symoff int
	w      gcprog.Writer
}

var Debug_gcprog int // set by -d gcprog

func (p *GCProg) init(lsym *obj.LSym) {
	p.lsym = lsym
	p.symoff = 4 // first 4 bytes hold program length
	p.w.Init(p.writeByte)
	if Debug_gcprog > 0 {
		fmt.Fprintf(os.Stderr, "compile: start GCProg for %v\n", lsym)
		p.w.Debug(os.Stderr)
	}
}

func (p *GCProg) writeByte(x byte) {
	p.symoff = duint8(p.lsym, p.symoff, x)
}

func (p *GCProg) end() {
	p.w.End()
	duint32(p.lsym, 0, uint32(p.symoff-4))
	ggloblsym(p.lsym, int32(p.symoff), obj.DUPOK|obj.RODATA|obj.LOCAL)
	if Debug_gcprog > 0 {
		fmt.Fprintf(os.Stderr, "compile: end GCProg for %v\n", p.lsym)
	}
}

func (p *GCProg) emit(t *types.Type, offset int64) {
	dowidth(t)
	if !types.Haspointers(t) {
		return
	}
	if t.Width == int64(Widthptr) {
		p.w.Ptr(offset / int64(Widthptr))
		return
	}
	switch t.Etype {
	default:
		Fatalf("GCProg.emit: unexpected type %v", t)

	case TSTRING:
		p.w.Ptr(offset / int64(Widthptr))

	case TINTER:
		// Note: the first word isn't a pointer. See comment in plive.go:onebitwalktype1.
		p.w.Ptr(offset/int64(Widthptr) + 1)

	case TSLICE:
		p.w.Ptr(offset / int64(Widthptr))

	case TARRAY:
		if t.NumElem() == 0 {
			// should have been handled by haspointers check above
			Fatalf("GCProg.emit: empty array")
		}

		// Flatten array-of-array-of-array to just a big array by multiplying counts.
		count := t.NumElem()
		elem := t.Elem()
		for elem.IsArray() {
			count *= elem.NumElem()
			elem = elem.Elem()
		}

		if !p.w.ShouldRepeat(elem.Width/int64(Widthptr), count) {
			// Cheaper to just emit the bits.
			for i := int64(0); i < count; i++ {
				p.emit(elem, offset+i*elem.Width)
			}
			return
		}
		p.emit(elem, offset)
		p.w.ZeroUntil((offset + elem.Width) / int64(Widthptr))
		p.w.Repeat(elem.Width/int64(Widthptr), count-1)

	case TSTRUCT:
		for _, t1 := range t.Fields().Slice() {
			p.emit(t1.Type, offset+t1.Offset)
		}
	}
}

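// For illustration: for [1000]*byte, when repetition pays off, the emitter
// writes the element's one pointer bit and then a single repeat instruction
// covering the remaining 999 elements, rather than 1000 individual bits;
// that compactness is why large types use GC programs instead of ptrmasks.
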
// zeroaddr returns the address of a symbol with at least
// size bytes of zeros.
func zeroaddr(size int64) *Node {
	if size >= 1<<31 {
		Fatalf("map elem too big %d", size)
	}
	if zerosize < size {
		zerosize = size
	}
	s := mappkg.Lookup("zero")
	if s.Def == nil {
		x := newname(s)
		x.Type = types.Types[TUINT8]
		x.SetClass(PEXTERN)
		x.SetTypecheck(1)
		s.Def = asTypesNode(x)
	}
	z := nod(OADDR, asNode(s.Def), nil)
	z.Type = types.NewPtr(types.Types[TUINT8])
	z.SetAddable(true)
	z.SetTypecheck(1)
	return z
}