PR target/84827
[official-gcc.git] / libgo / go / reflect / type.go
blob6b082c172b440d4245a3132a9079dbb052cc6c1e
1 // Copyright 2009 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
5 // Package reflect implements run-time reflection, allowing a program to
6 // manipulate objects with arbitrary types. The typical use is to take a value
7 // with static type interface{} and extract its dynamic type information by
8 // calling TypeOf, which returns a Type.
9 //
10 // A call to ValueOf returns a Value representing the run-time data.
11 // Zero takes a Type and returns a Value representing a zero value
12 // for that type.
14 // See "The Laws of Reflection" for an introduction to reflection in Go:
15 // https://golang.org/doc/articles/laws_of_reflection.html
16 package reflect
18 import (
19 "strconv"
20 "sync"
21 "unicode"
22 "unicode/utf8"
23 "unsafe"
// Type is the representation of a Go type.
//
// Not all methods apply to all kinds of types. Restrictions,
// if any, are noted in the documentation for each method.
// Use the Kind method to find out the kind of type before
// calling kind-specific methods. Calling a method
// inappropriate to the kind of type causes a run-time panic.
//
// Type values are comparable, such as with the == operator,
// so they can be used as map keys.
// Two Type values are equal if they represent identical types.
type Type interface {
	// Methods applicable to all types.

	// Align returns the alignment in bytes of a value of
	// this type when allocated in memory.
	Align() int

	// FieldAlign returns the alignment in bytes of a value of
	// this type when used as a field in a struct.
	FieldAlign() int

	// Method returns the i'th method in the type's method set.
	// It panics if i is not in the range [0, NumMethod()).
	//
	// For a non-interface type T or *T, the returned Method's Type and Func
	// fields describe a function whose first argument is the receiver.
	//
	// For an interface type, the returned Method's Type field gives the
	// method signature, without a receiver, and the Func field is nil.
	Method(int) Method

	// MethodByName returns the method with that name in the type's
	// method set and a boolean indicating if the method was found.
	//
	// For a non-interface type T or *T, the returned Method's Type and Func
	// fields describe a function whose first argument is the receiver.
	//
	// For an interface type, the returned Method's Type field gives the
	// method signature, without a receiver, and the Func field is nil.
	MethodByName(string) (Method, bool)

	// NumMethod returns the number of exported methods in the type's method set.
	NumMethod() int

	// Name returns the type's name within its package.
	// It returns an empty string for unnamed types.
	Name() string

	// PkgPath returns a named type's package path, that is, the import path
	// that uniquely identifies the package, such as "encoding/base64".
	// If the type was predeclared (string, error) or unnamed (*T, struct{}, []int),
	// the package path will be the empty string.
	PkgPath() string

	// Size returns the number of bytes needed to store
	// a value of the given type; it is analogous to unsafe.Sizeof.
	Size() uintptr

	// String returns a string representation of the type.
	// The string representation may use shortened package names
	// (e.g., base64 instead of "encoding/base64") and is not
	// guaranteed to be unique among types. To test for type identity,
	// compare the Types directly.
	String() string

	// Used internally by gccgo--the string retaining quoting.
	rawString() string

	// Kind returns the specific kind of this type.
	Kind() Kind

	// Implements reports whether the type implements the interface type u.
	Implements(u Type) bool

	// AssignableTo reports whether a value of the type is assignable to type u.
	AssignableTo(u Type) bool

	// ConvertibleTo reports whether a value of the type is convertible to type u.
	ConvertibleTo(u Type) bool

	// Comparable reports whether values of this type are comparable.
	Comparable() bool

	// Methods applicable only to some types, depending on Kind.
	// The methods allowed for each kind are:
	//
	//	Int*, Uint*, Float*, Complex*: Bits
	//	Array: Elem, Len
	//	Chan: ChanDir, Elem
	//	Func: In, NumIn, Out, NumOut, IsVariadic.
	//	Map: Key, Elem
	//	Ptr: Elem
	//	Slice: Elem
	//	Struct: Field, FieldByIndex, FieldByName, FieldByNameFunc, NumField

	// Bits returns the size of the type in bits.
	// It panics if the type's Kind is not one of the
	// sized or unsized Int, Uint, Float, or Complex kinds.
	Bits() int

	// ChanDir returns a channel type's direction.
	// It panics if the type's Kind is not Chan.
	ChanDir() ChanDir

	// IsVariadic reports whether a function type's final input parameter
	// is a "..." parameter. If so, t.In(t.NumIn() - 1) returns the parameter's
	// implicit actual type []T.
	//
	// For concreteness, if t represents func(x int, y ... float64), then
	//
	//	t.NumIn() == 2
	//	t.In(0) is the reflect.Type for "int"
	//	t.In(1) is the reflect.Type for "[]float64"
	//	t.IsVariadic() == true
	//
	// IsVariadic panics if the type's Kind is not Func.
	IsVariadic() bool

	// Elem returns a type's element type.
	// It panics if the type's Kind is not Array, Chan, Map, Ptr, or Slice.
	Elem() Type

	// Field returns a struct type's i'th field.
	// It panics if the type's Kind is not Struct.
	// It panics if i is not in the range [0, NumField()).
	Field(i int) StructField

	// FieldByIndex returns the nested field corresponding
	// to the index sequence. It is equivalent to calling Field
	// successively for each index i.
	// It panics if the type's Kind is not Struct.
	FieldByIndex(index []int) StructField

	// FieldByName returns the struct field with the given name
	// and a boolean indicating if the field was found.
	FieldByName(name string) (StructField, bool)

	// FieldByNameFunc returns the struct field with a name
	// that satisfies the match function and a boolean indicating if
	// the field was found.
	//
	// FieldByNameFunc considers the fields in the struct itself
	// and then the fields in any anonymous structs, in breadth first order,
	// stopping at the shallowest nesting depth containing one or more
	// fields satisfying the match function. If multiple fields at that depth
	// satisfy the match function, they cancel each other
	// and FieldByNameFunc returns no match.
	// This behavior mirrors Go's handling of name lookup in
	// structs containing anonymous fields.
	FieldByNameFunc(match func(string) bool) (StructField, bool)

	// In returns the type of a function type's i'th input parameter.
	// It panics if the type's Kind is not Func.
	// It panics if i is not in the range [0, NumIn()).
	In(i int) Type

	// Key returns a map type's key type.
	// It panics if the type's Kind is not Map.
	Key() Type

	// Len returns an array type's length.
	// It panics if the type's Kind is not Array.
	Len() int

	// NumField returns a struct type's field count.
	// It panics if the type's Kind is not Struct.
	NumField() int

	// NumIn returns a function type's input parameter count.
	// It panics if the type's Kind is not Func.
	NumIn() int

	// NumOut returns a function type's output parameter count.
	// It panics if the type's Kind is not Func.
	NumOut() int

	// Out returns the type of a function type's i'th output parameter.
	// It panics if the type's Kind is not Func.
	// It panics if i is not in the range [0, NumOut()).
	Out(i int) Type

	// Internal accessors used by gccgo's reflect implementation.
	common() *rtype
	uncommon() *uncommonType
}
212 // BUG(rsc): FieldByName and related functions consider struct field names to be equal
213 // if the names are equal, even if they are unexported names originating
214 // in different packages. The practical effect of this is that the result of
215 // t.FieldByName("x") is not well defined if the struct type t contains
216 // multiple fields named x (embedded from different packages).
217 // FieldByName may return one of the fields named x or may report that there are none.
218 // See https://golang.org/issue/4876 for more details.
/*
 * These data structures are known to the compiler (../../cmd/internal/gc/reflect.go).
 * A few are known to ../runtime/type.go to convey to debuggers.
 * They are also known to ../runtime/type.go.
 */
// A Kind represents the specific kind of type that a Type represents.
// The zero Kind is not a valid kind.
type Kind uint
// The enumeration of kinds; the values must stay in sync with kindNames
// below and with the compiler/runtime's kind numbering.
const (
	Invalid Kind = iota
	Bool
	Int
	Int8
	Int16
	Int32
	Int64
	Uint
	Uint8
	Uint16
	Uint32
	Uint64
	Uintptr
	Float32
	Float64
	Complex64
	Complex128
	Array
	Chan
	Func
	Interface
	Map
	Ptr
	Slice
	String
	Struct
	UnsafePointer
)
// rtype is the common implementation of most values.
// It is embedded in other, public struct types, but always
// with a unique tag like `reflect:"array"` or `reflect:"ptr"`
// so that code cannot convert from, say, *arrayType to *ptrType.
//
// rtype must be kept in sync with ../runtime/type.go:/^type._type.
type rtype struct {
	size       uintptr
	ptrdata    uintptr // size of memory prefix holding all pointers
	hash       uint32  // hash of type; avoids computation in hash tables
	kind       uint8   // enumeration for C; low bits hold the Kind (see kindMask)
	align      int8    // alignment of variable with this type
	fieldAlign uint8   // alignment of struct field with this type
	_          uint8   // unused/padding

	hashfn  func(unsafe.Pointer, uintptr) uintptr     // hash function
	equalfn func(unsafe.Pointer, unsafe.Pointer) bool // equality function

	gcdata *byte   // garbage collection data
	string *string // string form; unnecessary but undeniably useful

	*uncommonType         // (relatively) uncommon fields; nil for unnamed, methodless types
	ptrToThis     *rtype  // type for pointer to this type, if used in binary or has methods
}
// method describes a single method on a non-interface type.
type method struct {
	name    *string        // name of method
	pkgPath *string        // nil for exported Names; otherwise import path
	mtyp    *rtype         // method type (without receiver)
	typ     *rtype         // .(*FuncType) underneath (with receiver)
	tfn     unsafe.Pointer // fn used for normal method call
}
// uncommonType is present only for types with names or methods
// (if T is a named type, the uncommonTypes for T and *T have methods).
// Using a pointer to this struct reduces the overall size required
// to describe an unnamed type with no methods.
type uncommonType struct {
	name    *string  // name of type
	pkgPath *string  // import path; nil for built-in types like int, string
	methods []method // methods associated with type
}
// ChanDir represents a channel type's direction.
type ChanDir int

const (
	RecvDir ChanDir             = 1 << iota // <-chan
	SendDir                                 // chan<-
	BothDir = RecvDir | SendDir             // chan
)
// arrayType represents a fixed array type.
type arrayType struct {
	rtype `reflect:"array"`
	elem  *rtype // array element type
	slice *rtype // slice type
	len   uintptr
}
// chanType represents a channel type.
type chanType struct {
	rtype `reflect:"chan"`
	elem  *rtype  // channel element type
	dir   uintptr // channel direction (ChanDir)
}
// funcType represents a function type.
type funcType struct {
	rtype     `reflect:"func"`
	dotdotdot bool     // last input parameter is ...
	in        []*rtype // input parameter types
	out       []*rtype // output parameter types
}
// imethod represents a method on an interface type.
type imethod struct {
	name    *string // name of method
	pkgPath *string // nil for exported Names; otherwise import path
	typ     *rtype  // .(*FuncType) underneath
}
// interfaceType represents an interface type.
type interfaceType struct {
	rtype   `reflect:"interface"`
	methods []imethod // sorted by hash
}
// mapType represents a map type.
type mapType struct {
	rtype         `reflect:"map"`
	key           *rtype // map key type
	elem          *rtype // map element (value) type
	bucket        *rtype // internal bucket structure
	hmap          *rtype // internal map header
	keysize       uint8  // size of key slot
	indirectkey   uint8  // store ptr to key instead of key itself
	valuesize     uint8  // size of value slot
	indirectvalue uint8  // store ptr to value instead of value itself
	bucketsize    uint16 // size of bucket
	reflexivekey  bool   // true if k==k for all keys
	needkeyupdate bool   // true if we need to update key on an overwrite
}
// ptrType represents a pointer type.
type ptrType struct {
	rtype `reflect:"ptr"`
	elem  *rtype // pointer element (pointed at) type
}
// sliceType represents a slice type.
type sliceType struct {
	rtype `reflect:"slice"`
	elem  *rtype // slice element type
}
// structField describes a single field in a struct type.
type structField struct {
	name       *string // name is always non-empty
	pkgPath    *string // nil for exported Names; otherwise import path
	typ        *rtype  // type of field
	tag        *string // nil if no tag
	offsetAnon uintptr // byte offset of field<<1 | isAnonymous
}
385 func (f *structField) offset() uintptr {
386 return f.offsetAnon >> 1
389 func (f *structField) anon() bool {
390 return f.offsetAnon&1 != 0
// structType represents a struct type.
type structType struct {
	rtype  `reflect:"struct"`
	fields []structField // sorted by offset
}
/*
 * The compiler knows the exact layout of all the data structures above.
 * The compiler does not know about the data structures and methods below.
 */
// Method represents a single method.
type Method struct {
	// Name is the method name.
	// PkgPath is the package path that qualifies a lower case (unexported)
	// method name. It is empty for upper case (exported) method names.
	// The combination of PkgPath and Name uniquely identifies a method
	// in a method set.
	// See https://golang.org/ref/spec#Uniqueness_of_identifiers
	Name    string
	PkgPath string

	Type  Type  // method type
	Func  Value // func with receiver as first argument
	Index int   // index for Type.Method
}
// Flag bits stored in rtype.kind alongside the Kind value
// (which occupies the low five bits; see kindMask).
const (
	kindDirectIface = 1 << 5
	kindGCProg      = 1 << 6 // Type.gc points to GC program
	kindNoPointers  = 1 << 7
	kindMask        = (1 << 5) - 1
)
427 func (k Kind) String() string {
428 if int(k) < len(kindNames) {
429 return kindNames[k]
431 return "kind" + strconv.Itoa(int(k))
// kindNames maps each Kind to its printable name; indexed by Kind value.
var kindNames = []string{
	Invalid:       "invalid",
	Bool:          "bool",
	Int:           "int",
	Int8:          "int8",
	Int16:         "int16",
	Int32:         "int32",
	Int64:         "int64",
	Uint:          "uint",
	Uint8:         "uint8",
	Uint16:        "uint16",
	Uint32:        "uint32",
	Uint64:        "uint64",
	Uintptr:       "uintptr",
	Float32:       "float32",
	Float64:       "float64",
	Complex64:     "complex64",
	Complex128:    "complex128",
	Array:         "array",
	Chan:          "chan",
	Func:          "func",
	Interface:     "interface",
	Map:           "map",
	Ptr:           "ptr",
	Slice:         "slice",
	String:        "string",
	Struct:        "struct",
	UnsafePointer: "unsafe.Pointer",
}
464 func (t *uncommonType) uncommon() *uncommonType {
465 return t
468 func (t *uncommonType) PkgPath() string {
469 if t == nil || t.pkgPath == nil {
470 return ""
472 return *t.pkgPath
475 func (t *uncommonType) Name() string {
476 if t == nil || t.name == nil {
477 return ""
479 return *t.name
482 func (t *rtype) rawString() string { return *t.string }
// String returns the printable form of the type. For gccgo the stored
// string contains sections delimited by '\t' characters that must be
// stripped before display; q tracks whether we are inside one.
func (t *rtype) String() string {
	s := *t.string
	var q bool
	r := make([]byte, len(s))
	j := 0
	for i := 0; i < len(s); i++ {
		if s[i] == '\t' {
			// Toggle quoted-section state; the tab itself is dropped.
			q = !q
		} else if !q {
			r[j] = s[i]
			j++
		}
	}
	return string(r[:j])
}
501 func (t *rtype) Size() uintptr { return t.size }
503 func (t *rtype) Bits() int {
504 if t == nil {
505 panic("reflect: Bits of nil Type")
507 k := t.Kind()
508 if k < Int || k > Complex128 {
509 panic("reflect: Bits of non-arithmetic Type " + t.String())
511 return int(t.size) * 8
514 func (t *rtype) Align() int { return int(t.align) }
516 func (t *rtype) FieldAlign() int { return int(t.fieldAlign) }
518 func (t *rtype) Kind() Kind { return Kind(t.kind & kindMask) }
520 func (t *rtype) pointers() bool { return t.kind&kindNoPointers == 0 }
522 func (t *rtype) common() *rtype { return t }
524 var methodCache sync.Map // map[*rtype][]method
// exportedMethods returns the type's exported methods, computing and
// caching the filtered slice on first use.
func (t *rtype) exportedMethods() []method {
	methodsi, found := methodCache.Load(t)
	if found {
		return methodsi.([]method)
	}

	ut := t.uncommon()
	if ut == nil {
		return nil
	}
	allm := ut.methods
	allExported := true
	for _, m := range allm {
		// pkgPath is nil exactly for exported methods.
		if m.pkgPath != nil {
			allExported = false
			break
		}
	}
	var methods []method
	if allExported {
		// Common case: reuse the full slice without copying.
		methods = allm
	} else {
		methods = make([]method, 0, len(allm))
		for _, m := range allm {
			if m.pkgPath == nil {
				methods = append(methods, m)
			}
		}
		// Limit capacity so callers cannot append into allm's storage.
		methods = methods[:len(methods):len(methods)]
	}

	// LoadOrStore resolves races: whichever goroutine stores first wins,
	// and everyone returns the stored slice.
	methodsi, _ = methodCache.LoadOrStore(t, methods)
	return methodsi.([]method)
}
561 func (t *rtype) NumMethod() int {
562 if t.Kind() == Interface {
563 tt := (*interfaceType)(unsafe.Pointer(t))
564 return tt.NumMethod()
566 if t.uncommonType == nil {
567 return 0 // avoid methodCache synchronization
569 return len(t.exportedMethods())
// Method returns the i'th exported method in the type's method set.
// It panics if i is out of range. For interface types the work is
// delegated to interfaceType.Method.
func (t *rtype) Method(i int) (m Method) {
	if t.Kind() == Interface {
		tt := (*interfaceType)(unsafe.Pointer(t))
		return tt.Method(i)
	}
	methods := t.exportedMethods()
	if i < 0 || i >= len(methods) {
		panic("reflect: Method index out of range")
	}
	p := methods[i]
	if p.name != nil {
		m.Name = *p.name
	}
	fl := flag(Func)
	mt := p.typ
	m.Type = toType(mt)
	// Build an indirect func value whose code pointer is the method's
	// tfn; flagMethodFn marks it for gccgo's calling convention.
	x := new(unsafe.Pointer)
	*x = unsafe.Pointer(&p.tfn)
	m.Func = Value{mt, unsafe.Pointer(x), fl | flagIndir | flagMethodFn}
	m.Index = i
	return m
}
595 func (t *rtype) MethodByName(name string) (m Method, ok bool) {
596 if t.Kind() == Interface {
597 tt := (*interfaceType)(unsafe.Pointer(t))
598 return tt.MethodByName(name)
600 ut := t.uncommon()
601 if ut == nil {
602 return Method{}, false
604 utmethods := ut.methods
605 var eidx int
606 for i := 0; i < len(utmethods); i++ {
607 p := utmethods[i]
608 if p.pkgPath == nil {
609 if p.name != nil && *p.name == name {
610 return t.Method(eidx), true
612 eidx++
615 return Method{}, false
618 func (t *rtype) PkgPath() string {
619 return t.uncommonType.PkgPath()
622 func (t *rtype) Name() string {
623 return t.uncommonType.Name()
626 func (t *rtype) ChanDir() ChanDir {
627 if t.Kind() != Chan {
628 panic("reflect: ChanDir of non-chan type")
630 tt := (*chanType)(unsafe.Pointer(t))
631 return ChanDir(tt.dir)
634 func (t *rtype) IsVariadic() bool {
635 if t.Kind() != Func {
636 panic("reflect: IsVariadic of non-func type")
638 tt := (*funcType)(unsafe.Pointer(t))
639 return tt.dotdotdot
642 func (t *rtype) Elem() Type {
643 switch t.Kind() {
644 case Array:
645 tt := (*arrayType)(unsafe.Pointer(t))
646 return toType(tt.elem)
647 case Chan:
648 tt := (*chanType)(unsafe.Pointer(t))
649 return toType(tt.elem)
650 case Map:
651 tt := (*mapType)(unsafe.Pointer(t))
652 return toType(tt.elem)
653 case Ptr:
654 tt := (*ptrType)(unsafe.Pointer(t))
655 return toType(tt.elem)
656 case Slice:
657 tt := (*sliceType)(unsafe.Pointer(t))
658 return toType(tt.elem)
660 panic("reflect: Elem of invalid type")
663 func (t *rtype) Field(i int) StructField {
664 if t.Kind() != Struct {
665 panic("reflect: Field of non-struct type")
667 tt := (*structType)(unsafe.Pointer(t))
668 return tt.Field(i)
671 func (t *rtype) FieldByIndex(index []int) StructField {
672 if t.Kind() != Struct {
673 panic("reflect: FieldByIndex of non-struct type")
675 tt := (*structType)(unsafe.Pointer(t))
676 return tt.FieldByIndex(index)
679 func (t *rtype) FieldByName(name string) (StructField, bool) {
680 if t.Kind() != Struct {
681 panic("reflect: FieldByName of non-struct type")
683 tt := (*structType)(unsafe.Pointer(t))
684 return tt.FieldByName(name)
687 func (t *rtype) FieldByNameFunc(match func(string) bool) (StructField, bool) {
688 if t.Kind() != Struct {
689 panic("reflect: FieldByNameFunc of non-struct type")
691 tt := (*structType)(unsafe.Pointer(t))
692 return tt.FieldByNameFunc(match)
695 func (t *rtype) In(i int) Type {
696 if t.Kind() != Func {
697 panic("reflect: In of non-func type")
699 tt := (*funcType)(unsafe.Pointer(t))
700 return toType(tt.in[i])
703 func (t *rtype) Key() Type {
704 if t.Kind() != Map {
705 panic("reflect: Key of non-map type")
707 tt := (*mapType)(unsafe.Pointer(t))
708 return toType(tt.key)
711 func (t *rtype) Len() int {
712 if t.Kind() != Array {
713 panic("reflect: Len of non-array type")
715 tt := (*arrayType)(unsafe.Pointer(t))
716 return int(tt.len)
719 func (t *rtype) NumField() int {
720 if t.Kind() != Struct {
721 panic("reflect: NumField of non-struct type")
723 tt := (*structType)(unsafe.Pointer(t))
724 return len(tt.fields)
727 func (t *rtype) NumIn() int {
728 if t.Kind() != Func {
729 panic("reflect: NumIn of non-func type")
731 tt := (*funcType)(unsafe.Pointer(t))
732 return len(tt.in)
735 func (t *rtype) NumOut() int {
736 if t.Kind() != Func {
737 panic("reflect: NumOut of non-func type")
739 tt := (*funcType)(unsafe.Pointer(t))
740 return len(tt.out)
743 func (t *rtype) Out(i int) Type {
744 if t.Kind() != Func {
745 panic("reflect: Out of non-func type")
747 tt := (*funcType)(unsafe.Pointer(t))
748 return toType(tt.out[i])
751 // add returns p+x.
753 // The whySafe string is ignored, so that the function still inlines
754 // as efficiently as p+x, but all call sites should use the string to
755 // record why the addition is safe, which is to say why the addition
756 // does not cause x to advance to the very end of p's allocation
757 // and therefore point incorrectly at the next block in memory.
758 func add(p unsafe.Pointer, x uintptr, whySafe string) unsafe.Pointer {
759 return unsafe.Pointer(uintptr(p) + x)
762 func (d ChanDir) String() string {
763 switch d {
764 case SendDir:
765 return "chan<-"
766 case RecvDir:
767 return "<-chan"
768 case BothDir:
769 return "chan"
771 return "ChanDir" + strconv.Itoa(int(d))
774 // Method returns the i'th method in the type's method set.
775 func (t *interfaceType) Method(i int) (m Method) {
776 if i < 0 || i >= len(t.methods) {
777 return
779 p := &t.methods[i]
780 m.Name = *p.name
781 if p.pkgPath != nil {
782 m.PkgPath = *p.pkgPath
784 m.Type = toType(p.typ)
785 m.Index = i
786 return
789 // NumMethod returns the number of interface methods in the type's method set.
790 func (t *interfaceType) NumMethod() int { return len(t.methods) }
792 // MethodByName method with the given name in the type's method set.
793 func (t *interfaceType) MethodByName(name string) (m Method, ok bool) {
794 if t == nil {
795 return
797 var p *imethod
798 for i := range t.methods {
799 p = &t.methods[i]
800 if *p.name == name {
801 return t.Method(i), true
804 return
// A StructField describes a single field in a struct.
type StructField struct {
	// Name is the field name.
	Name string
	// PkgPath is the package path that qualifies a lower case (unexported)
	// field name. It is empty for upper case (exported) field names.
	// See https://golang.org/ref/spec#Uniqueness_of_identifiers
	PkgPath string

	Type      Type      // field type
	Tag       StructTag // field tag string
	Offset    uintptr   // offset within struct, in bytes
	Index     []int     // index sequence for Type.FieldByIndex
	Anonymous bool      // is an embedded field
}
// A StructTag is the tag string in a struct field.
//
// By convention, tag strings are a concatenation of
// optionally space-separated key:"value" pairs.
// Each key is a non-empty string consisting of non-control
// characters other than space (U+0020 ' '), quote (U+0022 '"'),
// and colon (U+003A ':'). Each value is quoted using U+0022 '"'
// characters and Go string literal syntax.
type StructTag string
833 // Get returns the value associated with key in the tag string.
834 // If there is no such key in the tag, Get returns the empty string.
835 // If the tag does not have the conventional format, the value
836 // returned by Get is unspecified. To determine whether a tag is
837 // explicitly set to the empty string, use Lookup.
838 func (tag StructTag) Get(key string) string {
839 v, _ := tag.Lookup(key)
840 return v
// Lookup returns the value associated with key in the tag string.
// If the key is present in the tag the value (which may be empty)
// is returned. Otherwise the returned value will be the empty string.
// The ok return value reports whether the value was explicitly set in
// the tag string. If the tag does not have the conventional format,
// the value returned by Lookup is unspecified.
func (tag StructTag) Lookup(key string) (value string, ok bool) {
	// When modifying this code, also update the validateStructTag code
	// in cmd/vet/structtag.go.

	for tag != "" {
		// Skip leading space.
		i := 0
		for i < len(tag) && tag[i] == ' ' {
			i++
		}
		tag = tag[i:]
		if tag == "" {
			break
		}

		// Scan to colon. A space, a quote or a control character is a syntax error.
		// Strictly speaking, control chars include the range [0x7f, 0x9f], not just
		// [0x00, 0x1f], but in practice, we ignore the multi-byte control characters
		// as it is simpler to inspect the tag's bytes than the tag's runes.
		i = 0
		for i < len(tag) && tag[i] > ' ' && tag[i] != ':' && tag[i] != '"' && tag[i] != 0x7f {
			i++
		}
		// Require key:"..." shape: non-empty key, colon, opening quote.
		if i == 0 || i+1 >= len(tag) || tag[i] != ':' || tag[i+1] != '"' {
			break
		}
		name := string(tag[:i])
		tag = tag[i+1:]

		// Scan quoted string to find value; a backslash escapes the
		// following byte.
		i = 1
		for i < len(tag) && tag[i] != '"' {
			if tag[i] == '\\' {
				i++
			}
			i++
		}
		if i >= len(tag) {
			break // unterminated quote
		}
		qvalue := string(tag[:i+1])
		tag = tag[i+1:]

		if key == name {
			value, err := strconv.Unquote(qvalue)
			if err != nil {
				break
			}
			return value, true
		}
	}
	return "", false
}
// Field returns the i'th struct field; it panics if i is out of range.
func (t *structType) Field(i int) (f StructField) {
	if i < 0 || i >= len(t.fields) {
		panic("reflect: Field index out of bounds")
	}
	p := &t.fields[i]
	f.Type = toType(p.typ)
	f.Name = *p.name
	f.Anonymous = p.anon()
	if p.pkgPath != nil {
		f.PkgPath = *p.pkgPath
	}
	if p.tag != nil {
		f.Tag = StructTag(*p.tag)
	}
	f.Offset = p.offset()

	// NOTE(rsc): This is the only allocation in the interface
	// presented by a reflect.Type. It would be nice to avoid,
	// at least in the common cases, but we need to make sure
	// that misbehaving clients of reflect cannot affect other
	// uses of reflect. One possibility is CL 5371098, but we
	// postponed that ugliness until there is a demonstrated
	// need for the performance. This is issue 2320.
	f.Index = []int{i}
	return
}
// TODO(gri): Should there be an error/bool indicator if the index
// is wrong for FieldByIndex?

// FieldByIndex returns the nested field corresponding to index,
// descending one struct level per element and stepping through a
// pointer-to-struct automatically.
func (t *structType) FieldByIndex(index []int) (f StructField) {
	f.Type = toType(&t.rtype)
	for i, x := range index {
		if i > 0 {
			ft := f.Type
			// Follow an embedded *struct transparently.
			if ft.Kind() == Ptr && ft.Elem().Kind() == Struct {
				ft = ft.Elem()
			}
			f.Type = ft
		}
		f = f.Type.Field(x)
	}
	return
}
// A fieldScan represents an item on the fieldByNameFunc scan work list:
// a struct type to examine plus the index path that reached it.
type fieldScan struct {
	typ   *structType
	index []int
}
// FieldByNameFunc returns the struct field with a name that satisfies the
// match function and a boolean to indicate if the field was found.
func (t *structType) FieldByNameFunc(match func(string) bool) (result StructField, ok bool) {
	// This uses the same condition that the Go language does: there must be a unique instance
	// of the match at a given depth level. If there are multiple instances of a match at the
	// same depth, they annihilate each other and inhibit any possible match at a lower level.
	// The algorithm is breadth first search, one depth level at a time.

	// The current and next slices are work queues:
	// current lists the fields to visit on this depth level,
	// and next lists the fields on the next lower level.
	current := []fieldScan{}
	next := []fieldScan{{typ: t}}

	// nextCount records the number of times an embedded type has been
	// encountered and considered for queueing in the 'next' slice.
	// We only queue the first one, but we increment the count on each.
	// If a struct type T can be reached more than once at a given depth level,
	// then it annihilates itself and need not be considered at all when we
	// process that next depth level.
	var nextCount map[*structType]int

	// visited records the structs that have been considered already.
	// Embedded pointer fields can create cycles in the graph of
	// reachable embedded types; visited avoids following those cycles.
	// It also avoids duplicated effort: if we didn't find the field in an
	// embedded type T at level 2, we won't find it in one at level 4 either.
	visited := map[*structType]bool{}

	for len(next) > 0 {
		current, next = next, current[:0]
		count := nextCount
		nextCount = nil

		// Process all the fields at this depth, now listed in 'current'.
		// The loop queues embedded fields found in 'next', for processing during the next
		// iteration. The multiplicity of the 'current' field counts is recorded
		// in 'count'; the multiplicity of the 'next' field counts is recorded in 'nextCount'.
		for _, scan := range current {
			t := scan.typ
			if visited[t] {
				// We've looked through this type before, at a higher level.
				// That higher level would shadow the lower level we're now at,
				// so this one can't be useful to us. Ignore it.
				continue
			}
			visited[t] = true
			for i := range t.fields {
				f := &t.fields[i]
				// Find name and (for anonymous field) type for field f.
				fname := *f.name
				var ntyp *rtype
				if f.anon() {
					// Anonymous field of type T or *T.
					ntyp = f.typ
					if ntyp.Kind() == Ptr {
						ntyp = ntyp.Elem().common()
					}
				}

				// Does it match?
				if match(fname) {
					// Potential match
					if count[t] > 1 || ok {
						// Name appeared multiple times at this level: annihilate.
						return StructField{}, false
					}
					result = t.Field(i)
					result.Index = nil
					result.Index = append(result.Index, scan.index...)
					result.Index = append(result.Index, i)
					ok = true
					continue
				}

				// Queue embedded struct fields for processing with next level,
				// but only if we haven't seen a match yet at this level and only
				// if the embedded types haven't already been queued.
				if ok || ntyp == nil || ntyp.Kind() != Struct {
					continue
				}
				ntyp = toType(ntyp).common()
				styp := (*structType)(unsafe.Pointer(ntyp))
				if nextCount[styp] > 0 {
					nextCount[styp] = 2 // exact multiple doesn't matter
					continue
				}
				if nextCount == nil {
					nextCount = map[*structType]int{}
				}
				nextCount[styp] = 1
				if count[t] > 1 {
					nextCount[styp] = 2 // exact multiple doesn't matter
				}
				var index []int
				index = append(index, scan.index...)
				index = append(index, i)
				next = append(next, fieldScan{styp, index})
			}
		}
		if ok {
			break
		}
	}
	return
}
1063 // FieldByName returns the struct field with the given name
1064 // and a boolean to indicate if the field was found.
1065 func (t *structType) FieldByName(name string) (f StructField, present bool) {
1066 // Quick check for top-level name, or struct without anonymous fields.
1067 hasAnon := false
1068 if name != "" {
1069 for i := range t.fields {
1070 tf := &t.fields[i]
1071 if *tf.name == name {
1072 return t.Field(i), true
1074 if tf.anon() {
1075 hasAnon = true
1079 if !hasAnon {
1080 return
1082 return t.FieldByNameFunc(func(s string) bool { return s == name })
1085 // TypeOf returns the reflection Type that represents the dynamic type of i.
1086 // If i is a nil interface value, TypeOf returns nil.
1087 func TypeOf(i interface{}) Type {
1088 eface := *(*emptyInterface)(unsafe.Pointer(&i))
1089 return toType(eface.typ)
// ptrMap is the cache for PtrTo: it maps an element type to its
// previously constructed pointer type so repeated calls return the
// same *rtype without rebuilding it.
var ptrMap sync.Map // map[*rtype]*ptrType
// PtrTo returns the pointer type with element t.
// For example, if t represents type Foo, PtrTo(t) represents *Foo.
// The result is cached (see ptrTo), so repeated calls are cheap.
func PtrTo(t Type) Type {
	return t.(*rtype).ptrTo()
}
// ptrTo returns the *rtype describing a pointer to t, constructing and
// caching it if necessary. Lookup order: the type's own ptrToThis link,
// then ptrMap, then the canonical-type table; only if all miss is a new
// ptrType built from the *unsafe.Pointer prototype.
func (t *rtype) ptrTo() *rtype {
	if p := t.ptrToThis; p != nil {
		return p
	}

	// Check the cache.
	if pi, ok := ptrMap.Load(t); ok {
		return &pi.(*ptrType).rtype
	}

	s := "*" + *t.string

	// Check whether a canonical type with this name already exists
	// (e.g. one linked into the binary by the compiler).
	canonicalTypeLock.RLock()
	r, ok := canonicalType[s]
	canonicalTypeLock.RUnlock()
	if ok {
		p := (*ptrType)(unsafe.Pointer(r.(*rtype)))
		// LoadOrStore so a racing goroutine's entry wins consistently.
		pi, _ := ptrMap.LoadOrStore(t, p)
		return &pi.(*ptrType).rtype
	}

	// Create a new ptrType starting with the description
	// of an *unsafe.Pointer.
	var iptr interface{} = (*unsafe.Pointer)(nil)
	prototype := *(**ptrType)(unsafe.Pointer(&iptr))
	pp := *prototype

	pp.string = &s
	pp.ptrToThis = nil

	// For the type structures linked into the binary, the
	// compiler provides a good hash of the string.
	// Create a good hash for the new string by using
	// the FNV-1 hash's mixing function to combine the
	// old hash and the new "*".
	// p.hash = fnv1(t.hash, '*')
	// This is the gccgo version.
	pp.hash = (t.hash << 4) + 9

	pp.uncommonType = nil
	pp.ptrToThis = nil
	pp.elem = t

	// Register with the canonical-type table before publishing in ptrMap.
	q := canonicalize(&pp.rtype)
	p := (*ptrType)(unsafe.Pointer(q.(*rtype)))

	pi, _ := ptrMap.LoadOrStore(t, p)
	return &pi.(*ptrType).rtype
}
// fnv1 incorporates the list of bytes into the hash x using the FNV-1 hash function.
func fnv1(x uint32, list ...byte) uint32 {
	const prime = 16777619 // 32-bit FNV prime
	h := x
	for i := 0; i < len(list); i++ {
		h = h*prime ^ uint32(list[i])
	}
	return h
}
1159 func (t *rtype) Implements(u Type) bool {
1160 if u == nil {
1161 panic("reflect: nil type passed to Type.Implements")
1163 if u.Kind() != Interface {
1164 panic("reflect: non-interface type passed to Type.Implements")
1166 return implements(u.(*rtype), t)
1169 func (t *rtype) AssignableTo(u Type) bool {
1170 if u == nil {
1171 panic("reflect: nil type passed to Type.AssignableTo")
1173 uu := u.(*rtype)
1174 return directlyAssignable(uu, t) || implements(uu, t)
1177 func (t *rtype) ConvertibleTo(u Type) bool {
1178 if u == nil {
1179 panic("reflect: nil type passed to Type.ConvertibleTo")
1181 uu := u.(*rtype)
1182 return convertOp(uu, t) != nil
1185 func (t *rtype) Comparable() bool {
1186 switch t.Kind() {
1187 case Bool, Int, Int8, Int16, Int32, Int64,
1188 Uint, Uint8, Uint16, Uint32, Uint64, Uintptr,
1189 Float32, Float64, Complex64, Complex128,
1190 Chan, Interface, Ptr, String, UnsafePointer:
1191 return true
1193 case Func, Map, Slice:
1194 return false
1196 case Array:
1197 return (*arrayType)(unsafe.Pointer(t)).elem.Comparable()
1199 case Struct:
1200 tt := (*structType)(unsafe.Pointer(t))
1201 for i := range tt.fields {
1202 if !tt.fields[i].typ.Comparable() {
1203 return false
1206 return true
1208 default:
1209 panic("reflect: impossible")
// implements reports whether the type V implements the interface type T.
func implements(T, V *rtype) bool {
	if T.Kind() != Interface {
		return false
	}
	t := (*interfaceType)(unsafe.Pointer(T))
	if len(t.methods) == 0 {
		// The empty interface is implemented by everything.
		return true
	}

	// The same algorithm applies in both cases, but the
	// method tables for an interface type and a concrete type
	// are different, so the code is duplicated.
	// In both cases the algorithm is a linear scan over the two
	// lists - T's methods and V's methods - simultaneously.
	// Since method tables are stored in a unique sorted order
	// (alphabetical, with no duplicate method names), the scan
	// through V's methods must hit a match for each of T's
	// methods along the way, or else V does not implement T.
	// This lets us run the scan in overall linear time instead of
	// the quadratic time a naive search would require.
	// See also ../runtime/iface.go.
	if V.Kind() == Interface {
		v := (*interfaceType)(unsafe.Pointer(V))
		i := 0
		for j := 0; j < len(v.methods); j++ {
			tm := &t.methods[i]
			vm := &v.methods[j]
			// A method matches on name, package path (pkgPath is non-nil
			// only for unexported methods; compare by pointer or string),
			// and canonicalized signature type.
			if *vm.name == *tm.name && (vm.pkgPath == tm.pkgPath || (vm.pkgPath != nil && tm.pkgPath != nil && *vm.pkgPath == *tm.pkgPath)) && toType(vm.typ).common() == toType(tm.typ).common() {
				if i++; i >= len(t.methods) {
					return true
				}
			}
		}
		return false
	}

	// Concrete type: scan its uncommon (method) data instead.
	v := V.uncommon()
	if v == nil {
		// No methods at all, but T has at least one.
		return false
	}
	i := 0
	for j := 0; j < len(v.methods); j++ {
		tm := &t.methods[i]
		vm := &v.methods[j]
		// Same match as above, but a concrete method's signature is mtyp.
		if *vm.name == *tm.name && (vm.pkgPath == tm.pkgPath || (vm.pkgPath != nil && tm.pkgPath != nil && *vm.pkgPath == *tm.pkgPath)) && toType(vm.mtyp).common() == toType(tm.typ).common() {
			if i++; i >= len(t.methods) {
				return true
			}
		}
	}
	return false
}
1267 // directlyAssignable reports whether a value x of type V can be directly
1268 // assigned (using memmove) to a value of type T.
1269 // https://golang.org/doc/go_spec.html#Assignability
1270 // Ignoring the interface rules (implemented elsewhere)
1271 // and the ideal constant rules (no ideal constants at run time).
1272 func directlyAssignable(T, V *rtype) bool {
1273 // x's type V is identical to T?
1274 if T == V {
1275 return true
1278 // Otherwise at least one of T and V must be unnamed
1279 // and they must have the same kind.
1280 if T.Name() != "" && V.Name() != "" || T.Kind() != V.Kind() {
1281 return false
1284 // x's type T and V must have identical underlying types.
1285 return haveIdenticalUnderlyingType(T, V, true)
1288 func haveIdenticalType(T, V Type, cmpTags bool) bool {
1289 if cmpTags {
1290 return T == V
1293 if T.Name() != V.Name() || T.Kind() != V.Kind() {
1294 return false
1297 return haveIdenticalUnderlyingType(T.common(), V.common(), false)
// haveIdenticalUnderlyingType reports whether T and V have identical
// underlying types. When cmpTags is true, struct field tags must match
// as well (used when exact identity matters, e.g. assignability).
func haveIdenticalUnderlyingType(T, V *rtype, cmpTags bool) bool {
	if T == V {
		return true
	}

	kind := T.Kind()
	if kind != V.Kind() {
		return false
	}

	// Non-composite types of equal kind have same underlying type
	// (the predefined instance of the type).
	if Bool <= kind && kind <= Complex128 || kind == String || kind == UnsafePointer {
		return true
	}

	// Composite types.
	switch kind {
	case Array:
		return T.Len() == V.Len() && haveIdenticalType(T.Elem(), V.Elem(), cmpTags)

	case Chan:
		// Special case:
		// x is a bidirectional channel value, T is a channel type,
		// and x's type V and T have identical element types.
		if V.ChanDir() == BothDir && haveIdenticalType(T.Elem(), V.Elem(), cmpTags) {
			return true
		}

		// Otherwise continue test for identical underlying type.
		return V.ChanDir() == T.ChanDir() && haveIdenticalType(T.Elem(), V.Elem(), cmpTags)

	case Func:
		t := (*funcType)(unsafe.Pointer(T))
		v := (*funcType)(unsafe.Pointer(V))
		// Variadic-ness and parameter/result counts must agree...
		if t.dotdotdot != v.dotdotdot || len(t.in) != len(v.in) || len(t.out) != len(v.out) {
			return false
		}
		// ...as must every parameter and result type, positionally.
		for i, typ := range t.in {
			if !haveIdenticalType(typ, v.in[i], cmpTags) {
				return false
			}
		}
		for i, typ := range t.out {
			if !haveIdenticalType(typ, v.out[i], cmpTags) {
				return false
			}
		}
		return true

	case Interface:
		t := (*interfaceType)(unsafe.Pointer(T))
		v := (*interfaceType)(unsafe.Pointer(V))
		if len(t.methods) == 0 && len(v.methods) == 0 {
			return true
		}
		// Might have the same methods but still
		// need a run time conversion.
		return false

	case Map:
		return haveIdenticalType(T.Key(), V.Key(), cmpTags) && haveIdenticalType(T.Elem(), V.Elem(), cmpTags)

	case Ptr, Slice:
		return haveIdenticalType(T.Elem(), V.Elem(), cmpTags)

	case Struct:
		t := (*structType)(unsafe.Pointer(T))
		v := (*structType)(unsafe.Pointer(V))
		if len(t.fields) != len(v.fields) {
			return false
		}
		for i := range t.fields {
			tf := &t.fields[i]
			vf := &v.fields[i]
			// name/pkgPath/tag are *string and may be distinct pointers
			// to equal strings; compare pointer first, then contents.
			if tf.name != vf.name && (tf.name == nil || vf.name == nil || *tf.name != *vf.name) {
				return false
			}
			if tf.pkgPath != vf.pkgPath && (tf.pkgPath == nil || vf.pkgPath == nil || *tf.pkgPath != *vf.pkgPath) {
				return false
			}
			if !haveIdenticalType(tf.typ, vf.typ, cmpTags) {
				return false
			}
			if cmpTags && tf.tag != vf.tag && (tf.tag == nil || vf.tag == nil || *tf.tag != *vf.tag) {
				return false
			}
			// offsetAnon packs both the offset and the anonymous bit.
			if tf.offsetAnon != vf.offsetAnon {
				return false
			}
		}
		return true
	}

	return false
}
// The lookupCache caches ArrayOf, ChanOf, MapOf and SliceOf lookups.
var lookupCache sync.Map // map[cacheKey]*rtype

// A cacheKey is the key for use in the lookupCache.
// Four values describe any of the types we are looking for:
// type kind, one or two subtypes, and an extra integer.
type cacheKey struct {
	kind  Kind    // kind of type being constructed (Array, Chan, Map, Slice)
	t1    *rtype  // first subtype (element type; key type for maps)
	t2    *rtype  // second subtype (element type for maps, nil otherwise)
	extra uintptr // extra integer (array length, channel direction)
}
// The funcLookupCache caches FuncOf lookups.
// FuncOf does not share the common lookupCache since cacheKey is not
// sufficient to represent functions unambiguously.
var funcLookupCache struct {
	sync.Mutex // Guards stores (but not loads) on m.

	// m is a map[uint32][]*rtype keyed by the hash calculated in FuncOf.
	// Elements of m are append-only and thus safe for concurrent reading.
	m sync.Map
}
// ChanOf returns the channel type with the given direction and element type.
// For example, if t represents int, ChanOf(RecvDir, t) represents <-chan int.
//
// The gc runtime imposes a limit of 64 kB on channel element types.
// If t's size is equal to or exceeds this limit, ChanOf panics.
func ChanOf(dir ChanDir, t Type) Type {
	typ := t.(*rtype)

	// Look in cache.
	ckey := cacheKey{Chan, typ, nil, uintptr(dir)}
	if ch, ok := lookupCache.Load(ckey); ok {
		return ch.(*rtype)
	}

	// This restriction is imposed by the gc compiler and the runtime.
	if typ.size >= 1<<16 {
		panic("reflect.ChanOf: element size too large")
	}

	// Look in known types.
	// TODO: Precedence when constructing string.
	var s string
	switch dir {
	default:
		panic("reflect.ChanOf: invalid dir")
	case SendDir:
		s = "chan<- " + *typ.string
	case RecvDir:
		s = "<-chan " + *typ.string
	case BothDir:
		s = "chan " + *typ.string
	}

	// Make a channel type by copying the prototype descriptor of an
	// untyped chan and overriding its identity fields.
	var ichan interface{} = (chan unsafe.Pointer)(nil)
	prototype := *(**chanType)(unsafe.Pointer(&ichan))
	ch := *prototype
	ch.dir = uintptr(dir)
	ch.string = &s

	// gccgo uses a different hash.
	// ch.hash = fnv1(typ.hash, 'c', byte(dir))
	ch.hash = 0
	if dir&SendDir != 0 {
		ch.hash += 1
	}
	if dir&RecvDir != 0 {
		ch.hash += 2
	}
	ch.hash += typ.hash << 2
	ch.hash <<= 3
	ch.hash += 15

	ch.elem = typ
	ch.uncommonType = nil
	ch.ptrToThis = nil

	// LoadOrStore so concurrent callers all get the same descriptor.
	ti, _ := lookupCache.LoadOrStore(ckey, &ch.rtype)
	return ti.(Type)
}
// ismapkey reports whether the type supports Go's == operator and can
// therefore be used as a map key.
func ismapkey(*rtype) bool // implemented in runtime
// MapOf returns the map type with the given key and element types.
// For example, if k represents int and e represents string,
// MapOf(k, e) represents map[int]string.
//
// If the key type is not a valid map key type (that is, if it does
// not implement Go's == operator), MapOf panics.
func MapOf(key, elem Type) Type {
	ktyp := key.(*rtype)
	etyp := elem.(*rtype)

	if !ismapkey(ktyp) {
		panic("reflect.MapOf: invalid key type " + ktyp.String())
	}

	// Look in cache.
	ckey := cacheKey{Map, ktyp, etyp, 0}
	if mt, ok := lookupCache.Load(ckey); ok {
		return mt.(Type)
	}

	// Look in known types.
	s := "map[" + *ktyp.string + "]" + *etyp.string

	// Make a map type from the prototype descriptor of an untyped map.
	var imap interface{} = (map[unsafe.Pointer]unsafe.Pointer)(nil)
	mt := **(**mapType)(unsafe.Pointer(&imap))
	mt.string = &s

	// gccgo uses a different hash
	// mt.hash = fnv1(etyp.hash, 'm', byte(ktyp.hash>>24), byte(ktyp.hash>>16), byte(ktyp.hash>>8), byte(ktyp.hash))
	mt.hash = ktyp.hash + etyp.hash + 2 + 14

	mt.key = ktyp
	mt.elem = etyp
	mt.uncommonType = nil
	mt.ptrToThis = nil

	mt.bucket = bucketOf(ktyp, etyp)
	// Oversized keys/values are stored indirectly (as pointers) in buckets;
	// mirror the runtime's layout decisions here.
	if ktyp.size > maxKeySize {
		mt.keysize = uint8(ptrSize)
		mt.indirectkey = 1
	} else {
		mt.keysize = uint8(ktyp.size)
		mt.indirectkey = 0
	}
	if etyp.size > maxValSize {
		mt.valuesize = uint8(ptrSize)
		mt.indirectvalue = 1
	} else {
		mt.valuesize = uint8(etyp.size)
		mt.indirectvalue = 0
	}
	mt.bucketsize = uint16(mt.bucket.size)
	mt.reflexivekey = isReflexive(ktyp)
	mt.needkeyupdate = needKeyUpdate(ktyp)

	ti, _ := lookupCache.LoadOrStore(ckey, &mt.rtype)
	return ti.(Type)
}
// FuncOf returns the function type with the given argument and result types.
// For example if k represents int and e represents string,
// FuncOf([]Type{k}, []Type{e}, false) represents func(int) string.
//
// The variadic argument controls whether the function is variadic. FuncOf
// panics if the in[len(in)-1] does not represent a slice and variadic is
// true.
func FuncOf(in, out []Type, variadic bool) Type {
	if variadic && (len(in) == 0 || in[len(in)-1].Kind() != Slice) {
		panic("reflect.FuncOf: last arg of variadic func must be slice")
	}

	// Make a func type from the prototype descriptor of func().
	var ifunc interface{} = (func())(nil)
	prototype := *(**funcType)(unsafe.Pointer(&ifunc))
	ft := new(funcType)
	*ft = *prototype

	// Build a hash and minimally populate ft. The hash mixes each
	// parameter/result type hash at a distinct shift so that order matters.
	var hash uint32
	var fin, fout []*rtype
	shift := uint(1)
	for _, in := range in {
		t := in.(*rtype)
		fin = append(fin, t)
		hash += t.hash << shift
		shift++
	}
	shift = 2
	for _, out := range out {
		t := out.(*rtype)
		fout = append(fout, t)
		hash += t.hash << shift
		shift++
	}
	if variadic {
		hash++
	}
	hash <<= 4
	hash += 8
	ft.hash = hash
	ft.in = fin
	ft.out = fout
	ft.dotdotdot = variadic

	// Look in cache (lock-free read path).
	if ts, ok := funcLookupCache.m.Load(hash); ok {
		for _, t := range ts.([]*rtype) {
			if haveIdenticalUnderlyingType(&ft.rtype, t, true) {
				return t
			}
		}
	}

	// Not in cache, lock and retry (another goroutine may have stored it).
	funcLookupCache.Lock()
	defer funcLookupCache.Unlock()
	if ts, ok := funcLookupCache.m.Load(hash); ok {
		for _, t := range ts.([]*rtype) {
			if haveIdenticalUnderlyingType(&ft.rtype, t, true) {
				return t
			}
		}
	}

	// addToCache appends tt to the (append-only) bucket for this hash.
	addToCache := func(tt *rtype) Type {
		var rts []*rtype
		if rti, ok := funcLookupCache.m.Load(hash); ok {
			rts = rti.([]*rtype)
		}
		funcLookupCache.m.Store(hash, append(rts, tt))
		return tt
	}

	str := funcStr(ft)

	// Populate the remaining fields of ft and store in cache.
	ft.string = &str
	ft.uncommonType = nil
	ft.ptrToThis = nil
	return addToCache(&ft.rtype)
}
// funcStr builds a string representation of a funcType,
// e.g. "func(int, ...string) (bool, error)".
func funcStr(ft *funcType) string {
	repr := make([]byte, 0, 64)
	repr = append(repr, "func("...)
	for i, t := range ft.in {
		if i > 0 {
			repr = append(repr, ", "...)
		}
		if ft.dotdotdot && i == len(ft.in)-1 {
			// The final variadic parameter prints as ...E, not []E.
			repr = append(repr, "..."...)
			repr = append(repr, *(*sliceType)(unsafe.Pointer(t)).elem.string...)
		} else {
			repr = append(repr, *t.string...)
		}
	}
	repr = append(repr, ')')
	// One result: "func(...) T". Multiple results: "func(...) (T1, T2)".
	if l := len(ft.out); l == 1 {
		repr = append(repr, ' ')
	} else if l > 1 {
		repr = append(repr, " ("...)
	}
	for i, t := range ft.out {
		if i > 0 {
			repr = append(repr, ", "...)
		}
		repr = append(repr, *t.string...)
	}
	if len(ft.out) > 1 {
		repr = append(repr, ')')
	}
	return string(repr)
}
1660 // isReflexive reports whether the == operation on the type is reflexive.
1661 // That is, x == x for all values x of type t.
1662 func isReflexive(t *rtype) bool {
1663 switch t.Kind() {
1664 case Bool, Int, Int8, Int16, Int32, Int64, Uint, Uint8, Uint16, Uint32, Uint64, Uintptr, Chan, Ptr, String, UnsafePointer:
1665 return true
1666 case Float32, Float64, Complex64, Complex128, Interface:
1667 return false
1668 case Array:
1669 tt := (*arrayType)(unsafe.Pointer(t))
1670 return isReflexive(tt.elem)
1671 case Struct:
1672 tt := (*structType)(unsafe.Pointer(t))
1673 for _, f := range tt.fields {
1674 if !isReflexive(f.typ) {
1675 return false
1678 return true
1679 default:
1680 // Func, Map, Slice, Invalid
1681 panic("isReflexive called on non-key type " + t.String())
1685 // needKeyUpdate reports whether map overwrites require the key to be copied.
1686 func needKeyUpdate(t *rtype) bool {
1687 switch t.Kind() {
1688 case Bool, Int, Int8, Int16, Int32, Int64, Uint, Uint8, Uint16, Uint32, Uint64, Uintptr, Chan, Ptr, UnsafePointer:
1689 return false
1690 case Float32, Float64, Complex64, Complex128, Interface, String:
1691 // Float keys can be updated from +0 to -0.
1692 // String keys can be updated to use a smaller backing store.
1693 // Interfaces might have floats of strings in them.
1694 return true
1695 case Array:
1696 tt := (*arrayType)(unsafe.Pointer(t))
1697 return needKeyUpdate(tt.elem)
1698 case Struct:
1699 tt := (*structType)(unsafe.Pointer(t))
1700 for _, f := range tt.fields {
1701 if needKeyUpdate(f.typ) {
1702 return true
1705 return false
1706 default:
1707 // Func, Map, Slice, Invalid
1708 panic("needKeyUpdate called on non-key type " + t.String())
// Make sure these routines stay in sync with ../../runtime/hashmap.go!
// These types exist only for GC, so we only fill out GC relevant info.
// Currently, that's just size and the GC program. We also fill in string
// for possible debugging use.
const (
	bucketSize uintptr = 8   // key/value slots per hash bucket
	maxKeySize uintptr = 128 // keys larger than this are stored indirectly
	maxValSize uintptr = 128 // values larger than this are stored indirectly
)
// bucketOf constructs the rtype describing one hash bucket for a
// map[ktyp]etyp: 8 tophash bytes, bucketSize keys, bucketSize values,
// and a trailing overflow pointer, plus the pointer bitmap the GC needs.
func bucketOf(ktyp, etyp *rtype) *rtype {
	// See comment on hmap.overflow in ../runtime/hashmap.go.
	var kind uint8
	if ktyp.kind&kindNoPointers != 0 && etyp.kind&kindNoPointers != 0 &&
		ktyp.size <= maxKeySize && etyp.size <= maxValSize {
		kind = kindNoPointers
	}

	// Oversized keys/values are stored as pointers to the real data.
	if ktyp.size > maxKeySize {
		ktyp = PtrTo(ktyp).(*rtype)
	}
	if etyp.size > maxValSize {
		etyp = PtrTo(etyp).(*rtype)
	}

	// Prepare GC data if any.
	// A bucket is at most bucketSize*(1+maxKeySize+maxValSize)+2*ptrSize bytes,
	// or 2072 bytes, or 259 pointer-size words, or 33 bytes of pointer bitmap.
	// Note that since the key and value are known to be <= 128 bytes,
	// they're guaranteed to have bitmaps instead of GC programs.
	var gcdata *byte
	var ptrdata uintptr

	// Layout: tophash array, then keys, then values, each aligned.
	size := bucketSize
	size = align(size, uintptr(ktyp.fieldAlign))
	size += bucketSize * ktyp.size
	size = align(size, uintptr(etyp.fieldAlign))
	size += bucketSize * etyp.size

	maxAlign := uintptr(ktyp.fieldAlign)
	if maxAlign < uintptr(etyp.fieldAlign) {
		maxAlign = uintptr(etyp.fieldAlign)
	}
	if maxAlign > ptrSize {
		size = align(size, maxAlign)
		size += align(ptrSize, maxAlign) - ptrSize
	} else if maxAlign < ptrSize {
		size = align(size, ptrSize)
		maxAlign = ptrSize
	}

	// The overflow pointer sits at the very end of the bucket.
	ovoff := size
	size += ptrSize

	if kind != kindNoPointers {
		// Build the pointer bitmap: one bit per pointer-sized word.
		nptr := size / ptrSize
		mask := make([]byte, (nptr+7)/8)
		psize := bucketSize
		psize = align(psize, uintptr(ktyp.fieldAlign))
		base := psize / ptrSize

		if ktyp.kind&kindNoPointers == 0 {
			if ktyp.kind&kindGCProg != 0 {
				panic("reflect: unexpected GC program in MapOf")
			}
			// Replicate the key's pointer bitmap across all bucketSize slots.
			kmask := (*[16]byte)(unsafe.Pointer(ktyp.gcdata))
			for i := uintptr(0); i < ktyp.ptrdata/ptrSize; i++ {
				if (kmask[i/8]>>(i%8))&1 != 0 {
					for j := uintptr(0); j < bucketSize; j++ {
						word := base + j*ktyp.size/ptrSize + i
						mask[word/8] |= 1 << (word % 8)
					}
				}
			}
		}
		psize += bucketSize * ktyp.size
		psize = align(psize, uintptr(etyp.fieldAlign))
		base = psize / ptrSize

		if etyp.kind&kindNoPointers == 0 {
			if etyp.kind&kindGCProg != 0 {
				panic("reflect: unexpected GC program in MapOf")
			}
			// Replicate the value's pointer bitmap across all slots.
			emask := (*[16]byte)(unsafe.Pointer(etyp.gcdata))
			for i := uintptr(0); i < etyp.ptrdata/ptrSize; i++ {
				if (emask[i/8]>>(i%8))&1 != 0 {
					for j := uintptr(0); j < bucketSize; j++ {
						word := base + j*etyp.size/ptrSize + i
						mask[word/8] |= 1 << (word % 8)
					}
				}
			}
		}

		// Mark the overflow pointer word.
		word := ovoff / ptrSize
		mask[word/8] |= 1 << (word % 8)
		gcdata = &mask[0]
		ptrdata = (word + 1) * ptrSize

		// overflow word must be last
		if ptrdata != size {
			panic("reflect: bad layout computation in MapOf")
		}
	}

	b := &rtype{
		align:      int8(maxAlign),
		fieldAlign: uint8(maxAlign),
		size:       size,
		kind:       kind,
		ptrdata:    ptrdata,
		gcdata:     gcdata,
	}
	s := "bucket(" + *ktyp.string + "," + *etyp.string + ")"
	b.string = &s
	return b
}
// SliceOf returns the slice type with element type t.
// For example, if t represents int, SliceOf(t) represents []int.
func SliceOf(t Type) Type {
	typ := t.(*rtype)

	// Look in cache.
	ckey := cacheKey{Slice, typ, nil, 0}
	if slice, ok := lookupCache.Load(ckey); ok {
		return slice.(Type)
	}

	// Look in known types.
	s := "[]" + *typ.string

	// Make a slice type from the prototype descriptor of []unsafe.Pointer.
	var islice interface{} = ([]unsafe.Pointer)(nil)
	prototype := *(**sliceType)(unsafe.Pointer(&islice))
	slice := *prototype
	slice.string = &s

	// gccgo uses a different hash.
	// slice.hash = fnv1(typ.hash, '[')
	slice.hash = typ.hash + 1 + 13

	slice.elem = typ
	slice.uncommonType = nil
	slice.ptrToThis = nil

	// LoadOrStore so concurrent callers all get the same descriptor.
	ti, _ := lookupCache.LoadOrStore(ckey, &slice.rtype)
	return ti.(Type)
}
// The structLookupCache caches StructOf lookups.
// StructOf does not share the common lookupCache since we need to pin
// the memory associated with *structTypeFixedN.
var structLookupCache struct {
	sync.Mutex // Guards stores (but not loads) on m.

	// m is a map[uint32][]Type keyed by the hash calculated in StructOf.
	// Elements in m are append-only and thus safe for concurrent reading.
	m sync.Map
}
// isLetter returns true if a given 'rune' is classified as a Letter.
func isLetter(ch rune) bool {
	switch {
	case 'a' <= ch && ch <= 'z', 'A' <= ch && ch <= 'Z', ch == '_':
		// Fast path for ASCII identifiers.
		return true
	default:
		// Non-ASCII: defer to the Unicode tables.
		return ch >= utf8.RuneSelf && unicode.IsLetter(ch)
	}
}
// isValidFieldName checks if a string is a valid (struct) field name or not.
//
// According to the language spec, a field name should be an identifier.
//
// identifier = letter { letter | unicode_digit } .
// letter = unicode_letter | "_" .
func isValidFieldName(fieldName string) bool {
	for i, c := range fieldName {
		// Inlined letter test: ASCII letter, underscore, or Unicode letter.
		isLet := 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' || c == '_' ||
			c >= utf8.RuneSelf && unicode.IsLetter(c)
		if i == 0 && !isLet {
			// The first character must be a letter.
			return false
		}
		if !isLet && !unicode.IsDigit(c) {
			return false
		}
	}
	// An identifier must be non-empty.
	return len(fieldName) > 0
}
1898 // StructOf returns the struct type containing fields.
1899 // The Offset and Index fields are ignored and computed as they would be
1900 // by the compiler.
1902 // StructOf currently does not generate wrapper methods for embedded fields.
1903 // This limitation may be lifted in a future version.
1904 func StructOf(fields []StructField) Type {
1905 var (
1906 hash = uint32(0)
1907 size uintptr
1908 typalign int8
1909 comparable = true
1910 hashable = true
1912 fs = make([]structField, len(fields))
1913 repr = make([]byte, 0, 64)
1914 fset = map[string]struct{}{} // fields' names
1916 hasPtr = false // records whether at least one struct-field is a pointer
1917 hasGCProg = false // records whether a struct-field type has a GCProg
1920 lastzero := uintptr(0)
1921 repr = append(repr, "struct {"...)
1922 for i, field := range fields {
1923 if field.Name == "" {
1924 panic("reflect.StructOf: field " + strconv.Itoa(i) + " has no name")
1926 if !isValidFieldName(field.Name) {
1927 panic("reflect.StructOf: field " + strconv.Itoa(i) + " has invalid name")
1929 if field.Type == nil {
1930 panic("reflect.StructOf: field " + strconv.Itoa(i) + " has no type")
1932 f := runtimeStructField(field)
1933 ft := f.typ
1934 if ft.kind&kindGCProg != 0 {
1935 hasGCProg = true
1937 if ft.pointers() {
1938 hasPtr = true
1941 // Update string and hash
1942 name := *f.name
1943 hash = (hash << 1) + ft.hash
1944 if !f.anon() {
1945 repr = append(repr, (" " + name)...)
1946 } else {
1947 // Embedded field
1948 repr = append(repr, " ?"...)
1949 if f.typ.Kind() == Ptr {
1950 // Embedded ** and *interface{} are illegal
1951 elem := ft.Elem()
1952 if k := elem.Kind(); k == Ptr || k == Interface {
1953 panic("reflect.StructOf: illegal anonymous field type " + ft.String())
1955 name = elem.String()
1956 } else {
1957 name = ft.String()
1960 switch f.typ.Kind() {
1961 case Interface:
1962 ift := (*interfaceType)(unsafe.Pointer(ft))
1963 if len(ift.methods) > 0 {
1964 panic("reflect.StructOf: embedded field with methods not implemented")
1966 case Ptr:
1967 ptr := (*ptrType)(unsafe.Pointer(ft))
1968 if unt := ptr.uncommon(); unt != nil {
1969 if len(unt.methods) > 0 {
1970 panic("reflect.StructOf: embedded field with methods not implemented")
1973 if unt := ptr.elem.uncommon(); unt != nil {
1974 if len(unt.methods) > 0 {
1975 panic("reflect.StructOf: embedded field with methods not implemented")
1978 default:
1979 if unt := ft.uncommon(); unt != nil {
1980 if len(unt.methods) > 0 {
1981 panic("reflect.StructOf: embedded field with methods not implemented")
1986 if _, dup := fset[name]; dup {
1987 panic("reflect.StructOf: duplicate field " + name)
1989 fset[name] = struct{}{}
1991 repr = append(repr, (" " + ft.String())...)
1992 if f.tag != nil {
1993 repr = append(repr, (" " + strconv.Quote(*f.tag))...)
1995 if i < len(fields)-1 {
1996 repr = append(repr, ';')
1999 comparable = comparable && (ft.equalfn != nil)
2000 hashable = hashable && (ft.hashfn != nil)
2002 offset := align(size, uintptr(ft.fieldAlign))
2003 if int8(ft.fieldAlign) > typalign {
2004 typalign = int8(ft.fieldAlign)
2006 size = offset + ft.size
2007 f.offsetAnon |= offset << 1
2009 if ft.size == 0 {
2010 lastzero = size
2013 fs[i] = f
2016 if size > 0 && lastzero == size {
2017 // This is a non-zero sized struct that ends in a
2018 // zero-sized field. We add an extra byte of padding,
2019 // to ensure that taking the address of the final
2020 // zero-sized field can't manufacture a pointer to the
2021 // next object in the heap. See issue 9401.
2022 size++
2025 if len(fs) > 0 {
2026 repr = append(repr, ' ')
2028 repr = append(repr, '}')
2029 hash <<= 2
2030 str := string(repr)
2032 // Round the size up to be a multiple of the alignment.
2033 size = align(size, uintptr(typalign))
2035 // Make the struct type.
2036 var istruct interface{} = struct{}{}
2037 prototype := *(**structType)(unsafe.Pointer(&istruct))
2038 typ := new(structType)
2039 *typ = *prototype
2040 typ.fields = fs
2042 // Look in cache.
2043 if ts, ok := structLookupCache.m.Load(hash); ok {
2044 for _, st := range ts.([]Type) {
2045 t := st.common()
2046 if haveIdenticalUnderlyingType(&typ.rtype, t, true) {
2047 return t
2052 // Not in cache, lock and retry.
2053 structLookupCache.Lock()
2054 defer structLookupCache.Unlock()
2055 if ts, ok := structLookupCache.m.Load(hash); ok {
2056 for _, st := range ts.([]Type) {
2057 t := st.common()
2058 if haveIdenticalUnderlyingType(&typ.rtype, t, true) {
2059 return t
2064 addToCache := func(t Type) Type {
2065 var ts []Type
2066 if ti, ok := structLookupCache.m.Load(hash); ok {
2067 ts = ti.([]Type)
2069 structLookupCache.m.Store(hash, append(ts, t))
2070 return t
2073 typ.string = &str
2074 typ.hash = hash
2075 typ.size = size
2076 typ.align = typalign
2077 typ.fieldAlign = uint8(typalign)
2078 if !hasPtr {
2079 typ.kind |= kindNoPointers
2080 } else {
2081 typ.kind &^= kindNoPointers
2084 if hasGCProg {
2085 lastPtrField := 0
2086 for i, ft := range fs {
2087 if ft.typ.pointers() {
2088 lastPtrField = i
2091 prog := []byte{0, 0, 0, 0} // will be length of prog
2092 for i, ft := range fs {
2093 if i > lastPtrField {
2094 // gcprog should not include anything for any field after
2095 // the last field that contains pointer data
2096 break
2098 // FIXME(sbinet) handle padding, fields smaller than a word
2099 elemGC := (*[1 << 30]byte)(unsafe.Pointer(ft.typ.gcdata))[:]
2100 elemPtrs := ft.typ.ptrdata / ptrSize
2101 switch {
2102 case ft.typ.kind&kindGCProg == 0 && ft.typ.ptrdata != 0:
2103 // Element is small with pointer mask; use as literal bits.
2104 mask := elemGC
2105 // Emit 120-bit chunks of full bytes (max is 127 but we avoid using partial bytes).
2106 var n uintptr
2107 for n := elemPtrs; n > 120; n -= 120 {
2108 prog = append(prog, 120)
2109 prog = append(prog, mask[:15]...)
2110 mask = mask[15:]
2112 prog = append(prog, byte(n))
2113 prog = append(prog, mask[:(n+7)/8]...)
2114 case ft.typ.kind&kindGCProg != 0:
2115 // Element has GC program; emit one element.
2116 elemProg := elemGC[4 : 4+*(*uint32)(unsafe.Pointer(&elemGC[0]))-1]
2117 prog = append(prog, elemProg...)
2119 // Pad from ptrdata to size.
2120 elemWords := ft.typ.size / ptrSize
2121 if elemPtrs < elemWords {
2122 // Emit literal 0 bit, then repeat as needed.
2123 prog = append(prog, 0x01, 0x00)
2124 if elemPtrs+1 < elemWords {
2125 prog = append(prog, 0x81)
2126 prog = appendVarint(prog, elemWords-elemPtrs-1)
2130 *(*uint32)(unsafe.Pointer(&prog[0])) = uint32(len(prog) - 4)
2131 typ.kind |= kindGCProg
2132 typ.gcdata = &prog[0]
2133 } else {
2134 typ.kind &^= kindGCProg
2135 bv := new(bitVector)
2136 addTypeBits(bv, 0, typ.common())
2137 if len(bv.data) > 0 {
2138 typ.gcdata = &bv.data[0]
2141 typ.ptrdata = typeptrdata(typ.common())
2143 if hashable {
2144 typ.hashfn = func(p unsafe.Pointer, seed uintptr) uintptr {
2145 o := seed
2146 for _, ft := range typ.fields {
2147 pi := add(p, ft.offset(), "&x.field safe")
2148 o = ft.typ.hashfn(pi, o)
2150 return o
2152 } else {
2153 typ.hashfn = nil
2156 if comparable {
2157 typ.equalfn = func(p, q unsafe.Pointer) bool {
2158 for _, ft := range typ.fields {
2159 pi := add(p, ft.offset(), "&x.field safe")
2160 qi := add(q, ft.offset(), "&x.field safe")
2161 if !ft.typ.equalfn(pi, qi) {
2162 return false
2165 return true
2167 } else {
2168 typ.equalfn = nil
2171 typ.kind &^= kindDirectIface
2172 typ.uncommonType = nil
2173 typ.ptrToThis = nil
2175 return addToCache(&typ.rtype)
2178 func runtimeStructField(field StructField) structField {
2179 if field.PkgPath != "" {
2180 panic("reflect.StructOf: StructOf does not allow unexported fields")
2183 // Best-effort check for misuse.
2184 // Since PkgPath is empty, not much harm done if Unicode lowercase slips through.
2185 c := field.Name[0]
2186 if 'a' <= c && c <= 'z' || c == '_' {
2187 panic("reflect.StructOf: field \"" + field.Name + "\" is unexported but missing PkgPath")
2190 offsetAnon := uintptr(0)
2191 if field.Anonymous {
2192 offsetAnon |= 1
2195 s := field.Name
2196 name := &s
2198 var tag *string
2199 if field.Tag != "" {
2200 st := string(field.Tag)
2201 tag = &st
2204 return structField{
2205 name: name,
2206 pkgPath: nil,
2207 typ: field.Type.common(),
2208 tag: tag,
2209 offsetAnon: offsetAnon,
2213 // typeptrdata returns the length in bytes of the prefix of t
2214 // containing pointer data. Anything after this offset is scalar data.
2215 // keep in sync with ../cmd/compile/internal/gc/reflect.go
2216 func typeptrdata(t *rtype) uintptr {
2217 if !t.pointers() {
2218 return 0
2220 switch t.Kind() {
2221 case Struct:
2222 st := (*structType)(unsafe.Pointer(t))
2223 // find the last field that has pointers.
2224 field := 0
2225 for i := range st.fields {
2226 ft := st.fields[i].typ
2227 if ft.pointers() {
2228 field = i
2231 f := st.fields[field]
2232 return f.offset() + f.typ.ptrdata
2234 default:
2235 panic("reflect.typeptrdata: unexpected type, " + t.String())
// maxPtrmaskBytes is the size limit for describing a type's pointers
// with a plain bitmask; larger types get a GC program instead (see the
// size check in ArrayOf).
// See cmd/compile/internal/gc/reflect.go for derivation of constant.
const maxPtrmaskBytes = 2048
// ArrayOf returns the array type with the given count and element type.
// For example, if t represents int, ArrayOf(5, t) represents [5]int.
//
// If the resulting type would be larger than the available address space,
// ArrayOf panics.
func ArrayOf(count int, elem Type) Type {
	typ := elem.(*rtype)

	// Look in cache.
	ckey := cacheKey{Array, typ, nil, uintptr(count)}
	if array, ok := lookupCache.Load(ckey); ok {
		return array.(Type)
	}

	// Look in known types.
	s := "[" + strconv.Itoa(count) + "]" + *typ.string

	// Make an array type by copying a prototype *arrayType extracted
	// from an interface holding a [1]unsafe.Pointer value.
	var iarray interface{} = [1]unsafe.Pointer{}
	prototype := *(**arrayType)(unsafe.Pointer(&iarray))
	array := *prototype
	array.string = &s

	// gccgo uses a different hash.
	// array.hash = fnv1(typ.hash, '[')
	// for n := uint32(count); n > 0; n >>= 8 {
	// 	array.hash = fnv1(array.hash, byte(n))
	// }
	// array.hash = fnv1(array.hash, ']')
	array.hash = typ.hash + 1 + 13

	array.elem = typ
	array.ptrToThis = nil
	// Panic rather than overflow uintptr when computing the total size.
	if typ.size > 0 {
		max := ^uintptr(0) / typ.size
		if uintptr(count) > max {
			panic("reflect.ArrayOf: array size would exceed virtual address space")
		}
	}
	array.size = typ.size * uintptr(count)
	// Pointer data ends inside the last element, at the element's own
	// ptrdata offset.
	if count > 0 && typ.ptrdata != 0 {
		array.ptrdata = typ.size*uintptr(count-1) + typ.ptrdata
	}
	array.align = typ.align
	array.fieldAlign = typ.fieldAlign
	array.uncommonType = nil
	array.len = uintptr(count)
	array.slice = SliceOf(elem).(*rtype)

	// Choose how the GC describes this array's pointers: no metadata,
	// element's own metadata, a widened pointer bitmask, or a GC program.
	array.kind &^= kindNoPointers
	switch {
	case typ.kind&kindNoPointers != 0 || array.size == 0:
		// No pointers.
		array.kind |= kindNoPointers
		array.gcdata = nil
		array.ptrdata = 0

	case count == 1:
		// In memory, 1-element array looks just like the element.
		array.kind |= typ.kind & kindGCProg
		array.gcdata = typ.gcdata
		array.ptrdata = typ.ptrdata

	case typ.kind&kindGCProg == 0 && array.size <= maxPtrmaskBytes*8*ptrSize:
		// Element is small with pointer mask; array is still small.
		// Create direct pointer mask by turning each 1 bit in elem
		// into count 1 bits in larger mask.
		mask := make([]byte, (array.ptrdata/ptrSize+7)/8)
		elemMask := (*[1 << 30]byte)(unsafe.Pointer(typ.gcdata))[:]
		elemWords := typ.size / ptrSize
		for j := uintptr(0); j < typ.ptrdata/ptrSize; j++ {
			if (elemMask[j/8]>>(j%8))&1 != 0 {
				// Replicate this set bit once per array element.
				for i := uintptr(0); i < array.len; i++ {
					k := i*elemWords + j
					mask[k/8] |= 1 << (k % 8)
				}
			}
		}
		array.gcdata = &mask[0]

	default:
		// Create program that emits one element
		// and then repeats to make the array.
		prog := []byte{0, 0, 0, 0} // will be length of prog
		elemGC := (*[1 << 30]byte)(unsafe.Pointer(typ.gcdata))[:]
		elemPtrs := typ.ptrdata / ptrSize
		if typ.kind&kindGCProg == 0 {
			// Element is small with pointer mask; use as literal bits.
			mask := elemGC
			// Emit 120-bit chunks of full bytes (max is 127 but we avoid using partial bytes).
			var n uintptr
			for n = elemPtrs; n > 120; n -= 120 {
				prog = append(prog, 120)
				prog = append(prog, mask[:15]...)
				mask = mask[15:]
			}
			prog = append(prog, byte(n))
			prog = append(prog, mask[:(n+7)/8]...)
		} else {
			// Element has GC program; emit one element.
			// NOTE(review): the element program is assumed to be
			// length-prefixed (4 bytes) and 0-terminated (the -1
			// drops the terminator) — matches the prefix written
			// below for our own program.
			elemProg := elemGC[4 : 4+*(*uint32)(unsafe.Pointer(&elemGC[0]))-1]
			prog = append(prog, elemProg...)
		}
		// Pad from ptrdata to size.
		elemWords := typ.size / ptrSize
		if elemPtrs < elemWords {
			// Emit literal 0 bit, then repeat as needed.
			prog = append(prog, 0x01, 0x00)
			if elemPtrs+1 < elemWords {
				prog = append(prog, 0x81)
				prog = appendVarint(prog, elemWords-elemPtrs-1)
			}
		}
		// Repeat count-1 times.
		if elemWords < 0x80 {
			prog = append(prog, byte(elemWords|0x80))
		} else {
			prog = append(prog, 0x80)
			prog = appendVarint(prog, elemWords)
		}
		prog = appendVarint(prog, uintptr(count)-1)
		prog = append(prog, 0)
		// Back-patch the 4-byte length prefix reserved above.
		*(*uint32)(unsafe.Pointer(&prog[0])) = uint32(len(prog) - 4)
		array.kind |= kindGCProg
		array.gcdata = &prog[0]
		array.ptrdata = array.size // overestimate but ok; must match program
	}

	array.kind &^= kindDirectIface

	esize := typ.size

	// Element-wise equality: all elements must compare equal.
	// A nil element equalfn means the element type is not comparable.
	if typ.equalfn == nil {
		array.equalfn = nil
	} else {
		eequal := typ.equalfn
		array.equalfn = func(p, q unsafe.Pointer) bool {
			for i := 0; i < count; i++ {
				pi := arrayAt(p, i, esize, "i < count")
				qi := arrayAt(q, i, esize, "i < count")
				if !eequal(pi, qi) {
					return false
				}
			}
			return true
		}
	}
	// Hash by folding each element's hash into the seed in order.
	if typ.hashfn == nil {
		array.hashfn = nil
	} else {
		ehash := typ.hashfn
		array.hashfn = func(ptr unsafe.Pointer, seed uintptr) uintptr {
			o := seed
			for i := 0; i < count; i++ {
				o = ehash(arrayAt(ptr, i, esize, "i < count"), o)
			}
			return o
		}
	}

	// LoadOrStore so a concurrent caller's identical type wins the race
	// and both return the same *rtype.
	ti, _ := lookupCache.LoadOrStore(ckey, &array.rtype)
	return ti.(Type)
}
// appendVarint appends v to x in the little-endian base-128 varint
// encoding used by GC programs: 7 value bits per byte, high bit set on
// every byte except the last.
func appendVarint(x []byte, v uintptr) []byte {
	for v >= 0x80 {
		x = append(x, byte(v)|0x80)
		v >>= 7
	}
	return append(x, byte(v))
}
// toType converts from a *rtype to a Type that can be returned
// to the client of package reflect. In gc, the only concern is that
// a nil *rtype must be replaced by a nil Type, but in gccgo this
// function takes care of ensuring that multiple *rtype for the same
// type are coalesced into a single Type.

// canonicalType maps a type's raw string representation (rawString) to
// the canonical Type value chosen for it. Guarded by canonicalTypeLock.
var canonicalType = make(map[string]Type)

var canonicalTypeLock sync.RWMutex
2424 func canonicalize(t Type) Type {
2425 if t == nil {
2426 return nil
2428 s := t.rawString()
2429 canonicalTypeLock.RLock()
2430 if r, ok := canonicalType[s]; ok {
2431 canonicalTypeLock.RUnlock()
2432 return r
2434 canonicalTypeLock.RUnlock()
2435 canonicalTypeLock.Lock()
2436 if r, ok := canonicalType[s]; ok {
2437 canonicalTypeLock.Unlock()
2438 return r
2440 canonicalType[s] = t
2441 canonicalTypeLock.Unlock()
2442 return t
2445 func toType(p *rtype) Type {
2446 if p == nil {
2447 return nil
2449 return canonicalize(p)
2452 // ifaceIndir reports whether t is stored indirectly in an interface value.
2453 func ifaceIndir(t *rtype) bool {
2454 return t.kind&kindDirectIface == 0
// Layout matches runtime.gobitvector (well enough).
type bitVector struct {
	n    uint32 // number of bits
	data []byte // bit i of the map lives at data[i/8] bit (i%8)
}

// append a bit to the bitmap.
func (bv *bitVector) append(bit uint8) {
	// Grow the backing slice whenever a new byte is started.
	if bv.n%8 == 0 {
		bv.data = append(bv.data, 0)
	}
	byteIndex, bitShift := bv.n/8, bv.n%8
	bv.data[byteIndex] |= bit << bitShift
	bv.n++
}
2472 func addTypeBits(bv *bitVector, offset uintptr, t *rtype) {
2473 if t.kind&kindNoPointers != 0 {
2474 return
2477 switch Kind(t.kind & kindMask) {
2478 case Chan, Func, Map, Ptr, Slice, String, UnsafePointer:
2479 // 1 pointer at start of representation
2480 for bv.n < uint32(offset/uintptr(ptrSize)) {
2481 bv.append(0)
2483 bv.append(1)
2485 case Interface:
2486 // 2 pointers
2487 for bv.n < uint32(offset/uintptr(ptrSize)) {
2488 bv.append(0)
2490 bv.append(1)
2491 bv.append(1)
2493 case Array:
2494 // repeat inner type
2495 tt := (*arrayType)(unsafe.Pointer(t))
2496 for i := 0; i < int(tt.len); i++ {
2497 addTypeBits(bv, offset+uintptr(i)*tt.elem.size, tt.elem)
2500 case Struct:
2501 // apply fields
2502 tt := (*structType)(unsafe.Pointer(t))
2503 for i := range tt.fields {
2504 f := &tt.fields[i]
2505 addTypeBits(bv, offset+f.offset(), f.typ)