libgo: update to go1.9
[official-gcc.git] / libgo / go / reflect / type.go
blob 664d9717a0654165dd1c3637ab7f842bdac59414
1 // Copyright 2009 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
5 // Package reflect implements run-time reflection, allowing a program to
6 // manipulate objects with arbitrary types. The typical use is to take a value
7 // with static type interface{} and extract its dynamic type information by
8 // calling TypeOf, which returns a Type.
9 //
10 // A call to ValueOf returns a Value representing the run-time data.
11 // Zero takes a Type and returns a Value representing a zero value
12 // for that type.
14 // See "The Laws of Reflection" for an introduction to reflection in Go:
15 // https://golang.org/doc/articles/laws_of_reflection.html
16 package reflect
18 import (
19 "strconv"
20 "sync"
21 "unicode"
22 "unicode/utf8"
23 "unsafe"
26 // Type is the representation of a Go type.
28 // Not all methods apply to all kinds of types. Restrictions,
29 // if any, are noted in the documentation for each method.
30 // Use the Kind method to find out the kind of type before
31 // calling kind-specific methods. Calling a method
32 // inappropriate to the kind of type causes a run-time panic.
34 // Type values are comparable, such as with the == operator.
35 // Two Type values are equal if they represent identical types.
36 type Type interface {
37 // Methods applicable to all types.
39 // Align returns the alignment in bytes of a value of
40 // this type when allocated in memory.
41 Align() int
43 // FieldAlign returns the alignment in bytes of a value of
44 // this type when used as a field in a struct.
45 FieldAlign() int
47 // Method returns the i'th method in the type's method set.
48 // It panics if i is not in the range [0, NumMethod()).
50 // For a non-interface type T or *T, the returned Method's Type and Func
51 // fields describe a function whose first argument is the receiver.
53 // For an interface type, the returned Method's Type field gives the
54 // method signature, without a receiver, and the Func field is nil.
55 Method(int) Method
57 // MethodByName returns the method with that name in the type's
58 // method set and a boolean indicating if the method was found.
60 // For a non-interface type T or *T, the returned Method's Type and Func
61 // fields describe a function whose first argument is the receiver.
63 // For an interface type, the returned Method's Type field gives the
64 // method signature, without a receiver, and the Func field is nil.
65 MethodByName(string) (Method, bool)
67 // NumMethod returns the number of exported methods in the type's method set.
68 NumMethod() int
70 // Name returns the type's name within its package.
71 // It returns an empty string for unnamed types.
72 Name() string
74 // PkgPath returns a named type's package path, that is, the import path
75 // that uniquely identifies the package, such as "encoding/base64".
76 // If the type was predeclared (string, error) or unnamed (*T, struct{}, []int),
77 // the package path will be the empty string.
78 PkgPath() string
80 // Size returns the number of bytes needed to store
81 // a value of the given type; it is analogous to unsafe.Sizeof.
82 Size() uintptr
84 // String returns a string representation of the type.
85 // The string representation may use shortened package names
86 // (e.g., base64 instead of "encoding/base64") and is not
87 // guaranteed to be unique among types. To test for type identity,
88 // compare the Types directly.
89 String() string
91 // Used internally by gccgo; returns the string form with its quoting retained.
92 rawString() string
94 // Kind returns the specific kind of this type.
95 Kind() Kind
97 // Implements reports whether the type implements the interface type u.
98 Implements(u Type) bool
100 // AssignableTo reports whether a value of the type is assignable to type u.
101 AssignableTo(u Type) bool
103 // ConvertibleTo reports whether a value of the type is convertible to type u.
104 ConvertibleTo(u Type) bool
106 // Comparable reports whether values of this type are comparable.
107 Comparable() bool
109 // Methods applicable only to some types, depending on Kind.
110 // The methods allowed for each kind are:
112 // Int*, Uint*, Float*, Complex*: Bits
113 // Array: Elem, Len
114 // Chan: ChanDir, Elem
115 // Func: In, NumIn, Out, NumOut, IsVariadic.
116 // Map: Key, Elem
117 // Ptr: Elem
118 // Slice: Elem
119 // Struct: Field, FieldByIndex, FieldByName, FieldByNameFunc, NumField
121 // Bits returns the size of the type in bits.
122 // It panics if the type's Kind is not one of the
123 // sized or unsized Int, Uint, Float, or Complex kinds.
124 Bits() int
126 // ChanDir returns a channel type's direction.
127 // It panics if the type's Kind is not Chan.
128 ChanDir() ChanDir
130 // IsVariadic reports whether a function type's final input parameter
131 // is a "..." parameter. If so, t.In(t.NumIn() - 1) returns the parameter's
132 // implicit actual type []T.
134 // For concreteness, if t represents func(x int, y ... float64), then
136 // t.NumIn() == 2
137 // t.In(0) is the reflect.Type for "int"
138 // t.In(1) is the reflect.Type for "[]float64"
139 // t.IsVariadic() == true
141 // IsVariadic panics if the type's Kind is not Func.
142 IsVariadic() bool
144 // Elem returns a type's element type.
145 // It panics if the type's Kind is not Array, Chan, Map, Ptr, or Slice.
146 Elem() Type
148 // Field returns a struct type's i'th field.
149 // It panics if the type's Kind is not Struct.
150 // It panics if i is not in the range [0, NumField()).
151 Field(i int) StructField
153 // FieldByIndex returns the nested field corresponding
154 // to the index sequence. It is equivalent to calling Field
155 // successively for each index i.
156 // It panics if the type's Kind is not Struct.
157 FieldByIndex(index []int) StructField
159 // FieldByName returns the struct field with the given name
160 // and a boolean indicating if the field was found.
161 FieldByName(name string) (StructField, bool)
163 // FieldByNameFunc returns the struct field with a name
164 // that satisfies the match function and a boolean indicating if
165 // the field was found.
167 // FieldByNameFunc considers the fields in the struct itself
168 // and then the fields in any anonymous structs, in breadth first order,
169 // stopping at the shallowest nesting depth containing one or more
170 // fields satisfying the match function. If multiple fields at that depth
171 // satisfy the match function, they cancel each other
172 // and FieldByNameFunc returns no match.
173 // This behavior mirrors Go's handling of name lookup in
174 // structs containing anonymous fields.
175 FieldByNameFunc(match func(string) bool) (StructField, bool)
177 // In returns the type of a function type's i'th input parameter.
178 // It panics if the type's Kind is not Func.
179 // It panics if i is not in the range [0, NumIn()).
180 In(i int) Type
182 // Key returns a map type's key type.
183 // It panics if the type's Kind is not Map.
184 Key() Type
186 // Len returns an array type's length.
187 // It panics if the type's Kind is not Array.
188 Len() int
190 // NumField returns a struct type's field count.
191 // It panics if the type's Kind is not Struct.
192 NumField() int
194 // NumIn returns a function type's input parameter count.
195 // It panics if the type's Kind is not Func.
196 NumIn() int
198 // NumOut returns a function type's output parameter count.
199 // It panics if the type's Kind is not Func.
200 NumOut() int
202 // Out returns the type of a function type's i'th output parameter.
203 // It panics if the type's Kind is not Func.
204 // It panics if i is not in the range [0, NumOut()).
205 Out(i int) Type
207 common() *rtype
208 uncommon() *uncommonType
211 // BUG(rsc): FieldByName and related functions consider struct field names to be equal
212 // if the names are equal, even if they are unexported names originating
213 // in different packages. The practical effect of this is that the result of
214 // t.FieldByName("x") is not well defined if the struct type t contains
215 // multiple fields named x (embedded from different packages).
216 // FieldByName may return one of the fields named x or may report that there are none.
217 // See golang.org/issue/4876 for more details.
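
// elemOrSelf is an illustrative sketch, not part of the original file: it
// shows the pattern described above of checking Kind before calling a
// kind-specific method such as Elem, which panics for other kinds.
// The helper name is hypothetical.
func elemOrSelf(t Type) Type {
	switch t.Kind() {
	case Array, Chan, Map, Ptr, Slice:
		// Elem is only legal for these kinds.
		return t.Elem()
	}
	return t
}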
220 * These data structures are known to the compiler (../../cmd/internal/gc/reflect.go).
221 * A few are known to ../runtime/type.go to convey to debuggers.
222 * They are also known to ../runtime/type.go.
225 // A Kind represents the specific kind of type that a Type represents.
226 // The zero Kind is not a valid kind.
227 type Kind uint
229 const (
230 Invalid Kind = iota
231 Bool
232 Int
233 Int8
234 Int16
235 Int32
236 Int64
237 Uint
238 Uint8
239 Uint16
240 Uint32
241 Uint64
242 Uintptr
243 Float32
244 Float64
245 Complex64
246 Complex128
247 Array
248 Chan
249 Func
250 Interface
251 Map
252 Ptr
253 Slice
254 String
255 Struct
256 UnsafePointer
259 // rtype is the common implementation of most values.
260 // It is embedded in other, public struct types, but always
261 // with a unique tag like `reflect:"array"` or `reflect:"ptr"`
262 // so that code cannot convert from, say, *arrayType to *ptrType.
264 // rtype must be kept in sync with ../runtime/type.go:/^type._type.
265 type rtype struct {
266 size uintptr
267 ptrdata uintptr // size of memory prefix holding all pointers
268 hash uint32 // hash of type; avoids computation in hash tables
269 kind uint8 // enumeration for C
270 align int8 // alignment of variable with this type
271 fieldAlign uint8 // alignment of struct field with this type
272 _ uint8 // unused/padding
274 hashfn func(unsafe.Pointer, uintptr) uintptr // hash function
275 equalfn func(unsafe.Pointer, unsafe.Pointer) bool // equality function
277 gcdata *byte // garbage collection data
278 string *string // string form; unnecessary but undeniably useful
279 *uncommonType // (relatively) uncommon fields
280 ptrToThis *rtype // type for pointer to this type, if used in binary or has methods
283 // Method on non-interface type
284 type method struct {
285 name *string // name of method
286 pkgPath *string // nil for exported Names; otherwise import path
287 mtyp *rtype // method type (without receiver)
288 typ *rtype // .(*FuncType) underneath (with receiver)
289 tfn unsafe.Pointer // fn used for normal method call
292 // uncommonType is present only for types with names or methods
293 // (if T is a named type, the uncommonTypes for T and *T have methods).
294 // Using a pointer to this struct reduces the overall size required
295 // to describe an unnamed type with no methods.
296 type uncommonType struct {
297 name *string // name of type
298 pkgPath *string // import path; nil for built-in types like int, string
299 methods []method // methods associated with type
302 // ChanDir represents a channel type's direction.
303 type ChanDir int
305 const (
306 RecvDir ChanDir = 1 << iota // <-chan
307 SendDir // chan<-
308 BothDir = RecvDir | SendDir // chan
311 // arrayType represents a fixed array type.
312 type arrayType struct {
313 rtype `reflect:"array"`
314 elem *rtype // array element type
315 slice *rtype // slice type
316 len uintptr
319 // chanType represents a channel type.
320 type chanType struct {
321 rtype `reflect:"chan"`
322 elem *rtype // channel element type
323 dir uintptr // channel direction (ChanDir)
326 // funcType represents a function type.
327 type funcType struct {
328 rtype `reflect:"func"`
329 dotdotdot bool // last input parameter is ...
330 in []*rtype // input parameter types
331 out []*rtype // output parameter types
334 // imethod represents a method on an interface type
335 type imethod struct {
336 name *string // name of method
337 pkgPath *string // nil for exported Names; otherwise import path
338 typ *rtype // .(*FuncType) underneath
341 // interfaceType represents an interface type.
342 type interfaceType struct {
343 rtype `reflect:"interface"`
344 methods []imethod // sorted by hash
347 // mapType represents a map type.
348 type mapType struct {
349 rtype `reflect:"map"`
350 key *rtype // map key type
351 elem *rtype // map element (value) type
352 bucket *rtype // internal bucket structure
353 hmap *rtype // internal map header
354 keysize uint8 // size of key slot
355 indirectkey uint8 // store ptr to key instead of key itself
356 valuesize uint8 // size of value slot
357 indirectvalue uint8 // store ptr to value instead of value itself
358 bucketsize uint16 // size of bucket
359 reflexivekey bool // true if k==k for all keys
360 needkeyupdate bool // true if we need to update key on an overwrite
363 // ptrType represents a pointer type.
364 type ptrType struct {
365 rtype `reflect:"ptr"`
366 elem *rtype // pointer element (pointed at) type
369 // sliceType represents a slice type.
370 type sliceType struct {
371 rtype `reflect:"slice"`
372 elem *rtype // slice element type
375 // Struct field
376 type structField struct {
377 name *string // name is always non-empty
378 pkgPath *string // nil for exported Names; otherwise import path
379 typ *rtype // type of field
380 tag *string // nil if no tag
381 offsetAnon uintptr // byte offset of field<<1 | isAnonymous
384 func (f *structField) offset() uintptr {
385 return f.offsetAnon >> 1
388 func (f *structField) anon() bool {
389 return f.offsetAnon&1 != 0
392 // structType represents a struct type.
393 type structType struct {
394 rtype `reflect:"struct"`
395 fields []structField // sorted by offset
399 * The compiler knows the exact layout of all the data structures above.
400 * The compiler does not know about the data structures and methods below.
403 // Method represents a single method.
404 type Method struct {
405 // Name is the method name.
406 // PkgPath is the package path that qualifies a lower case (unexported)
407 // method name. It is empty for upper case (exported) method names.
408 // The combination of PkgPath and Name uniquely identifies a method
409 // in a method set.
410 // See https://golang.org/ref/spec#Uniqueness_of_identifiers
411 Name string
412 PkgPath string
414 Type Type // method type
415 Func Value // func with receiver as first argument
416 Index int // index for Type.Method
419 const (
420 kindDirectIface = 1 << 5
421 kindGCProg = 1 << 6 // Type.gc points to GC program
422 kindNoPointers = 1 << 7
423 kindMask = (1 << 5) - 1
426 func (k Kind) String() string {
427 if int(k) < len(kindNames) {
428 return kindNames[k]
430 return "kind" + strconv.Itoa(int(k))
433 var kindNames = []string{
434 Invalid: "invalid",
435 Bool: "bool",
436 Int: "int",
437 Int8: "int8",
438 Int16: "int16",
439 Int32: "int32",
440 Int64: "int64",
441 Uint: "uint",
442 Uint8: "uint8",
443 Uint16: "uint16",
444 Uint32: "uint32",
445 Uint64: "uint64",
446 Uintptr: "uintptr",
447 Float32: "float32",
448 Float64: "float64",
449 Complex64: "complex64",
450 Complex128: "complex128",
451 Array: "array",
452 Chan: "chan",
453 Func: "func",
454 Interface: "interface",
455 Map: "map",
456 Ptr: "ptr",
457 Slice: "slice",
458 String: "string",
459 Struct: "struct",
460 UnsafePointer: "unsafe.Pointer",
463 func (t *uncommonType) uncommon() *uncommonType {
464 return t
467 func (t *uncommonType) PkgPath() string {
468 if t == nil || t.pkgPath == nil {
469 return ""
471 return *t.pkgPath
474 func (t *uncommonType) Name() string {
475 if t == nil || t.name == nil {
476 return ""
478 return *t.name
481 func (t *rtype) rawString() string { return *t.string }
483 func (t *rtype) String() string {
484 // For gccgo, strip out quoted strings.
485 s := *t.string
486 var q bool
487 r := make([]byte, len(s))
488 j := 0
489 for i := 0; i < len(s); i++ {
490 if s[i] == '\t' {
491 q = !q
492 } else if !q {
493 r[j] = s[i]
494 j++
497 return string(r[:j])
500 func (t *rtype) Size() uintptr { return t.size }
502 func (t *rtype) Bits() int {
503 if t == nil {
504 panic("reflect: Bits of nil Type")
506 k := t.Kind()
507 if k < Int || k > Complex128 {
508 panic("reflect: Bits of non-arithmetic Type " + t.String())
510 return int(t.size) * 8
513 func (t *rtype) Align() int { return int(t.align) }
515 func (t *rtype) FieldAlign() int { return int(t.fieldAlign) }
517 func (t *rtype) Kind() Kind { return Kind(t.kind & kindMask) }
519 func (t *rtype) pointers() bool { return t.kind&kindNoPointers == 0 }
521 func (t *rtype) common() *rtype { return t }
523 var methodCache sync.Map // map[*rtype][]method
525 func (t *rtype) exportedMethods() []method {
526 methodsi, found := methodCache.Load(t)
527 if found {
528 return methodsi.([]method)
531 ut := t.uncommon()
532 if ut == nil {
533 return nil
535 allm := ut.methods
536 allExported := true
537 for _, m := range allm {
538 if m.pkgPath != nil {
539 allExported = false
540 break
543 var methods []method
544 if allExported {
545 methods = allm
546 } else {
547 methods = make([]method, 0, len(allm))
548 for _, m := range allm {
549 if m.pkgPath == nil {
550 methods = append(methods, m)
553 methods = methods[:len(methods):len(methods)]
556 methodsi, _ = methodCache.LoadOrStore(t, methods)
557 return methodsi.([]method)
560 func (t *rtype) NumMethod() int {
561 if t.Kind() == Interface {
562 tt := (*interfaceType)(unsafe.Pointer(t))
563 return tt.NumMethod()
565 if t.uncommonType == nil {
566 return 0 // avoid methodCache synchronization
568 return len(t.exportedMethods())
571 func (t *rtype) Method(i int) (m Method) {
572 if t.Kind() == Interface {
573 tt := (*interfaceType)(unsafe.Pointer(t))
574 return tt.Method(i)
576 methods := t.exportedMethods()
577 if i < 0 || i >= len(methods) {
578 panic("reflect: Method index out of range")
580 p := methods[i]
581 if p.name != nil {
582 m.Name = *p.name
584 fl := flag(Func)
585 mt := p.typ
586 m.Type = toType(mt)
587 x := new(unsafe.Pointer)
588 *x = unsafe.Pointer(&p.tfn)
589 m.Func = Value{mt, unsafe.Pointer(x), fl | flagIndir | flagMethodFn}
590 m.Index = i
591 return m
594 func (t *rtype) MethodByName(name string) (m Method, ok bool) {
595 if t.Kind() == Interface {
596 tt := (*interfaceType)(unsafe.Pointer(t))
597 return tt.MethodByName(name)
599 ut := t.uncommon()
600 if ut == nil {
601 return Method{}, false
603 for i := range ut.methods {
604 p := &ut.methods[i]
605 if p.pkgPath == nil && p.name != nil && *p.name == name {
606 return t.Method(i), true
609 return Method{}, false
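
// exampleMethodLookup is an illustrative sketch, not part of the original
// file: it shows NumMethod, Method and MethodByName on a concrete type.
// ChanDir is used only because it is already defined in this package and
// has an exported String method; the helper name is hypothetical.
func exampleMethodLookup() (Method, bool) {
	t := TypeOf(ChanDir(0))
	for i := 0; i < t.NumMethod(); i++ {
		_ = t.Method(i) // e.g. String; Func takes the receiver as first argument
	}
	return t.MethodByName("String")
}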
612 func (t *rtype) PkgPath() string {
613 return t.uncommonType.PkgPath()
616 func (t *rtype) Name() string {
617 return t.uncommonType.Name()
620 func (t *rtype) ChanDir() ChanDir {
621 if t.Kind() != Chan {
622 panic("reflect: ChanDir of non-chan type")
624 tt := (*chanType)(unsafe.Pointer(t))
625 return ChanDir(tt.dir)
628 func (t *rtype) IsVariadic() bool {
629 if t.Kind() != Func {
630 panic("reflect: IsVariadic of non-func type")
632 tt := (*funcType)(unsafe.Pointer(t))
633 return tt.dotdotdot
636 func (t *rtype) Elem() Type {
637 switch t.Kind() {
638 case Array:
639 tt := (*arrayType)(unsafe.Pointer(t))
640 return toType(tt.elem)
641 case Chan:
642 tt := (*chanType)(unsafe.Pointer(t))
643 return toType(tt.elem)
644 case Map:
645 tt := (*mapType)(unsafe.Pointer(t))
646 return toType(tt.elem)
647 case Ptr:
648 tt := (*ptrType)(unsafe.Pointer(t))
649 return toType(tt.elem)
650 case Slice:
651 tt := (*sliceType)(unsafe.Pointer(t))
652 return toType(tt.elem)
654 panic("reflect: Elem of invalid type")
657 func (t *rtype) Field(i int) StructField {
658 if t.Kind() != Struct {
659 panic("reflect: Field of non-struct type")
661 tt := (*structType)(unsafe.Pointer(t))
662 return tt.Field(i)
665 func (t *rtype) FieldByIndex(index []int) StructField {
666 if t.Kind() != Struct {
667 panic("reflect: FieldByIndex of non-struct type")
669 tt := (*structType)(unsafe.Pointer(t))
670 return tt.FieldByIndex(index)
673 func (t *rtype) FieldByName(name string) (StructField, bool) {
674 if t.Kind() != Struct {
675 panic("reflect: FieldByName of non-struct type")
677 tt := (*structType)(unsafe.Pointer(t))
678 return tt.FieldByName(name)
681 func (t *rtype) FieldByNameFunc(match func(string) bool) (StructField, bool) {
682 if t.Kind() != Struct {
683 panic("reflect: FieldByNameFunc of non-struct type")
685 tt := (*structType)(unsafe.Pointer(t))
686 return tt.FieldByNameFunc(match)
689 func (t *rtype) In(i int) Type {
690 if t.Kind() != Func {
691 panic("reflect: In of non-func type")
693 tt := (*funcType)(unsafe.Pointer(t))
694 return toType(tt.in[i])
697 func (t *rtype) Key() Type {
698 if t.Kind() != Map {
699 panic("reflect: Key of non-map type")
701 tt := (*mapType)(unsafe.Pointer(t))
702 return toType(tt.key)
705 func (t *rtype) Len() int {
706 if t.Kind() != Array {
707 panic("reflect: Len of non-array type")
709 tt := (*arrayType)(unsafe.Pointer(t))
710 return int(tt.len)
713 func (t *rtype) NumField() int {
714 if t.Kind() != Struct {
715 panic("reflect: NumField of non-struct type")
717 tt := (*structType)(unsafe.Pointer(t))
718 return len(tt.fields)
721 func (t *rtype) NumIn() int {
722 if t.Kind() != Func {
723 panic("reflect: NumIn of non-func type")
725 tt := (*funcType)(unsafe.Pointer(t))
726 return len(tt.in)
729 func (t *rtype) NumOut() int {
730 if t.Kind() != Func {
731 panic("reflect: NumOut of non-func type")
733 tt := (*funcType)(unsafe.Pointer(t))
734 return len(tt.out)
737 func (t *rtype) Out(i int) Type {
738 if t.Kind() != Func {
739 panic("reflect: Out of non-func type")
741 tt := (*funcType)(unsafe.Pointer(t))
742 return toType(tt.out[i])
745 func (d ChanDir) String() string {
746 switch d {
747 case SendDir:
748 return "chan<-"
749 case RecvDir:
750 return "<-chan"
751 case BothDir:
752 return "chan"
754 return "ChanDir" + strconv.Itoa(int(d))
757 // Method returns the i'th method in the type's method set.
758 func (t *interfaceType) Method(i int) (m Method) {
759 if i < 0 || i >= len(t.methods) {
760 return
762 p := &t.methods[i]
763 m.Name = *p.name
764 if p.pkgPath != nil {
765 m.PkgPath = *p.pkgPath
767 m.Type = toType(p.typ)
768 m.Index = i
769 return
772 // NumMethod returns the number of interface methods in the type's method set.
773 func (t *interfaceType) NumMethod() int { return len(t.methods) }
775 // MethodByName returns the method with the given name in the type's method set.
776 func (t *interfaceType) MethodByName(name string) (m Method, ok bool) {
777 if t == nil {
778 return
780 var p *imethod
781 for i := range t.methods {
782 p = &t.methods[i]
783 if *p.name == name {
784 return t.Method(i), true
787 return
790 // A StructField describes a single field in a struct.
791 type StructField struct {
792 // Name is the field name.
793 Name string
794 // PkgPath is the package path that qualifies a lower case (unexported)
795 // field name. It is empty for upper case (exported) field names.
796 // See https://golang.org/ref/spec#Uniqueness_of_identifiers
797 PkgPath string
799 Type Type // field type
800 Tag StructTag // field tag string
801 Offset uintptr // offset within struct, in bytes
802 Index []int // index sequence for Type.FieldByIndex
803 Anonymous bool // is an embedded field
806 // A StructTag is the tag string in a struct field.
808 // By convention, tag strings are a concatenation of
809 // optionally space-separated key:"value" pairs.
810 // Each key is a non-empty string consisting of non-control
811 // characters other than space (U+0020 ' '), quote (U+0022 '"'),
812 // and colon (U+003A ':'). Each value is quoted using U+0022 '"'
813 // characters and Go string literal syntax.
814 type StructTag string
816 // Get returns the value associated with key in the tag string.
817 // If there is no such key in the tag, Get returns the empty string.
818 // If the tag does not have the conventional format, the value
819 // returned by Get is unspecified. To determine whether a tag is
820 // explicitly set to the empty string, use Lookup.
821 func (tag StructTag) Get(key string) string {
822 v, _ := tag.Lookup(key)
823 return v
826 // Lookup returns the value associated with key in the tag string.
827 // If the key is present in the tag the value (which may be empty)
828 // is returned. Otherwise the returned value will be the empty string.
829 // The ok return value reports whether the value was explicitly set in
830 // the tag string. If the tag does not have the conventional format,
831 // the value returned by Lookup is unspecified.
832 func (tag StructTag) Lookup(key string) (value string, ok bool) {
833 // When modifying this code, also update the validateStructTag code
834 // in cmd/vet/structtag.go.
836 for tag != "" {
837 // Skip leading space.
838 i := 0
839 for i < len(tag) && tag[i] == ' ' {
840 i++
842 tag = tag[i:]
843 if tag == "" {
844 break
847 // Scan to colon. A space, a quote or a control character is a syntax error.
848 // Strictly speaking, control chars include the range [0x7f, 0x9f], not just
849 // [0x00, 0x1f], but in practice, we ignore the multi-byte control characters
850 // as it is simpler to inspect the tag's bytes than the tag's runes.
851 i = 0
852 for i < len(tag) && tag[i] > ' ' && tag[i] != ':' && tag[i] != '"' && tag[i] != 0x7f {
853 i++
855 if i == 0 || i+1 >= len(tag) || tag[i] != ':' || tag[i+1] != '"' {
856 break
858 name := string(tag[:i])
859 tag = tag[i+1:]
861 // Scan quoted string to find value.
862 i = 1
863 for i < len(tag) && tag[i] != '"' {
864 if tag[i] == '\\' {
865 i++
866 }
867 i++
869 if i >= len(tag) {
870 break
872 qvalue := string(tag[:i+1])
873 tag = tag[i+1:]
875 if key == name {
876 value, err := strconv.Unquote(qvalue)
877 if err != nil {
878 break
880 return value, true
883 return "", false
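
// exampleTagLookup is an illustrative sketch, not part of the original file:
// it shows the difference between Get and Lookup on a conventional tag.
// The helper name and the tag literal are hypothetical.
func exampleTagLookup() (string, bool) {
	tag := StructTag(`json:"name,omitempty" xml:"Name"`)
	_ = tag.Get("json") // "name,omitempty"
	// Lookup also reports whether the key was present at all.
	return tag.Lookup("yaml") // "", false
}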
886 // Field returns the i'th struct field.
887 func (t *structType) Field(i int) (f StructField) {
888 if i < 0 || i >= len(t.fields) {
889 panic("reflect: Field index out of bounds")
891 p := &t.fields[i]
892 f.Type = toType(p.typ)
893 f.Name = *p.name
894 f.Anonymous = p.anon()
895 if p.pkgPath != nil {
896 f.PkgPath = *p.pkgPath
898 if p.tag != nil {
899 f.Tag = StructTag(*p.tag)
901 f.Offset = p.offset()
903 // NOTE(rsc): This is the only allocation in the interface
904 // presented by a reflect.Type. It would be nice to avoid,
905 // at least in the common cases, but we need to make sure
906 // that misbehaving clients of reflect cannot affect other
907 // uses of reflect. One possibility is CL 5371098, but we
908 // postponed that ugliness until there is a demonstrated
909 // need for the performance. This is issue 2320.
910 f.Index = []int{i}
911 return
914 // TODO(gri): Should there be an error/bool indicator if the index
915 // is wrong for FieldByIndex?
917 // FieldByIndex returns the nested field corresponding to index.
918 func (t *structType) FieldByIndex(index []int) (f StructField) {
919 f.Type = toType(&t.rtype)
920 for i, x := range index {
921 if i > 0 {
922 ft := f.Type
923 if ft.Kind() == Ptr && ft.Elem().Kind() == Struct {
924 ft = ft.Elem()
926 f.Type = ft
928 f = f.Type.Field(x)
930 return
933 // A fieldScan represents an item on the fieldByNameFunc scan work list.
934 type fieldScan struct {
935 typ *structType
936 index []int
939 // FieldByNameFunc returns the struct field with a name that satisfies the
940 // match function and a boolean to indicate if the field was found.
941 func (t *structType) FieldByNameFunc(match func(string) bool) (result StructField, ok bool) {
942 // This uses the same condition that the Go language does: there must be a unique instance
943 // of the match at a given depth level. If there are multiple instances of a match at the
944 // same depth, they annihilate each other and inhibit any possible match at a lower level.
945 // The algorithm is breadth first search, one depth level at a time.
947 // The current and next slices are work queues:
948 // current lists the fields to visit on this depth level,
949 // and next lists the fields on the next lower level.
950 current := []fieldScan{}
951 next := []fieldScan{{typ: t}}
953 // nextCount records the number of times an embedded type has been
954 // encountered and considered for queueing in the 'next' slice.
955 // We only queue the first one, but we increment the count on each.
956 // If a struct type T can be reached more than once at a given depth level,
957 // then it annihilates itself and need not be considered at all when we
958 // process that next depth level.
959 var nextCount map[*structType]int
961 // visited records the structs that have been considered already.
962 // Embedded pointer fields can create cycles in the graph of
963 // reachable embedded types; visited avoids following those cycles.
964 // It also avoids duplicated effort: if we didn't find the field in an
965 // embedded type T at level 2, we won't find it in one at level 4 either.
966 visited := map[*structType]bool{}
968 for len(next) > 0 {
969 current, next = next, current[:0]
970 count := nextCount
971 nextCount = nil
973 // Process all the fields at this depth, now listed in 'current'.
974 // The loop queues embedded fields found in 'next', for processing during the next
975 // iteration. The multiplicity of the 'current' field counts is recorded
976 // in 'count'; the multiplicity of the 'next' field counts is recorded in 'nextCount'.
977 for _, scan := range current {
978 t := scan.typ
979 if visited[t] {
980 // We've looked through this type before, at a higher level.
981 // That higher level would shadow the lower level we're now at,
982 // so this one can't be useful to us. Ignore it.
983 continue
985 visited[t] = true
986 for i := range t.fields {
987 f := &t.fields[i]
988 // Find name and (for anonymous field) type for field f.
989 fname := *f.name
990 var ntyp *rtype
991 if f.anon() {
992 // Anonymous field of type T or *T.
993 ntyp = f.typ
994 if ntyp.Kind() == Ptr {
995 ntyp = ntyp.Elem().common()
999 // Does it match?
1000 if match(fname) {
1001 // Potential match
1002 if count[t] > 1 || ok {
1003 // Name appeared multiple times at this level: annihilate.
1004 return StructField{}, false
1006 result = t.Field(i)
1007 result.Index = nil
1008 result.Index = append(result.Index, scan.index...)
1009 result.Index = append(result.Index, i)
1010 ok = true
1011 continue
1014 // Queue embedded struct fields for processing with next level,
1015 // but only if we haven't seen a match yet at this level and only
1016 // if the embedded types haven't already been queued.
1017 if ok || ntyp == nil || ntyp.Kind() != Struct {
1018 continue
1020 ntyp = toType(ntyp).common()
1021 styp := (*structType)(unsafe.Pointer(ntyp))
1022 if nextCount[styp] > 0 {
1023 nextCount[styp] = 2 // exact multiple doesn't matter
1024 continue
1026 if nextCount == nil {
1027 nextCount = map[*structType]int{}
1029 nextCount[styp] = 1
1030 if count[t] > 1 {
1031 nextCount[styp] = 2 // exact multiple doesn't matter
1033 var index []int
1034 index = append(index, scan.index...)
1035 index = append(index, i)
1036 next = append(next, fieldScan{styp, index})
1039 if ok {
1040 break
1043 return
1046 // FieldByName returns the struct field with the given name
1047 // and a boolean to indicate if the field was found.
1048 func (t *structType) FieldByName(name string) (f StructField, present bool) {
1049 // Quick check for top-level name, or struct without anonymous fields.
1050 hasAnon := false
1051 if name != "" {
1052 for i := range t.fields {
1053 tf := &t.fields[i]
1054 if *tf.name == name {
1055 return t.Field(i), true
1057 if tf.anon() {
1058 hasAnon = true
1062 if !hasAnon {
1063 return
1065 return t.FieldByNameFunc(func(s string) bool { return s == name })
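
// exampleFieldLookup is an illustrative sketch, not part of the original
// file: it shows FieldByName finding a promoted field of an embedded struct,
// and FieldByNameFunc matching by a predicate. The type and helper names are
// hypothetical.
func exampleFieldLookup() {
	type base struct{ ID int }
	type record struct {
		base
		Name string
	}
	t := TypeOf(record{})
	if f, ok := t.FieldByName("ID"); ok {
		_ = f.Index // []int{0, 0}: the embedded base, then its ID field
	}
	_, _ = t.FieldByNameFunc(func(s string) bool { return s == "Name" })
}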
1068 // TypeOf returns the reflection Type that represents the dynamic type of i.
1069 // If i is a nil interface value, TypeOf returns nil.
1070 func TypeOf(i interface{}) Type {
1071 eface := *(*emptyInterface)(unsafe.Pointer(&i))
1072 return toType(eface.typ)
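
// exampleTypeOf is an illustrative sketch, not part of the original file:
// TypeOf reports the dynamic type stored in an interface value, and nil for
// a nil interface. The helper name is hypothetical.
func exampleTypeOf() (Type, Type) {
	var i interface{}
	nilType := TypeOf(i) // nil: the interface holds no value
	i = int64(7)
	return nilType, TypeOf(i) // the second result describes int64
}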
1075 // ptrMap is the cache for PtrTo.
1076 var ptrMap sync.Map // map[*rtype]*ptrType
1078 // PtrTo returns the pointer type with element t.
1079 // For example, if t represents type Foo, PtrTo(t) represents *Foo.
1080 func PtrTo(t Type) Type {
1081 return t.(*rtype).ptrTo()
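
// examplePtrTo is an illustrative sketch, not part of the original file:
// PtrTo(T) is the type *T, so its Elem is T again. The helper name is
// hypothetical.
func examplePtrTo() bool {
	t := TypeOf(0) // int
	pt := PtrTo(t) // *int
	return pt.Elem() == t // true: Type values are comparable
}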
1084 func (t *rtype) ptrTo() *rtype {
1085 if p := t.ptrToThis; p != nil {
1086 return p
1089 // Check the cache.
1090 if pi, ok := ptrMap.Load(t); ok {
1091 return &pi.(*ptrType).rtype
1094 s := "*" + *t.string
1096 canonicalTypeLock.RLock()
1097 r, ok := canonicalType[s]
1098 canonicalTypeLock.RUnlock()
1099 if ok {
1100 p := (*ptrType)(unsafe.Pointer(r.(*rtype)))
1101 pi, _ := ptrMap.LoadOrStore(t, p)
1102 return &pi.(*ptrType).rtype
1105 // Create a new ptrType starting with the description
1106 // of an *unsafe.Pointer.
1107 var iptr interface{} = (*unsafe.Pointer)(nil)
1108 prototype := *(**ptrType)(unsafe.Pointer(&iptr))
1109 pp := *prototype
1111 pp.string = &s
1112 pp.ptrToThis = nil
1114 // For the type structures linked into the binary, the
1115 // compiler provides a good hash of the string.
1116 // Create a good hash for the new string by using
1117 // the FNV-1 hash's mixing function to combine the
1118 // old hash and the new "*".
1119 // p.hash = fnv1(t.hash, '*')
1120 // This is the gccgo version.
1121 pp.hash = (t.hash << 4) + 9
1123 pp.uncommonType = nil
1124 pp.ptrToThis = nil
1125 pp.elem = t
1127 q := canonicalize(&pp.rtype)
1128 p := (*ptrType)(unsafe.Pointer(q.(*rtype)))
1130 pi, _ := ptrMap.LoadOrStore(t, p)
1131 return &pi.(*ptrType).rtype
1134 // fnv1 incorporates the list of bytes into the hash x using the FNV-1 hash function.
1135 func fnv1(x uint32, list ...byte) uint32 {
1136 for _, b := range list {
1137 x = x*16777619 ^ uint32(b)
1139 return x
1142 func (t *rtype) Implements(u Type) bool {
1143 if u == nil {
1144 panic("reflect: nil type passed to Type.Implements")
1146 if u.Kind() != Interface {
1147 panic("reflect: non-interface type passed to Type.Implements")
1149 return implements(u.(*rtype), t)
1152 func (t *rtype) AssignableTo(u Type) bool {
1153 if u == nil {
1154 panic("reflect: nil type passed to Type.AssignableTo")
1156 uu := u.(*rtype)
1157 return directlyAssignable(uu, t) || implements(uu, t)
1160 func (t *rtype) ConvertibleTo(u Type) bool {
1161 if u == nil {
1162 panic("reflect: nil type passed to Type.ConvertibleTo")
1164 uu := u.(*rtype)
1165 return convertOp(uu, t) != nil
1168 func (t *rtype) Comparable() bool {
1169 switch t.Kind() {
1170 case Bool, Int, Int8, Int16, Int32, Int64,
1171 Uint, Uint8, Uint16, Uint32, Uint64, Uintptr,
1172 Float32, Float64, Complex64, Complex128,
1173 Chan, Interface, Ptr, String, UnsafePointer:
1174 return true
1176 case Func, Map, Slice:
1177 return false
1179 case Array:
1180 return (*arrayType)(unsafe.Pointer(t)).elem.Comparable()
1182 case Struct:
1183 tt := (*structType)(unsafe.Pointer(t))
1184 for i := range tt.fields {
1185 if !tt.fields[i].typ.Comparable() {
1186 return false
1189 return true
1191 default:
1192 panic("reflect: impossible")
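
// exampleTypePredicates is an illustrative sketch, not part of the original
// file: it exercises Implements, AssignableTo, ConvertibleTo and Comparable.
// The empty interface type is obtained with the usual (*interface{})(nil)
// idiom; the helper name is hypothetical.
func exampleTypePredicates() []bool {
	empty := TypeOf((*interface{})(nil)).Elem() // the type interface{}
	intType := TypeOf(0)
	return []bool{
		intType.Implements(empty),               // true: every type implements interface{}
		intType.AssignableTo(TypeOf(int64(0))),  // false: distinct named types
		intType.ConvertibleTo(TypeOf(int64(0))), // true: int converts to int64
		TypeOf([]int(nil)).Comparable(),         // false: slices are not comparable
	}
}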
1196 // implements reports whether the type V implements the interface type T.
1197 func implements(T, V *rtype) bool {
1198 if T.Kind() != Interface {
1199 return false
1201 t := (*interfaceType)(unsafe.Pointer(T))
1202 if len(t.methods) == 0 {
1203 return true
1206 // The same algorithm applies in both cases, but the
1207 // method tables for an interface type and a concrete type
1208 // are different, so the code is duplicated.
1209 // In both cases the algorithm is a linear scan over the two
1210 // lists - T's methods and V's methods - simultaneously.
1211 // Since method tables are stored in a unique sorted order
1212 // (alphabetical, with no duplicate method names), the scan
1213 // through V's methods must hit a match for each of T's
1214 // methods along the way, or else V does not implement T.
1215 // This lets us run the scan in overall linear time instead of
1216 // the quadratic time a naive search would require.
1217 // See also ../runtime/iface.go.
1218 if V.Kind() == Interface {
1219 v := (*interfaceType)(unsafe.Pointer(V))
1220 i := 0
1221 for j := 0; j < len(v.methods); j++ {
1222 tm := &t.methods[i]
1223 vm := &v.methods[j]
1224 if *vm.name == *tm.name && (vm.pkgPath == tm.pkgPath || (vm.pkgPath != nil && tm.pkgPath != nil && *vm.pkgPath == *tm.pkgPath)) && toType(vm.typ).common() == toType(tm.typ).common() {
1225 if i++; i >= len(t.methods) {
1226 return true
1230 return false
1233 v := V.uncommon()
1234 if v == nil {
1235 return false
1237 i := 0
1238 for j := 0; j < len(v.methods); j++ {
1239 tm := &t.methods[i]
1240 vm := &v.methods[j]
1241 if *vm.name == *tm.name && (vm.pkgPath == tm.pkgPath || (vm.pkgPath != nil && tm.pkgPath != nil && *vm.pkgPath == *tm.pkgPath)) && toType(vm.mtyp).common() == toType(tm.typ).common() {
1242 if i++; i >= len(t.methods) {
1243 return true
1247 return false
1250 // directlyAssignable reports whether a value x of type V can be directly
1251 // assigned (using memmove) to a value of type T.
1252 // https://golang.org/doc/go_spec.html#Assignability
1253 // Ignoring the interface rules (implemented elsewhere)
1254 // and the ideal constant rules (no ideal constants at run time).
1255 func directlyAssignable(T, V *rtype) bool {
1256 // x's type V is identical to T?
1257 if T == V {
1258 return true
1261 // Otherwise at least one of T and V must be unnamed
1262 // and they must have the same kind.
1263 if T.Name() != "" && V.Name() != "" || T.Kind() != V.Kind() {
1264 return false
1267 // x's type T and V must have identical underlying types.
1268 return haveIdenticalUnderlyingType(T, V, true)
1271 func haveIdenticalType(T, V Type, cmpTags bool) bool {
1272 if cmpTags {
1273 return T == V
1276 if T.Name() != V.Name() || T.Kind() != V.Kind() {
1277 return false
1280 return haveIdenticalUnderlyingType(T.common(), V.common(), false)
1283 func haveIdenticalUnderlyingType(T, V *rtype, cmpTags bool) bool {
1284 if T == V {
1285 return true
1288 kind := T.Kind()
1289 if kind != V.Kind() {
1290 return false
1293 // Non-composite types of equal kind have same underlying type
1294 // (the predefined instance of the type).
1295 if Bool <= kind && kind <= Complex128 || kind == String || kind == UnsafePointer {
1296 return true
1299 // Composite types.
1300 switch kind {
1301 case Array:
1302 return T.Len() == V.Len() && haveIdenticalType(T.Elem(), V.Elem(), cmpTags)
1304 case Chan:
1305 // Special case:
1306 // x is a bidirectional channel value, T is a channel type,
1307 // and x's type V and T have identical element types.
1308 if V.ChanDir() == BothDir && haveIdenticalType(T.Elem(), V.Elem(), cmpTags) {
1309 return true
1312 // Otherwise continue test for identical underlying type.
1313 return V.ChanDir() == T.ChanDir() && haveIdenticalType(T.Elem(), V.Elem(), cmpTags)
1315 case Func:
1316 t := (*funcType)(unsafe.Pointer(T))
1317 v := (*funcType)(unsafe.Pointer(V))
1318 if t.dotdotdot != v.dotdotdot || len(t.in) != len(v.in) || len(t.out) != len(v.out) {
1319 return false
1321 for i, typ := range t.in {
1322 if !haveIdenticalType(typ, v.in[i], cmpTags) {
1323 return false
1326 for i, typ := range t.out {
1327 if !haveIdenticalType(typ, v.out[i], cmpTags) {
1328 return false
1331 return true
1333 case Interface:
1334 t := (*interfaceType)(unsafe.Pointer(T))
1335 v := (*interfaceType)(unsafe.Pointer(V))
1336 if len(t.methods) == 0 && len(v.methods) == 0 {
1337 return true
1339 // Might have the same methods but still
1340 // need a run time conversion.
1341 return false
1343 case Map:
1344 return haveIdenticalType(T.Key(), V.Key(), cmpTags) && haveIdenticalType(T.Elem(), V.Elem(), cmpTags)
1346 case Ptr, Slice:
1347 return haveIdenticalType(T.Elem(), V.Elem(), cmpTags)
1349 case Struct:
1350 t := (*structType)(unsafe.Pointer(T))
1351 v := (*structType)(unsafe.Pointer(V))
1352 if len(t.fields) != len(v.fields) {
1353 return false
1355 for i := range t.fields {
1356 tf := &t.fields[i]
1357 vf := &v.fields[i]
1358 if tf.name != vf.name && (tf.name == nil || vf.name == nil || *tf.name != *vf.name) {
1359 return false
1361 if tf.pkgPath != vf.pkgPath && (tf.pkgPath == nil || vf.pkgPath == nil || *tf.pkgPath != *vf.pkgPath) {
1362 return false
1364 if !haveIdenticalType(tf.typ, vf.typ, cmpTags) {
1365 return false
1367 if cmpTags && tf.tag != vf.tag && (tf.tag == nil || vf.tag == nil || *tf.tag != *vf.tag) {
1368 return false
1370 if tf.offsetAnon != vf.offsetAnon {
1371 return false
1374 return true
1377 return false
1380 // The lookupCache caches ArrayOf, ChanOf, MapOf and SliceOf lookups.
1381 var lookupCache sync.Map // map[cacheKey]*rtype
1383 // A cacheKey is the key for use in the lookupCache.
1384 // Four values describe any of the types we are looking for:
1385 // type kind, one or two subtypes, and an extra integer.
1386 type cacheKey struct {
1387 kind Kind
1388 t1 *rtype
1389 t2 *rtype
1390 extra uintptr
1393 // The funcLookupCache caches FuncOf lookups.
1394 // FuncOf does not share the common lookupCache since cacheKey is not
1395 // sufficient to represent functions unambiguously.
1396 var funcLookupCache struct {
1397 sync.Mutex // Guards stores (but not loads) on m.
1399 // m is a map[uint32][]*rtype keyed by the hash calculated in FuncOf.
1400 // Elements of m are append-only and thus safe for concurrent reading.
1401 m sync.Map
1404 // ChanOf returns the channel type with the given direction and element type.
1405 // For example, if t represents int, ChanOf(RecvDir, t) represents <-chan int.
1407 // The gc runtime imposes a limit of 64 kB on channel element types.
1408 // If t's size is equal to or exceeds this limit, ChanOf panics.
1409 func ChanOf(dir ChanDir, t Type) Type {
1410 typ := t.(*rtype)
1412 // Look in cache.
1413 ckey := cacheKey{Chan, typ, nil, uintptr(dir)}
1414 if ch, ok := lookupCache.Load(ckey); ok {
1415 return ch.(*rtype)
1418 // This restriction is imposed by the gc compiler and the runtime.
1419 if typ.size >= 1<<16 {
1420 panic("reflect.ChanOf: element size too large")
1423 // Look in known types.
1424 // TODO: Precedence when constructing string.
1425 var s string
1426 switch dir {
1427 default:
1428 panic("reflect.ChanOf: invalid dir")
1429 case SendDir:
1430 s = "chan<- " + *typ.string
1431 case RecvDir:
1432 s = "<-chan " + *typ.string
1433 case BothDir:
1434 s = "chan " + *typ.string
1437 // Make a channel type.
1438 var ichan interface{} = (chan unsafe.Pointer)(nil)
1439 prototype := *(**chanType)(unsafe.Pointer(&ichan))
1440 ch := *prototype
1441 ch.dir = uintptr(dir)
1442 ch.string = &s
1444 // gccgo uses a different hash.
1445 // ch.hash = fnv1(typ.hash, 'c', byte(dir))
1446 ch.hash = 0
1447 if dir&SendDir != 0 {
1448 ch.hash += 1
1450 if dir&RecvDir != 0 {
1451 ch.hash += 2
1453 ch.hash += typ.hash << 2
1454 ch.hash <<= 3
1455 ch.hash += 15
1457 ch.elem = typ
1458 ch.uncommonType = nil
1459 ch.ptrToThis = nil
1461 ti, _ := lookupCache.LoadOrStore(ckey, &ch.rtype)
1462 return ti.(Type)
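
// exampleChanOf is an illustrative sketch, not part of the original file:
// ChanOf builds channel types at run time. The helper name is hypothetical.
func exampleChanOf() (Type, Type) {
	elem := TypeOf(0)
	recv := ChanOf(RecvDir, elem) // <-chan int
	both := ChanOf(BothDir, elem) // chan int
	return recv, both
}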
1465 func ismapkey(*rtype) bool // implemented in runtime
1467 // MapOf returns the map type with the given key and element types.
1468 // For example, if k represents int and e represents string,
1469 // MapOf(k, e) represents map[int]string.
1471 // If the key type is not a valid map key type (that is, if it does
1472 // not implement Go's == operator), MapOf panics.
1473 func MapOf(key, elem Type) Type {
1474 ktyp := key.(*rtype)
1475 etyp := elem.(*rtype)
1477 if !ismapkey(ktyp) {
1478 panic("reflect.MapOf: invalid key type " + ktyp.String())
1481 // Look in cache.
1482 ckey := cacheKey{Map, ktyp, etyp, 0}
1483 if mt, ok := lookupCache.Load(ckey); ok {
1484 return mt.(Type)
1487 // Look in known types.
1488 s := "map[" + *ktyp.string + "]" + *etyp.string
1490 // Make a map type.
1491 var imap interface{} = (map[unsafe.Pointer]unsafe.Pointer)(nil)
1492 mt := **(**mapType)(unsafe.Pointer(&imap))
1493 mt.string = &s
1495 // gccgo uses a different hash
1496 // mt.hash = fnv1(etyp.hash, 'm', byte(ktyp.hash>>24), byte(ktyp.hash>>16), byte(ktyp.hash>>8), byte(ktyp.hash))
1497 mt.hash = ktyp.hash + etyp.hash + 2 + 14
1499 mt.key = ktyp
1500 mt.elem = etyp
1501 mt.uncommonType = nil
1502 mt.ptrToThis = nil
1504 mt.bucket = bucketOf(ktyp, etyp)
1505 if ktyp.size > maxKeySize {
1506 mt.keysize = uint8(ptrSize)
1507 mt.indirectkey = 1
1508 } else {
1509 mt.keysize = uint8(ktyp.size)
1510 mt.indirectkey = 0
1512 if etyp.size > maxValSize {
1513 mt.valuesize = uint8(ptrSize)
1514 mt.indirectvalue = 1
1515 } else {
1516 mt.valuesize = uint8(etyp.size)
1517 mt.indirectvalue = 0
1519 mt.bucketsize = uint16(mt.bucket.size)
1520 mt.reflexivekey = isReflexive(ktyp)
1521 mt.needkeyupdate = needKeyUpdate(ktyp)
1523 ti, _ := lookupCache.LoadOrStore(ckey, &mt.rtype)
1524 return ti.(Type)
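
// exampleMapOf is an illustrative sketch, not part of the original file:
// MapOf builds map[string]int at run time; it would panic if the key type
// were not a valid map key (for example a slice). The helper name is
// hypothetical.
func exampleMapOf() Type {
	return MapOf(TypeOf(""), TypeOf(0)) // map[string]int
}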
1527 // FuncOf returns the function type with the given argument and result types.
1528 // For example if k represents int and e represents string,
1529 // FuncOf([]Type{k}, []Type{e}, false) represents func(int) string.
1531 // The variadic argument controls whether the function is variadic. FuncOf
1532 // panics if in[len(in)-1] does not represent a slice and variadic is
1533 // true.
1534 func FuncOf(in, out []Type, variadic bool) Type {
1535 if variadic && (len(in) == 0 || in[len(in)-1].Kind() != Slice) {
1536 panic("reflect.FuncOf: last arg of variadic func must be slice")
1539 // Make a func type.
1540 var ifunc interface{} = (func())(nil)
1541 prototype := *(**funcType)(unsafe.Pointer(&ifunc))
1542 ft := new(funcType)
1543 *ft = *prototype
1545 // Build a hash and minimally populate ft.
1546 var hash uint32
1547 var fin, fout []*rtype
1548 shift := uint(1)
1549 for _, in := range in {
1550 t := in.(*rtype)
1551 fin = append(fin, t)
1552 hash += t.hash << shift
1553 shift++
1555 shift = 2
1556 for _, out := range out {
1557 t := out.(*rtype)
1558 fout = append(fout, t)
1559 hash += t.hash << shift
1560 shift++
1562 if variadic {
1563 hash++
1565 hash <<= 4
1566 hash += 8
1567 ft.hash = hash
1568 ft.in = fin
1569 ft.out = fout
1570 ft.dotdotdot = variadic
1572 // Look in cache.
1573 if ts, ok := funcLookupCache.m.Load(hash); ok {
1574 for _, t := range ts.([]*rtype) {
1575 if haveIdenticalUnderlyingType(&ft.rtype, t, true) {
1576 return t
1581 // Not in cache, lock and retry.
1582 funcLookupCache.Lock()
1583 defer funcLookupCache.Unlock()
1584 if ts, ok := funcLookupCache.m.Load(hash); ok {
1585 for _, t := range ts.([]*rtype) {
1586 if haveIdenticalUnderlyingType(&ft.rtype, t, true) {
1587 return t
1592 addToCache := func(tt *rtype) Type {
1593 var rts []*rtype
1594 if rti, ok := funcLookupCache.m.Load(hash); ok {
1595 rts = rti.([]*rtype)
1597 funcLookupCache.m.Store(hash, append(rts, tt))
1598 return tt
1601 str := funcStr(ft)
1603 // Populate the remaining fields of ft and store in cache.
1604 ft.string = &str
1605 ft.uncommonType = nil
1606 ft.ptrToThis = nil
1607 return addToCache(&ft.rtype)
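
// exampleFuncOf is an illustrative sketch, not part of the original file:
// FuncOf builds func(string, ...int) bool; the final In type must be a slice
// when variadic is true. The helper name is hypothetical.
func exampleFuncOf() Type {
	in := []Type{TypeOf(""), TypeOf([]int(nil))}
	out := []Type{TypeOf(false)}
	return FuncOf(in, out, true) // func(string, ...int) bool
}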
1610 // funcStr builds a string representation of a funcType.
1611 func funcStr(ft *funcType) string {
1612 repr := make([]byte, 0, 64)
1613 repr = append(repr, "func("...)
1614 for i, t := range ft.in {
1615 if i > 0 {
1616 repr = append(repr, ", "...)
1618 if ft.dotdotdot && i == len(ft.in)-1 {
1619 repr = append(repr, "..."...)
1620 repr = append(repr, *(*sliceType)(unsafe.Pointer(t)).elem.string...)
1621 } else {
1622 repr = append(repr, *t.string...)
1625 repr = append(repr, ')')
1626 if l := len(ft.out); l == 1 {
1627 repr = append(repr, ' ')
1628 } else if l > 1 {
1629 repr = append(repr, " ("...)
1631 for i, t := range ft.out {
1632 if i > 0 {
1633 repr = append(repr, ", "...)
1635 repr = append(repr, *t.string...)
1637 if len(ft.out) > 1 {
1638 repr = append(repr, ')')
1640 return string(repr)
1643 // isReflexive reports whether the == operation on the type is reflexive.
1644 // That is, x == x for all values x of type t.
1645 func isReflexive(t *rtype) bool {
1646 switch t.Kind() {
1647 case Bool, Int, Int8, Int16, Int32, Int64, Uint, Uint8, Uint16, Uint32, Uint64, Uintptr, Chan, Ptr, String, UnsafePointer:
1648 return true
1649 case Float32, Float64, Complex64, Complex128, Interface:
1650 return false
1651 case Array:
1652 tt := (*arrayType)(unsafe.Pointer(t))
1653 return isReflexive(tt.elem)
1654 case Struct:
1655 tt := (*structType)(unsafe.Pointer(t))
1656 for _, f := range tt.fields {
1657 if !isReflexive(f.typ) {
1658 return false
1661 return true
1662 default:
1663 // Func, Map, Slice, Invalid
1664 panic("isReflexive called on non-key type " + t.String())
1668 // needKeyUpdate reports whether map overwrites require the key to be copied.
1669 func needKeyUpdate(t *rtype) bool {
1670 switch t.Kind() {
1671 case Bool, Int, Int8, Int16, Int32, Int64, Uint, Uint8, Uint16, Uint32, Uint64, Uintptr, Chan, Ptr, UnsafePointer:
1672 return false
1673 case Float32, Float64, Complex64, Complex128, Interface, String:
1674 // Float keys can be updated from +0 to -0.
1675 // String keys can be updated to use a smaller backing store.
1676 // Interfaces might have floats or strings in them.
1677 return true
1678 case Array:
1679 tt := (*arrayType)(unsafe.Pointer(t))
1680 return needKeyUpdate(tt.elem)
1681 case Struct:
1682 tt := (*structType)(unsafe.Pointer(t))
1683 for _, f := range tt.fields {
1684 if needKeyUpdate(f.typ) {
1685 return true
1688 return false
1689 default:
1690 // Func, Map, Slice, Invalid
1691 panic("needKeyUpdate called on non-key type " + t.String())
1695 // Make sure these routines stay in sync with ../../runtime/hashmap.go!
1696 // These types exist only for GC, so we only fill out GC relevant info.
1697 // Currently, that's just size and the GC program. We also fill in string
1698 // for possible debugging use.
1699 const (
1700 bucketSize uintptr = 8
1701 maxKeySize uintptr = 128
1702 maxValSize uintptr = 128
1705 func bucketOf(ktyp, etyp *rtype) *rtype {
1706 // See comment on hmap.overflow in ../runtime/hashmap.go.
1707 var kind uint8
1708 if ktyp.kind&kindNoPointers != 0 && etyp.kind&kindNoPointers != 0 &&
1709 ktyp.size <= maxKeySize && etyp.size <= maxValSize {
1710 kind = kindNoPointers
1713 if ktyp.size > maxKeySize {
1714 ktyp = PtrTo(ktyp).(*rtype)
1716 if etyp.size > maxValSize {
1717 etyp = PtrTo(etyp).(*rtype)
1720 // Prepare GC data if any.
1721 // A bucket is at most bucketSize*(1+maxKeySize+maxValSize)+2*ptrSize bytes,
1722 // or 2072 bytes, or 259 pointer-size words, or 33 bytes of pointer bitmap.
1723 // Note that since the key and value are known to be <= 128 bytes,
1724 // they're guaranteed to have bitmaps instead of GC programs.
1725 var gcdata *byte
1726 var ptrdata uintptr
1728 size := bucketSize
1729 size = align(size, uintptr(ktyp.fieldAlign))
1730 size += bucketSize * ktyp.size
1731 size = align(size, uintptr(etyp.fieldAlign))
1732 size += bucketSize * etyp.size
1734 maxAlign := uintptr(ktyp.fieldAlign)
1735 if maxAlign < uintptr(etyp.fieldAlign) {
1736 maxAlign = uintptr(etyp.fieldAlign)
1738 if maxAlign > ptrSize {
1739 size = align(size, maxAlign)
1740 size += align(ptrSize, maxAlign) - ptrSize
1741 } else if maxAlign < ptrSize {
1742 size = align(size, ptrSize)
1743 maxAlign = ptrSize
1746 ovoff := size
1747 size += ptrSize
1749 if kind != kindNoPointers {
1750 nptr := size / ptrSize
1751 mask := make([]byte, (nptr+7)/8)
1752 psize := bucketSize
1753 psize = align(psize, uintptr(ktyp.fieldAlign))
1754 base := psize / ptrSize
1756 if ktyp.kind&kindNoPointers == 0 {
1757 if ktyp.kind&kindGCProg != 0 {
1758 panic("reflect: unexpected GC program in MapOf")
1760 kmask := (*[16]byte)(unsafe.Pointer(ktyp.gcdata))
1761 for i := uintptr(0); i < ktyp.ptrdata/ptrSize; i++ {
1762 if (kmask[i/8]>>(i%8))&1 != 0 {
1763 for j := uintptr(0); j < bucketSize; j++ {
1764 word := base + j*ktyp.size/ptrSize + i
1765 mask[word/8] |= 1 << (word % 8)
1770 psize += bucketSize * ktyp.size
1771 psize = align(psize, uintptr(etyp.fieldAlign))
1772 base = psize / ptrSize
1774 if etyp.kind&kindNoPointers == 0 {
1775 if etyp.kind&kindGCProg != 0 {
1776 panic("reflect: unexpected GC program in MapOf")
1778 emask := (*[16]byte)(unsafe.Pointer(etyp.gcdata))
1779 for i := uintptr(0); i < etyp.ptrdata/ptrSize; i++ {
1780 if (emask[i/8]>>(i%8))&1 != 0 {
1781 for j := uintptr(0); j < bucketSize; j++ {
1782 word := base + j*etyp.size/ptrSize + i
1783 mask[word/8] |= 1 << (word % 8)
1789 word := ovoff / ptrSize
1790 mask[word/8] |= 1 << (word % 8)
1791 gcdata = &mask[0]
1792 ptrdata = (word + 1) * ptrSize
1794 // overflow word must be last
1795 if ptrdata != size {
1796 panic("reflect: bad layout computation in MapOf")
1800 b := &rtype{
1801 align: int8(maxAlign),
1802 fieldAlign: uint8(maxAlign),
1803 size: size,
1804 kind: kind,
1805 ptrdata: ptrdata,
1806 gcdata: gcdata,
1808 s := "bucket(" + *ktyp.string + "," + *etyp.string + ")"
1809 b.string = &s
1810 return b
1813 // SliceOf returns the slice type with element type t.
1814 // For example, if t represents int, SliceOf(t) represents []int.
1815 func SliceOf(t Type) Type {
1816 typ := t.(*rtype)
1818 // Look in cache.
1819 ckey := cacheKey{Slice, typ, nil, 0}
1820 if slice, ok := lookupCache.Load(ckey); ok {
1821 return slice.(Type)
1824 // Look in known types.
1825 s := "[]" + *typ.string
1827 // Make a slice type.
1828 var islice interface{} = ([]unsafe.Pointer)(nil)
1829 prototype := *(**sliceType)(unsafe.Pointer(&islice))
1830 slice := *prototype
1831 slice.string = &s
1833 // gccgo uses a different hash.
1834 // slice.hash = fnv1(typ.hash, '[')
1835 slice.hash = typ.hash + 1 + 13
1837 slice.elem = typ
1838 slice.uncommonType = nil
1839 slice.ptrToThis = nil
1841 ti, _ := lookupCache.LoadOrStore(ckey, &slice.rtype)
1842 return ti.(Type)
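
// exampleSliceOf is an illustrative sketch, not part of the original file:
// SliceOf(T) is the type []T. The helper name is hypothetical.
func exampleSliceOf() bool {
	t := SliceOf(TypeOf(byte(0))) // []uint8
	return t.Elem().Kind() == Uint8
}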
1845 // The structLookupCache caches StructOf lookups.
1846 // StructOf does not share the common lookupCache since we need to pin
1847 // the memory associated with *structTypeFixedN.
1848 var structLookupCache struct {
1849 sync.Mutex // Guards stores (but not loads) on m.
1851 // m is a map[uint32][]Type keyed by the hash calculated in StructOf.
1852 // Elements in m are append-only and thus safe for concurrent reading.
1853 m sync.Map
1856 // isLetter returns true if a given 'rune' is classified as a Letter.
1857 func isLetter(ch rune) bool {
1858 return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' || ch >= utf8.RuneSelf && unicode.IsLetter(ch)
1861 // isValidFieldName checks if a string is a valid (struct) field name or not.
1863 // According to the language spec, a field name should be an identifier.
1865 // identifier = letter { letter | unicode_digit } .
1866 // letter = unicode_letter | "_" .
1867 func isValidFieldName(fieldName string) bool {
1868 for i, c := range fieldName {
1869 if i == 0 && !isLetter(c) {
1870 return false
1873 if !(isLetter(c) || unicode.IsDigit(c)) {
1874 return false
1878 return len(fieldName) > 0
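
// exampleStructOf is an illustrative sketch, not part of the original file:
// it builds struct { Name string `json:"name"` } with StructOf, which is
// defined below. The helper name is hypothetical.
func exampleStructOf() Type {
	return StructOf([]StructField{{
		Name: "Name",
		Type: TypeOf(""),
		Tag:  `json:"name"`,
	}})
}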
1881 // StructOf returns the struct type containing fields.
1882 // The Offset and Index fields are ignored and computed as they would be
1883 // by the compiler.
1885 // StructOf currently does not generate wrapper methods for embedded fields.
1886 // This limitation may be lifted in a future version.
1887 func StructOf(fields []StructField) Type {
1888 var (
1889 hash = uint32(0)
1890 size uintptr
1891 typalign int8
1892 comparable = true
1893 hashable = true
1895 fs = make([]structField, len(fields))
1896 repr = make([]byte, 0, 64)
1897 fset = map[string]struct{}{} // fields' names
1899 hasPtr = false // records whether at least one struct-field is a pointer
1900 hasGCProg = false // records whether a struct-field type has a GCProg
1903 lastzero := uintptr(0)
1904 repr = append(repr, "struct {"...)
1905 for i, field := range fields {
1906 if field.Name == "" {
1907 panic("reflect.StructOf: field " + strconv.Itoa(i) + " has no name")
1909 if !isValidFieldName(field.Name) {
1910 panic("reflect.StructOf: field " + strconv.Itoa(i) + " has invalid name")
1912 if field.Type == nil {
1913 panic("reflect.StructOf: field " + strconv.Itoa(i) + " has no type")
1915 f := runtimeStructField(field)
1916 ft := f.typ
1917 if ft.kind&kindGCProg != 0 {
1918 hasGCProg = true
1920 if ft.pointers() {
1921 hasPtr = true
1924 // Update string and hash
1925 name := *f.name
1926 hash = (hash << 1) + ft.hash
1927 if !f.anon() {
1928 repr = append(repr, (" " + name)...)
1929 } else {
1930 // Embedded field
1931 repr = append(repr, " ?"...)
1932 if f.typ.Kind() == Ptr {
1933 // Embedded ** and *interface{} are illegal
1934 elem := ft.Elem()
1935 if k := elem.Kind(); k == Ptr || k == Interface {
1936 panic("reflect.StructOf: illegal anonymous field type " + ft.String())
1938 name = elem.String()
1939 } else {
1940 name = ft.String()
1943 switch f.typ.Kind() {
1944 case Interface:
1945 ift := (*interfaceType)(unsafe.Pointer(ft))
1946 if len(ift.methods) > 0 {
1947 panic("reflect.StructOf: embedded field with methods not implemented")
1949 case Ptr:
1950 ptr := (*ptrType)(unsafe.Pointer(ft))
1951 if unt := ptr.uncommon(); unt != nil {
1952 if len(unt.methods) > 0 {
1953 panic("reflect.StructOf: embedded field with methods not implemented")
1956 if unt := ptr.elem.uncommon(); unt != nil {
1957 if len(unt.methods) > 0 {
1958 panic("reflect.StructOf: embedded field with methods not implemented")
1961 default:
1962 if unt := ft.uncommon(); unt != nil {
1963 if len(unt.methods) > 0 {
1964 panic("reflect.StructOf: embedded field with methods not implemented")
1969 if _, dup := fset[name]; dup {
1970 panic("reflect.StructOf: duplicate field " + name)
1972 fset[name] = struct{}{}
1974 repr = append(repr, (" " + ft.String())...)
1975 if f.tag != nil {
1976 repr = append(repr, (" " + strconv.Quote(*f.tag))...)
1978 if i < len(fields)-1 {
1979 repr = append(repr, ';')
1982 comparable = comparable && (ft.equalfn != nil)
1983 hashable = hashable && (ft.hashfn != nil)
1985 offset := align(size, uintptr(ft.fieldAlign))
1986 if int8(ft.fieldAlign) > typalign {
1987 typalign = int8(ft.fieldAlign)
1989 size = offset + ft.size
1990 f.offsetAnon |= offset << 1
1992 if ft.size == 0 {
1993 lastzero = size
1996 fs[i] = f
1999 if size > 0 && lastzero == size {
2000 // This is a non-zero sized struct that ends in a
2001 // zero-sized field. We add an extra byte of padding,
2002 // to ensure that taking the address of the final
2003 // zero-sized field can't manufacture a pointer to the
2004 // next object in the heap. See issue 9401.
2005 size++
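// For instance (editor's illustration), struct{ A int64; B struct{} } gets one
// byte of padding after B, so on a 64-bit platform its size rounds up to 16
// rather than 8.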
2008 if len(fs) > 0 {
2009 repr = append(repr, ' ')
2011 repr = append(repr, '}')
2012 hash <<= 2
2013 str := string(repr)
2015 // Round the size up to be a multiple of the alignment.
2016 size = align(size, uintptr(typalign))
2018 // Make the struct type.
2019 var istruct interface{} = struct{}{}
2020 prototype := *(**structType)(unsafe.Pointer(&istruct))
2021 typ := new(structType)
2022 *typ = *prototype
2023 typ.fields = fs
2025 // Look in cache.
2026 if ts, ok := structLookupCache.m.Load(hash); ok {
2027 for _, st := range ts.([]Type) {
2028 t := st.common()
2029 if haveIdenticalUnderlyingType(&typ.rtype, t, true) {
2030 return t
2035 // Not in cache, lock and retry.
2036 structLookupCache.Lock()
2037 defer structLookupCache.Unlock()
2038 if ts, ok := structLookupCache.m.Load(hash); ok {
2039 for _, st := range ts.([]Type) {
2040 t := st.common()
2041 if haveIdenticalUnderlyingType(&typ.rtype, t, true) {
2042 return t
2047 addToCache := func(t Type) Type {
2048 var ts []Type
2049 if ti, ok := structLookupCache.m.Load(hash); ok {
2050 ts = ti.([]Type)
2052 structLookupCache.m.Store(hash, append(ts, t))
2053 return t
2056 typ.string = &str
2057 typ.hash = hash
2058 typ.size = size
2059 typ.align = typalign
2060 typ.fieldAlign = uint8(typalign)
2061 if !hasPtr {
2062 typ.kind |= kindNoPointers
2063 } else {
2064 typ.kind &^= kindNoPointers
2067 if hasGCProg {
2068 lastPtrField := 0
2069 for i, ft := range fs {
2070 if ft.typ.pointers() {
2071 lastPtrField = i
2074 prog := []byte{0, 0, 0, 0} // will be length of prog
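// Editor's note: prog is built in the runtime's GC-program encoding (see
// runGCProg in runtime/mbitmap.go): after the 4-byte length prefix, an opcode
// of 1..127 emits that many literal pointer bits taken from the following
// bytes, opcodes with the high bit set repeat previously emitted bits a
// varint-encoded number of times, and a 0 byte terminates the program.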
2075 for i, ft := range fs {
2076 if i > lastPtrField {
2077 // gcprog should not include anything for any field after
2078 // the last field that contains pointer data
2079 break
2081 // FIXME(sbinet) handle padding, fields smaller than a word
2082 elemGC := (*[1 << 30]byte)(unsafe.Pointer(ft.typ.gcdata))[:]
2083 elemPtrs := ft.typ.ptrdata / ptrSize
2084 switch {
2085 case ft.typ.kind&kindGCProg == 0 && ft.typ.ptrdata != 0:
2086 // Element is small with pointer mask; use as literal bits.
2087 mask := elemGC
2088 // Emit 120-bit chunks of full bytes (max is 127 but we avoid using partial bytes).
2089 var n uintptr
2090 for n = elemPtrs; n > 120; n -= 120 {
2091 prog = append(prog, 120)
2092 prog = append(prog, mask[:15]...)
2093 mask = mask[15:]
2095 prog = append(prog, byte(n))
2096 prog = append(prog, mask[:(n+7)/8]...)
2097 case ft.typ.kind&kindGCProg != 0:
2098 // Element has GC program; emit one element.
2099 elemProg := elemGC[4 : 4+*(*uint32)(unsafe.Pointer(&elemGC[0]))-1]
2100 prog = append(prog, elemProg...)
2102 // Pad from ptrdata to size.
2103 elemWords := ft.typ.size / ptrSize
2104 if elemPtrs < elemWords {
2105 // Emit literal 0 bit, then repeat as needed.
2106 prog = append(prog, 0x01, 0x00)
2107 if elemPtrs+1 < elemWords {
2108 prog = append(prog, 0x81)
2109 prog = appendVarint(prog, elemWords-elemPtrs-1)
2113 *(*uint32)(unsafe.Pointer(&prog[0])) = uint32(len(prog) - 4)
2114 typ.kind |= kindGCProg
2115 typ.gcdata = &prog[0]
2116 } else {
2117 typ.kind &^= kindGCProg
2118 bv := new(bitVector)
2119 addTypeBits(bv, 0, typ.common())
2120 if len(bv.data) > 0 {
2121 typ.gcdata = &bv.data[0]
2124 typ.ptrdata = typeptrdata(typ.common())
2126 if hashable {
2127 typ.hashfn = func(p unsafe.Pointer, seed uintptr) uintptr {
2128 o := seed
2129 for _, ft := range typ.fields {
2130 pi := unsafe.Pointer(uintptr(p) + ft.offset())
2131 o = ft.typ.hashfn(pi, o)
2133 return o
2135 } else {
2136 typ.hashfn = nil
2139 if comparable {
2140 typ.equalfn = func(p, q unsafe.Pointer) bool {
2141 for _, ft := range typ.fields {
2142 pi := unsafe.Pointer(uintptr(p) + ft.offset())
2143 qi := unsafe.Pointer(uintptr(q) + ft.offset())
2144 if !ft.typ.equalfn(pi, qi) {
2145 return false
2148 return true
2150 } else {
2151 typ.equalfn = nil
2154 typ.kind &^= kindDirectIface
2155 typ.uncommonType = nil
2156 typ.ptrToThis = nil
2158 return addToCache(&typ.rtype)
2161 func runtimeStructField(field StructField) structField {
2162 if field.PkgPath != "" {
2163 panic("reflect.StructOf: StructOf does not allow unexported fields")
2166 // Best-effort check for misuse.
2167 // Since PkgPath is empty, not much harm done if Unicode lowercase slips through.
2168 c := field.Name[0]
2169 if 'a' <= c && c <= 'z' || c == '_' {
2170 panic("reflect.StructOf: field \"" + field.Name + "\" is unexported but missing PkgPath")
2173 offsetAnon := uintptr(0)
2174 if field.Anonymous {
2175 offsetAnon |= 1
2178 s := field.Name
2179 name := &s
2181 var tag *string
2182 if field.Tag != "" {
2183 st := string(field.Tag)
2184 tag = &st
2187 return structField{
2188 name: name,
2189 pkgPath: nil,
2190 typ: field.Type.common(),
2191 tag: tag,
2192 offsetAnon: offsetAnon,
2196 // typeptrdata returns the length in bytes of the prefix of t
2197 // containing pointer data. Anything after this offset is scalar data.
2198 // keep in sync with ../cmd/compile/internal/gc/reflect.go
2199 func typeptrdata(t *rtype) uintptr {
2200 if !t.pointers() {
2201 return 0
2203 switch t.Kind() {
2204 case Struct:
2205 st := (*structType)(unsafe.Pointer(t))
2206 // find the last field that has pointers.
2207 field := 0
2208 for i := range st.fields {
2209 ft := st.fields[i].typ
2210 if ft.pointers() {
2211 field = i
2214 f := st.fields[field]
2215 return f.offset() + f.typ.ptrdata
2217 default:
2218 panic("reflect.typeptrdata: unexpected type, " + t.String())
2222 // See cmd/compile/internal/gc/reflect.go for derivation of constant.
2223 const maxPtrmaskBytes = 2048
2225 // ArrayOf returns the array type with the given count and element type.
2226 // For example, if t represents int, ArrayOf(5, t) represents [5]int.
2228 // If the resulting type would be larger than the available address space,
2229 // ArrayOf panics.
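//
// A small illustrative use (not part of the original documentation), assuming
// a client package that imports "reflect":
//
//	t := reflect.ArrayOf(5, reflect.TypeOf(int(0))) // the type [5]int
//	a := reflect.New(t).Elem()
//	a.Index(2).SetInt(7)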
2230 func ArrayOf(count int, elem Type) Type {
2231 typ := elem.(*rtype)
2233 // Look in cache.
2234 ckey := cacheKey{Array, typ, nil, uintptr(count)}
2235 if array, ok := lookupCache.Load(ckey); ok {
2236 return array.(Type)
2239 // Look in known types.
2240 s := "[" + strconv.Itoa(count) + "]" + *typ.string
2242 // Make an array type.
2243 var iarray interface{} = [1]unsafe.Pointer{}
2244 prototype := *(**arrayType)(unsafe.Pointer(&iarray))
2245 array := *prototype
2246 array.string = &s
2248 // gccgo uses a different hash.
2249 // array.hash = fnv1(typ.hash, '[')
2250 // for n := uint32(count); n > 0; n >>= 8 {
2251 // array.hash = fnv1(array.hash, byte(n))
2252 // }
2253 // array.hash = fnv1(array.hash, ']')
2254 array.hash = typ.hash + 1 + 13
2256 array.elem = typ
2257 array.ptrToThis = nil
2258 if typ.size > 0 {
2259 max := ^uintptr(0) / typ.size
2260 if uintptr(count) > max {
2261 panic("reflect.ArrayOf: array size would exceed virtual address space")
2264 array.size = typ.size * uintptr(count)
2265 if count > 0 && typ.ptrdata != 0 {
2266 array.ptrdata = typ.size*uintptr(count-1) + typ.ptrdata
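// For example (editor's note), for [3]struct{ p *int; x int } on 64-bit the
// element has size 16 and ptrdata 8, so the array's ptrdata is 16*2 + 8 = 40
// of its 48 bytes; the final x word never needs scanning.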
2268 array.align = typ.align
2269 array.fieldAlign = typ.fieldAlign
2270 array.uncommonType = nil
2271 array.len = uintptr(count)
2272 array.slice = SliceOf(elem).(*rtype)
2274 array.kind &^= kindNoPointers
2275 switch {
2276 case typ.kind&kindNoPointers != 0 || array.size == 0:
2277 // No pointers.
2278 array.kind |= kindNoPointers
2279 array.gcdata = nil
2280 array.ptrdata = 0
2282 case count == 1:
2283 // In memory, 1-element array looks just like the element.
2284 array.kind |= typ.kind & kindGCProg
2285 array.gcdata = typ.gcdata
2286 array.ptrdata = typ.ptrdata
2288 case typ.kind&kindGCProg == 0 && array.size <= maxPtrmaskBytes*8*ptrSize:
2289 // Element is small with pointer mask; array is still small.
2290 // Create direct pointer mask by turning each 1 bit in elem
2291 // into count 1 bits in larger mask.
2292 mask := make([]byte, (array.ptrdata/ptrSize+7)/8)
2293 elemMask := (*[1 << 30]byte)(unsafe.Pointer(typ.gcdata))[:]
2294 elemWords := typ.size / ptrSize
2295 for j := uintptr(0); j < typ.ptrdata/ptrSize; j++ {
2296 if (elemMask[j/8]>>(j%8))&1 != 0 {
2297 for i := uintptr(0); i < array.len; i++ {
2298 k := i*elemWords + j
2299 mask[k/8] |= 1 << (k % 8)
2303 array.gcdata = &mask[0]
2305 default:
2306 // Create program that emits one element
2307 // and then repeats to make the array.
2308 prog := []byte{0, 0, 0, 0} // will be length of prog
2309 elemGC := (*[1 << 30]byte)(unsafe.Pointer(typ.gcdata))[:]
2310 elemPtrs := typ.ptrdata / ptrSize
2311 if typ.kind&kindGCProg == 0 {
2312 // Element is small with pointer mask; use as literal bits.
2313 mask := elemGC
2314 // Emit 120-bit chunks of full bytes (max is 127 but we avoid using partial bytes).
2315 var n uintptr
2316 for n = elemPtrs; n > 120; n -= 120 {
2317 prog = append(prog, 120)
2318 prog = append(prog, mask[:15]...)
2319 mask = mask[15:]
2321 prog = append(prog, byte(n))
2322 prog = append(prog, mask[:(n+7)/8]...)
2323 } else {
2324 // Element has GC program; emit one element.
2325 elemProg := elemGC[4 : 4+*(*uint32)(unsafe.Pointer(&elemGC[0]))-1]
2326 prog = append(prog, elemProg...)
2328 // Pad from ptrdata to size.
2329 elemWords := typ.size / ptrSize
2330 if elemPtrs < elemWords {
2331 // Emit literal 0 bit, then repeat as needed.
2332 prog = append(prog, 0x01, 0x00)
2333 if elemPtrs+1 < elemWords {
2334 prog = append(prog, 0x81)
2335 prog = appendVarint(prog, elemWords-elemPtrs-1)
2338 // Repeat count-1 times.
2339 if elemWords < 0x80 {
2340 prog = append(prog, byte(elemWords|0x80))
2341 } else {
2342 prog = append(prog, 0x80)
2343 prog = appendVarint(prog, elemWords)
2345 prog = appendVarint(prog, uintptr(count)-1)
2346 prog = append(prog, 0)
2347 *(*uint32)(unsafe.Pointer(&prog[0])) = uint32(len(prog) - 4)
2348 array.kind |= kindGCProg
2349 array.gcdata = &prog[0]
2350 array.ptrdata = array.size // overestimate but ok; must match program
2353 array.kind &^= kindDirectIface
2355 esize := typ.size
2357 if typ.equalfn == nil {
2358 array.equalfn = nil
2359 } else {
2360 eequal := typ.equalfn
2361 array.equalfn = func(p, q unsafe.Pointer) bool {
2362 for i := 0; i < count; i++ {
2363 pi := arrayAt(p, i, esize)
2364 qi := arrayAt(q, i, esize)
2365 if !eequal(pi, qi) {
2366 return false
2369 return true
2373 if typ.hashfn == nil {
2374 array.hashfn = nil
2375 } else {
2376 ehash := typ.hashfn
2377 array.hashfn = func(ptr unsafe.Pointer, seed uintptr) uintptr {
2378 o := seed
2379 for i := 0; i < count; i++ {
2380 o = ehash(arrayAt(ptr, i, esize), o)
2382 return o
2386 ti, _ := lookupCache.LoadOrStore(ckey, &array.rtype)
2387 return ti.(Type)
2390 func appendVarint(x []byte, v uintptr) []byte {
2391 for ; v >= 0x80; v >>= 7 {
2392 x = append(x, byte(v|0x80))
2394 x = append(x, byte(v))
2395 return x
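// For instance (illustrative), appendVarint(nil, 300) yields the two bytes
// 0xAC, 0x02: 7-bit groups are emitted least-significant first, with the high
// bit marking continuation.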
2398 // toType converts from a *rtype to a Type that can be returned
2399 // to the client of package reflect. In gc, the only concern is that
2400 // a nil *rtype must be replaced by a nil Type, but in gccgo this
2401 // function takes care of ensuring that multiple *rtype for the same
2402 // type are coalesced into a single Type.
2403 var canonicalType = make(map[string]Type)
2405 var canonicalTypeLock sync.RWMutex
2407 func canonicalize(t Type) Type {
2408 if t == nil {
2409 return nil
2411 s := t.rawString()
2412 canonicalTypeLock.RLock()
2413 if r, ok := canonicalType[s]; ok {
2414 canonicalTypeLock.RUnlock()
2415 return r
2417 canonicalTypeLock.RUnlock()
2418 canonicalTypeLock.Lock()
2419 if r, ok := canonicalType[s]; ok {
2420 canonicalTypeLock.Unlock()
2421 return r
2423 canonicalType[s] = t
2424 canonicalTypeLock.Unlock()
2425 return t
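// Editor's note: the second map lookup above, performed after upgrading to the
// write lock, is what makes this double-checked pattern safe; another
// goroutine may have inserted the same string key between RUnlock and Lock.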
2428 func toType(p *rtype) Type {
2429 if p == nil {
2430 return nil
2432 return canonicalize(p)
2435 // ifaceIndir reports whether t is stored indirectly in an interface value.
2436 func ifaceIndir(t *rtype) bool {
2437 return t.kind&kindDirectIface == 0
2440 // Layout matches runtime.gobitvector (well enough).
2441 type bitVector struct {
2442 n uint32 // number of bits
2443 data []byte
2446 // append a bit to the bitmap.
2447 func (bv *bitVector) append(bit uint8) {
2448 if bv.n%8 == 0 {
2449 bv.data = append(bv.data, 0)
2451 bv.data[bv.n/8] |= bit << (bv.n % 8)
2452 bv.n++
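// Illustrative example: appending the bits 1,1 for an interface value at
// offset 0 leaves bv.n == 2 and bv.data[0] == 0b11; a new byte is appended to
// bv.data every eighth bit.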
2455 func addTypeBits(bv *bitVector, offset uintptr, t *rtype) {
2456 if t.kind&kindNoPointers != 0 {
2457 return
2460 switch Kind(t.kind & kindMask) {
2461 case Chan, Func, Map, Ptr, Slice, String, UnsafePointer:
2462 // 1 pointer at start of representation
2463 for bv.n < uint32(offset/uintptr(ptrSize)) {
2464 bv.append(0)
2466 bv.append(1)
2468 case Interface:
2469 // 2 pointers
2470 for bv.n < uint32(offset/uintptr(ptrSize)) {
2471 bv.append(0)
2473 bv.append(1)
2474 bv.append(1)
2476 case Array:
2477 // repeat inner type
2478 tt := (*arrayType)(unsafe.Pointer(t))
2479 for i := 0; i < int(tt.len); i++ {
2480 addTypeBits(bv, offset+uintptr(i)*tt.elem.size, tt.elem)
2483 case Struct:
2484 // apply fields
2485 tt := (*structType)(unsafe.Pointer(t))
2486 for i := range tt.fields {
2487 f := &tt.fields[i]
2488 addTypeBits(bv, offset+f.offset(), f.typ)