// libgo: update to Go 1.11
// [official-gcc.git] / libgo / go / reflect / type.go
// blob da7796f3703ce533263a52c1345487ddeeb596ff
1 // Copyright 2009 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
5 // Package reflect implements run-time reflection, allowing a program to
6 // manipulate objects with arbitrary types. The typical use is to take a value
7 // with static type interface{} and extract its dynamic type information by
8 // calling TypeOf, which returns a Type.
9 //
10 // A call to ValueOf returns a Value representing the run-time data.
11 // Zero takes a Type and returns a Value representing a zero value
12 // for that type.
14 // See "The Laws of Reflection" for an introduction to reflection in Go:
15 // https://golang.org/doc/articles/laws_of_reflection.html
16 package reflect
import (
	"strconv"
	"sync"
	"unicode"
	"unicode/utf8"
	"unsafe"
)
26 // Type is the representation of a Go type.
28 // Not all methods apply to all kinds of types. Restrictions,
29 // if any, are noted in the documentation for each method.
30 // Use the Kind method to find out the kind of type before
31 // calling kind-specific methods. Calling a method
32 // inappropriate to the kind of type causes a run-time panic.
34 // Type values are comparable, such as with the == operator,
35 // so they can be used as map keys.
36 // Two Type values are equal if they represent identical types.
37 type Type interface {
38 // Methods applicable to all types.
40 // Align returns the alignment in bytes of a value of
41 // this type when allocated in memory.
42 Align() int
44 // FieldAlign returns the alignment in bytes of a value of
45 // this type when used as a field in a struct.
46 FieldAlign() int
48 // Method returns the i'th method in the type's method set.
49 // It panics if i is not in the range [0, NumMethod()).
51 // For a non-interface type T or *T, the returned Method's Type and Func
52 // fields describe a function whose first argument is the receiver.
54 // For an interface type, the returned Method's Type field gives the
55 // method signature, without a receiver, and the Func field is nil.
56 Method(int) Method
58 // MethodByName returns the method with that name in the type's
59 // method set and a boolean indicating if the method was found.
61 // For a non-interface type T or *T, the returned Method's Type and Func
62 // fields describe a function whose first argument is the receiver.
64 // For an interface type, the returned Method's Type field gives the
65 // method signature, without a receiver, and the Func field is nil.
66 MethodByName(string) (Method, bool)
68 // NumMethod returns the number of exported methods in the type's method set.
69 NumMethod() int
71 // Name returns the type's name within its package for a defined type.
72 // For other (non-defined) types it returns the empty string.
73 Name() string
75 // PkgPath returns a defined type's package path, that is, the import path
76 // that uniquely identifies the package, such as "encoding/base64".
77 // If the type was predeclared (string, error) or not defined (*T, struct{},
78 // []int, or A where A is an alias for a non-defined type), the package path
79 // will be the empty string.
80 PkgPath() string
82 // Size returns the number of bytes needed to store
83 // a value of the given type; it is analogous to unsafe.Sizeof.
84 Size() uintptr
86 // String returns a string representation of the type.
87 // The string representation may use shortened package names
88 // (e.g., base64 instead of "encoding/base64") and is not
89 // guaranteed to be unique among types. To test for type identity,
90 // compare the Types directly.
91 String() string
93 // Used internally by gccgo--the string retaining quoting.
94 rawString() string
96 // Kind returns the specific kind of this type.
97 Kind() Kind
99 // Implements reports whether the type implements the interface type u.
100 Implements(u Type) bool
102 // AssignableTo reports whether a value of the type is assignable to type u.
103 AssignableTo(u Type) bool
105 // ConvertibleTo reports whether a value of the type is convertible to type u.
106 ConvertibleTo(u Type) bool
108 // Comparable reports whether values of this type are comparable.
109 Comparable() bool
111 // Methods applicable only to some types, depending on Kind.
112 // The methods allowed for each kind are:
114 // Int*, Uint*, Float*, Complex*: Bits
115 // Array: Elem, Len
116 // Chan: ChanDir, Elem
117 // Func: In, NumIn, Out, NumOut, IsVariadic.
118 // Map: Key, Elem
119 // Ptr: Elem
120 // Slice: Elem
121 // Struct: Field, FieldByIndex, FieldByName, FieldByNameFunc, NumField
123 // Bits returns the size of the type in bits.
124 // It panics if the type's Kind is not one of the
125 // sized or unsized Int, Uint, Float, or Complex kinds.
126 Bits() int
128 // ChanDir returns a channel type's direction.
129 // It panics if the type's Kind is not Chan.
130 ChanDir() ChanDir
132 // IsVariadic reports whether a function type's final input parameter
133 // is a "..." parameter. If so, t.In(t.NumIn() - 1) returns the parameter's
134 // implicit actual type []T.
136 // For concreteness, if t represents func(x int, y ... float64), then
138 // t.NumIn() == 2
139 // t.In(0) is the reflect.Type for "int"
140 // t.In(1) is the reflect.Type for "[]float64"
141 // t.IsVariadic() == true
143 // IsVariadic panics if the type's Kind is not Func.
144 IsVariadic() bool
146 // Elem returns a type's element type.
147 // It panics if the type's Kind is not Array, Chan, Map, Ptr, or Slice.
148 Elem() Type
150 // Field returns a struct type's i'th field.
151 // It panics if the type's Kind is not Struct.
152 // It panics if i is not in the range [0, NumField()).
153 Field(i int) StructField
155 // FieldByIndex returns the nested field corresponding
156 // to the index sequence. It is equivalent to calling Field
157 // successively for each index i.
158 // It panics if the type's Kind is not Struct.
159 FieldByIndex(index []int) StructField
161 // FieldByName returns the struct field with the given name
162 // and a boolean indicating if the field was found.
163 FieldByName(name string) (StructField, bool)
165 // FieldByNameFunc returns the struct field with a name
166 // that satisfies the match function and a boolean indicating if
167 // the field was found.
169 // FieldByNameFunc considers the fields in the struct itself
170 // and then the fields in any embedded structs, in breadth first order,
171 // stopping at the shallowest nesting depth containing one or more
172 // fields satisfying the match function. If multiple fields at that depth
173 // satisfy the match function, they cancel each other
174 // and FieldByNameFunc returns no match.
175 // This behavior mirrors Go's handling of name lookup in
176 // structs containing embedded fields.
177 FieldByNameFunc(match func(string) bool) (StructField, bool)
179 // In returns the type of a function type's i'th input parameter.
180 // It panics if the type's Kind is not Func.
181 // It panics if i is not in the range [0, NumIn()).
182 In(i int) Type
184 // Key returns a map type's key type.
185 // It panics if the type's Kind is not Map.
186 Key() Type
188 // Len returns an array type's length.
189 // It panics if the type's Kind is not Array.
190 Len() int
192 // NumField returns a struct type's field count.
193 // It panics if the type's Kind is not Struct.
194 NumField() int
196 // NumIn returns a function type's input parameter count.
197 // It panics if the type's Kind is not Func.
198 NumIn() int
200 // NumOut returns a function type's output parameter count.
201 // It panics if the type's Kind is not Func.
202 NumOut() int
204 // Out returns the type of a function type's i'th output parameter.
205 // It panics if the type's Kind is not Func.
206 // It panics if i is not in the range [0, NumOut()).
207 Out(i int) Type
209 common() *rtype
210 uncommon() *uncommonType
213 // BUG(rsc): FieldByName and related functions consider struct field names to be equal
214 // if the names are equal, even if they are unexported names originating
215 // in different packages. The practical effect of this is that the result of
216 // t.FieldByName("x") is not well defined if the struct type t contains
217 // multiple fields named x (embedded from different packages).
218 // FieldByName may return one of the fields named x or may report that there are none.
219 // See https://golang.org/issue/4876 for more details.
/*
 * These data structures are known to the compiler (../../cmd/internal/gc/reflect.go).
 * A few are known to ../runtime/type.go to convey to debuggers.
 * They are also known to ../runtime/type.go.
 */
// A Kind represents the specific kind of type that a Type represents.
// The zero Kind is not a valid kind.
type Kind uint

const (
	Invalid Kind = iota
	Bool
	Int
	Int8
	Int16
	Int32
	Int64
	Uint
	Uint8
	Uint16
	Uint32
	Uint64
	Uintptr
	Float32
	Float64
	Complex64
	Complex128
	Array
	Chan
	Func
	Interface
	Map
	Ptr
	Slice
	String
	Struct
	UnsafePointer
)
261 // rtype is the common implementation of most values.
262 // It is embedded in other struct types.
264 // rtype must be kept in sync with ../runtime/type.go:/^type._type.
265 type rtype struct {
266 size uintptr
267 ptrdata uintptr // size of memory prefix holding all pointers
268 hash uint32 // hash of type; avoids computation in hash tables
269 kind uint8 // enumeration for C
270 align int8 // alignment of variable with this type
271 fieldAlign uint8 // alignment of struct field with this type
272 _ uint8 // unused/padding
274 hashfn func(unsafe.Pointer, uintptr) uintptr // hash function
275 equalfn func(unsafe.Pointer, unsafe.Pointer) bool // equality function
277 gcdata *byte // garbage collection data
278 string *string // string form; unnecessary but undeniably useful
279 *uncommonType // (relatively) uncommon fields
280 ptrToThis *rtype // type for pointer to this type, if used in binary or has methods
283 // Method on non-interface type
284 type method struct {
285 name *string // name of method
286 pkgPath *string // nil for exported Names; otherwise import path
287 mtyp *rtype // method type (without receiver)
288 typ *rtype // .(*FuncType) underneath (with receiver)
289 tfn unsafe.Pointer // fn used for normal method call
292 // uncommonType is present only for defined types or types with methods
293 // (if T is a defined type, the uncommonTypes for T and *T have methods).
294 // Using a pointer to this struct reduces the overall size required
295 // to describe a non-defined type with no methods.
296 type uncommonType struct {
297 name *string // name of type
298 pkgPath *string // import path; nil for built-in types like int, string
299 methods []method // methods associated with type
// ChanDir represents a channel type's direction.
type ChanDir int

const (
	RecvDir ChanDir             = 1 << iota // <-chan
	SendDir                                 // chan<-
	BothDir = RecvDir | SendDir             // chan
)
311 // arrayType represents a fixed array type.
312 type arrayType struct {
313 rtype
314 elem *rtype // array element type
315 slice *rtype // slice type
316 len uintptr
319 // chanType represents a channel type.
320 type chanType struct {
321 rtype
322 elem *rtype // channel element type
323 dir uintptr // channel direction (ChanDir)
326 // funcType represents a function type.
327 type funcType struct {
328 rtype
329 dotdotdot bool // last input parameter is ...
330 in []*rtype // input parameter types
331 out []*rtype // output parameter types
334 // imethod represents a method on an interface type
335 type imethod struct {
336 name *string // name of method
337 pkgPath *string // nil for exported Names; otherwise import path
338 typ *rtype // .(*FuncType) underneath
341 // interfaceType represents an interface type.
342 type interfaceType struct {
343 rtype
344 methods []imethod // sorted by hash
347 // mapType represents a map type.
348 type mapType struct {
349 rtype
350 key *rtype // map key type
351 elem *rtype // map element (value) type
352 bucket *rtype // internal bucket structure
353 keysize uint8 // size of key slot
354 indirectkey uint8 // store ptr to key instead of key itself
355 valuesize uint8 // size of value slot
356 indirectvalue uint8 // store ptr to value instead of value itself
357 bucketsize uint16 // size of bucket
358 reflexivekey bool // true if k==k for all keys
359 needkeyupdate bool // true if we need to update key on an overwrite
362 // ptrType represents a pointer type.
363 type ptrType struct {
364 rtype
365 elem *rtype // pointer element (pointed at) type
368 // sliceType represents a slice type.
369 type sliceType struct {
370 rtype
371 elem *rtype // slice element type
374 // Struct field
375 type structField struct {
376 name *string // name is always non-empty
377 pkgPath *string // nil for exported Names; otherwise import path
378 typ *rtype // type of field
379 tag *string // nil if no tag
380 offsetEmbed uintptr // byte offset of field<<1 | isAnonymous
383 func (f *structField) offset() uintptr {
384 return f.offsetEmbed >> 1
387 func (f *structField) embedded() bool {
388 return f.offsetEmbed&1 != 0
391 // structType represents a struct type.
392 type structType struct {
393 rtype
394 fields []structField // sorted by offset
/*
 * The compiler knows the exact layout of all the data structures above.
 * The compiler does not know about the data structures and methods below.
 */
402 // Method represents a single method.
403 type Method struct {
404 // Name is the method name.
405 // PkgPath is the package path that qualifies a lower case (unexported)
406 // method name. It is empty for upper case (exported) method names.
407 // The combination of PkgPath and Name uniquely identifies a method
408 // in a method set.
409 // See https://golang.org/ref/spec#Uniqueness_of_identifiers
410 Name string
411 PkgPath string
413 Type Type // method type
414 Func Value // func with receiver as first argument
415 Index int // index for Type.Method
// Flag bits stored alongside the kind in rtype.kind.
const (
	kindDirectIface = 1 << 5
	kindGCProg      = 1 << 6 // Type.gc points to GC program
	kindNoPointers  = 1 << 7
	kindMask        = (1 << 5) - 1
)
425 func (k Kind) String() string {
426 if int(k) < len(kindNames) {
427 return kindNames[k]
429 return "kind" + strconv.Itoa(int(k))
432 var kindNames = []string{
433 Invalid: "invalid",
434 Bool: "bool",
435 Int: "int",
436 Int8: "int8",
437 Int16: "int16",
438 Int32: "int32",
439 Int64: "int64",
440 Uint: "uint",
441 Uint8: "uint8",
442 Uint16: "uint16",
443 Uint32: "uint32",
444 Uint64: "uint64",
445 Uintptr: "uintptr",
446 Float32: "float32",
447 Float64: "float64",
448 Complex64: "complex64",
449 Complex128: "complex128",
450 Array: "array",
451 Chan: "chan",
452 Func: "func",
453 Interface: "interface",
454 Map: "map",
455 Ptr: "ptr",
456 Slice: "slice",
457 String: "string",
458 Struct: "struct",
459 UnsafePointer: "unsafe.Pointer",
462 func (t *uncommonType) uncommon() *uncommonType {
463 return t
466 func (t *uncommonType) PkgPath() string {
467 if t == nil || t.pkgPath == nil {
468 return ""
470 return *t.pkgPath
473 func (t *uncommonType) Name() string {
474 if t == nil || t.name == nil {
475 return ""
477 return *t.name
480 var methodCache sync.Map // map[*uncommonType][]method
482 func (t *uncommonType) exportedMethods() []method {
483 methodsi, found := methodCache.Load(t)
484 if found {
485 return methodsi.([]method)
488 allm := t.methods
489 allExported := true
490 for _, m := range allm {
491 if m.pkgPath != nil {
492 allExported = false
493 break
496 var methods []method
497 if allExported {
498 methods = allm
499 } else {
500 methods = make([]method, 0, len(allm))
501 for _, m := range allm {
502 if m.pkgPath == nil {
503 methods = append(methods, m)
506 methods = methods[:len(methods):len(methods)]
509 methodsi, _ = methodCache.LoadOrStore(t, methods)
510 return methodsi.([]method)
513 func (t *rtype) rawString() string { return *t.string }
515 func (t *rtype) String() string {
516 // For gccgo, strip out quoted strings.
517 s := *t.string
518 var q bool
519 r := make([]byte, len(s))
520 j := 0
521 for i := 0; i < len(s); i++ {
522 if s[i] == '\t' {
523 q = !q
524 } else if !q {
525 r[j] = s[i]
529 return string(r[:j])
532 func (t *rtype) Size() uintptr { return t.size }
534 func (t *rtype) Bits() int {
535 if t == nil {
536 panic("reflect: Bits of nil Type")
538 k := t.Kind()
539 if k < Int || k > Complex128 {
540 panic("reflect: Bits of non-arithmetic Type " + t.String())
542 return int(t.size) * 8
545 func (t *rtype) Align() int { return int(t.align) }
547 func (t *rtype) FieldAlign() int { return int(t.fieldAlign) }
549 func (t *rtype) Kind() Kind { return Kind(t.kind & kindMask) }
551 func (t *rtype) pointers() bool { return t.kind&kindNoPointers == 0 }
553 func (t *rtype) common() *rtype { return t }
555 func (t *rtype) exportedMethods() []method {
556 ut := t.uncommon()
557 if ut == nil {
558 return nil
560 return ut.exportedMethods()
563 func (t *rtype) NumMethod() int {
564 if t.Kind() == Interface {
565 tt := (*interfaceType)(unsafe.Pointer(t))
566 return tt.NumMethod()
568 return len(t.exportedMethods())
571 func (t *rtype) Method(i int) (m Method) {
572 if t.Kind() == Interface {
573 tt := (*interfaceType)(unsafe.Pointer(t))
574 return tt.Method(i)
576 methods := t.exportedMethods()
577 if i < 0 || i >= len(methods) {
578 panic("reflect: Method index out of range")
580 p := methods[i]
581 if p.name != nil {
582 m.Name = *p.name
584 fl := flag(Func)
585 mt := p.typ
586 m.Type = toType(mt)
587 x := new(unsafe.Pointer)
588 *x = unsafe.Pointer(&p.tfn)
589 m.Func = Value{mt, unsafe.Pointer(x), fl | flagIndir | flagMethodFn}
590 m.Index = i
591 return m
594 func (t *rtype) MethodByName(name string) (m Method, ok bool) {
595 if t.Kind() == Interface {
596 tt := (*interfaceType)(unsafe.Pointer(t))
597 return tt.MethodByName(name)
599 ut := t.uncommon()
600 if ut == nil {
601 return Method{}, false
603 utmethods := ut.methods
604 var eidx int
605 for i := 0; i < len(utmethods); i++ {
606 p := utmethods[i]
607 if p.pkgPath == nil {
608 if p.name != nil && *p.name == name {
609 return t.Method(eidx), true
611 eidx++
614 return Method{}, false
617 func (t *rtype) PkgPath() string {
618 return t.uncommonType.PkgPath()
621 func (t *rtype) Name() string {
622 return t.uncommonType.Name()
625 func (t *rtype) ChanDir() ChanDir {
626 if t.Kind() != Chan {
627 panic("reflect: ChanDir of non-chan type")
629 tt := (*chanType)(unsafe.Pointer(t))
630 return ChanDir(tt.dir)
633 func (t *rtype) IsVariadic() bool {
634 if t.Kind() != Func {
635 panic("reflect: IsVariadic of non-func type")
637 tt := (*funcType)(unsafe.Pointer(t))
638 return tt.dotdotdot
641 func (t *rtype) Elem() Type {
642 switch t.Kind() {
643 case Array:
644 tt := (*arrayType)(unsafe.Pointer(t))
645 return toType(tt.elem)
646 case Chan:
647 tt := (*chanType)(unsafe.Pointer(t))
648 return toType(tt.elem)
649 case Map:
650 tt := (*mapType)(unsafe.Pointer(t))
651 return toType(tt.elem)
652 case Ptr:
653 tt := (*ptrType)(unsafe.Pointer(t))
654 return toType(tt.elem)
655 case Slice:
656 tt := (*sliceType)(unsafe.Pointer(t))
657 return toType(tt.elem)
659 panic("reflect: Elem of invalid type")
662 func (t *rtype) Field(i int) StructField {
663 if t.Kind() != Struct {
664 panic("reflect: Field of non-struct type")
666 tt := (*structType)(unsafe.Pointer(t))
667 return tt.Field(i)
670 func (t *rtype) FieldByIndex(index []int) StructField {
671 if t.Kind() != Struct {
672 panic("reflect: FieldByIndex of non-struct type")
674 tt := (*structType)(unsafe.Pointer(t))
675 return tt.FieldByIndex(index)
678 func (t *rtype) FieldByName(name string) (StructField, bool) {
679 if t.Kind() != Struct {
680 panic("reflect: FieldByName of non-struct type")
682 tt := (*structType)(unsafe.Pointer(t))
683 return tt.FieldByName(name)
686 func (t *rtype) FieldByNameFunc(match func(string) bool) (StructField, bool) {
687 if t.Kind() != Struct {
688 panic("reflect: FieldByNameFunc of non-struct type")
690 tt := (*structType)(unsafe.Pointer(t))
691 return tt.FieldByNameFunc(match)
694 func (t *rtype) In(i int) Type {
695 if t.Kind() != Func {
696 panic("reflect: In of non-func type")
698 tt := (*funcType)(unsafe.Pointer(t))
699 return toType(tt.in[i])
702 func (t *rtype) Key() Type {
703 if t.Kind() != Map {
704 panic("reflect: Key of non-map type")
706 tt := (*mapType)(unsafe.Pointer(t))
707 return toType(tt.key)
710 func (t *rtype) Len() int {
711 if t.Kind() != Array {
712 panic("reflect: Len of non-array type")
714 tt := (*arrayType)(unsafe.Pointer(t))
715 return int(tt.len)
718 func (t *rtype) NumField() int {
719 if t.Kind() != Struct {
720 panic("reflect: NumField of non-struct type")
722 tt := (*structType)(unsafe.Pointer(t))
723 return len(tt.fields)
726 func (t *rtype) NumIn() int {
727 if t.Kind() != Func {
728 panic("reflect: NumIn of non-func type")
730 tt := (*funcType)(unsafe.Pointer(t))
731 return len(tt.in)
734 func (t *rtype) NumOut() int {
735 if t.Kind() != Func {
736 panic("reflect: NumOut of non-func type")
738 tt := (*funcType)(unsafe.Pointer(t))
739 return len(tt.out)
742 func (t *rtype) Out(i int) Type {
743 if t.Kind() != Func {
744 panic("reflect: Out of non-func type")
746 tt := (*funcType)(unsafe.Pointer(t))
747 return toType(tt.out[i])
750 // add returns p+x.
752 // The whySafe string is ignored, so that the function still inlines
753 // as efficiently as p+x, but all call sites should use the string to
754 // record why the addition is safe, which is to say why the addition
755 // does not cause x to advance to the very end of p's allocation
756 // and therefore point incorrectly at the next block in memory.
757 func add(p unsafe.Pointer, x uintptr, whySafe string) unsafe.Pointer {
758 return unsafe.Pointer(uintptr(p) + x)
761 func (d ChanDir) String() string {
762 switch d {
763 case SendDir:
764 return "chan<-"
765 case RecvDir:
766 return "<-chan"
767 case BothDir:
768 return "chan"
770 return "ChanDir" + strconv.Itoa(int(d))
773 // Method returns the i'th method in the type's method set.
774 func (t *interfaceType) Method(i int) (m Method) {
775 if i < 0 || i >= len(t.methods) {
776 return
778 p := &t.methods[i]
779 m.Name = *p.name
780 if p.pkgPath != nil {
781 m.PkgPath = *p.pkgPath
783 m.Type = toType(p.typ)
784 m.Index = i
785 return
788 // NumMethod returns the number of interface methods in the type's method set.
789 func (t *interfaceType) NumMethod() int { return len(t.methods) }
791 // MethodByName method with the given name in the type's method set.
792 func (t *interfaceType) MethodByName(name string) (m Method, ok bool) {
793 if t == nil {
794 return
796 var p *imethod
797 for i := range t.methods {
798 p = &t.methods[i]
799 if *p.name == name {
800 return t.Method(i), true
803 return
806 // A StructField describes a single field in a struct.
807 type StructField struct {
808 // Name is the field name.
809 Name string
810 // PkgPath is the package path that qualifies a lower case (unexported)
811 // field name. It is empty for upper case (exported) field names.
812 // See https://golang.org/ref/spec#Uniqueness_of_identifiers
813 PkgPath string
815 Type Type // field type
816 Tag StructTag // field tag string
817 Offset uintptr // offset within struct, in bytes
818 Index []int // index sequence for Type.FieldByIndex
819 Anonymous bool // is an embedded field
// A StructTag is the tag string in a struct field.
//
// By convention, tag strings are a concatenation of
// optionally space-separated key:"value" pairs.
// Each key is a non-empty string consisting of non-control
// characters other than space (U+0020 ' '), quote (U+0022 '"'),
// and colon (U+003A ':'). Each value is quoted using U+0022 '"'
// characters and Go string literal syntax.
type StructTag string

// Get returns the value associated with key in the tag string.
// If there is no such key in the tag, Get returns the empty string.
// If the tag does not have the conventional format, the value
// returned by Get is unspecified. To determine whether a tag is
// explicitly set to the empty string, use Lookup.
func (tag StructTag) Get(key string) string {
	v, _ := tag.Lookup(key)
	return v
}

// Lookup returns the value associated with key in the tag string.
// If the key is present in the tag the value (which may be empty)
// is returned. Otherwise the returned value will be the empty string.
// The ok return value reports whether the value was explicitly set in
// the tag string. If the tag does not have the conventional format,
// the value returned by Lookup is unspecified.
func (tag StructTag) Lookup(key string) (value string, ok bool) {
	// When modifying this code, also update the validateStructTag code
	// in cmd/vet/structtag.go.

	for tag != "" {
		// Skip leading space.
		i := 0
		for i < len(tag) && tag[i] == ' ' {
			i++
		}
		tag = tag[i:]
		if tag == "" {
			break
		}

		// Scan to colon. A space, a quote or a control character is a syntax error.
		// Strictly speaking, control chars include the range [0x7f, 0x9f], not just
		// [0x00, 0x1f], but in practice, we ignore the multi-byte control characters
		// as it is simpler to inspect the tag's bytes than the tag's runes.
		i = 0
		for i < len(tag) && tag[i] > ' ' && tag[i] != ':' && tag[i] != '"' && tag[i] != 0x7f {
			i++
		}
		if i == 0 || i+1 >= len(tag) || tag[i] != ':' || tag[i+1] != '"' {
			break
		}
		name := string(tag[:i])
		tag = tag[i+1:]

		// Scan quoted string to find value.
		i = 1
		for i < len(tag) && tag[i] != '"' {
			if tag[i] == '\\' {
				i++
			}
			i++
		}
		if i >= len(tag) {
			break
		}
		qvalue := string(tag[:i+1])
		tag = tag[i+1:]

		if key == name {
			value, err := strconv.Unquote(qvalue)
			if err != nil {
				break
			}
			return value, true
		}
	}
	return "", false
}
902 // Field returns the i'th struct field.
903 func (t *structType) Field(i int) (f StructField) {
904 if i < 0 || i >= len(t.fields) {
905 panic("reflect: Field index out of bounds")
907 p := &t.fields[i]
908 f.Type = toType(p.typ)
909 f.Name = *p.name
910 f.Anonymous = p.embedded()
911 if p.pkgPath != nil {
912 f.PkgPath = *p.pkgPath
914 if p.tag != nil {
915 f.Tag = StructTag(*p.tag)
917 f.Offset = p.offset()
919 // NOTE(rsc): This is the only allocation in the interface
920 // presented by a reflect.Type. It would be nice to avoid,
921 // at least in the common cases, but we need to make sure
922 // that misbehaving clients of reflect cannot affect other
923 // uses of reflect. One possibility is CL 5371098, but we
924 // postponed that ugliness until there is a demonstrated
925 // need for the performance. This is issue 2320.
926 f.Index = []int{i}
927 return
930 // TODO(gri): Should there be an error/bool indicator if the index
931 // is wrong for FieldByIndex?
933 // FieldByIndex returns the nested field corresponding to index.
934 func (t *structType) FieldByIndex(index []int) (f StructField) {
935 f.Type = toType(&t.rtype)
936 for i, x := range index {
937 if i > 0 {
938 ft := f.Type
939 if ft.Kind() == Ptr && ft.Elem().Kind() == Struct {
940 ft = ft.Elem()
942 f.Type = ft
944 f = f.Type.Field(x)
946 return
949 // A fieldScan represents an item on the fieldByNameFunc scan work list.
950 type fieldScan struct {
951 typ *structType
952 index []int
955 // FieldByNameFunc returns the struct field with a name that satisfies the
956 // match function and a boolean to indicate if the field was found.
957 func (t *structType) FieldByNameFunc(match func(string) bool) (result StructField, ok bool) {
958 // This uses the same condition that the Go language does: there must be a unique instance
959 // of the match at a given depth level. If there are multiple instances of a match at the
960 // same depth, they annihilate each other and inhibit any possible match at a lower level.
961 // The algorithm is breadth first search, one depth level at a time.
963 // The current and next slices are work queues:
964 // current lists the fields to visit on this depth level,
965 // and next lists the fields on the next lower level.
966 current := []fieldScan{}
967 next := []fieldScan{{typ: t}}
969 // nextCount records the number of times an embedded type has been
970 // encountered and considered for queueing in the 'next' slice.
971 // We only queue the first one, but we increment the count on each.
972 // If a struct type T can be reached more than once at a given depth level,
973 // then it annihilates itself and need not be considered at all when we
974 // process that next depth level.
975 var nextCount map[*structType]int
977 // visited records the structs that have been considered already.
978 // Embedded pointer fields can create cycles in the graph of
979 // reachable embedded types; visited avoids following those cycles.
980 // It also avoids duplicated effort: if we didn't find the field in an
981 // embedded type T at level 2, we won't find it in one at level 4 either.
982 visited := map[*structType]bool{}
984 for len(next) > 0 {
985 current, next = next, current[:0]
986 count := nextCount
987 nextCount = nil
989 // Process all the fields at this depth, now listed in 'current'.
990 // The loop queues embedded fields found in 'next', for processing during the next
991 // iteration. The multiplicity of the 'current' field counts is recorded
992 // in 'count'; the multiplicity of the 'next' field counts is recorded in 'nextCount'.
993 for _, scan := range current {
994 t := scan.typ
995 if visited[t] {
996 // We've looked through this type before, at a higher level.
997 // That higher level would shadow the lower level we're now at,
998 // so this one can't be useful to us. Ignore it.
999 continue
1001 visited[t] = true
1002 for i := range t.fields {
1003 f := &t.fields[i]
1004 // Find name and (for embedded field) type for field f.
1005 fname := *f.name
1006 var ntyp *rtype
1007 if f.embedded() {
1008 // Embedded field of type T or *T.
1009 ntyp = f.typ
1010 if ntyp.Kind() == Ptr {
1011 ntyp = ntyp.Elem().common()
1015 // Does it match?
1016 if match(fname) {
1017 // Potential match
1018 if count[t] > 1 || ok {
1019 // Name appeared multiple times at this level: annihilate.
1020 return StructField{}, false
1022 result = t.Field(i)
1023 result.Index = nil
1024 result.Index = append(result.Index, scan.index...)
1025 result.Index = append(result.Index, i)
1026 ok = true
1027 continue
1030 // Queue embedded struct fields for processing with next level,
1031 // but only if we haven't seen a match yet at this level and only
1032 // if the embedded types haven't already been queued.
1033 if ok || ntyp == nil || ntyp.Kind() != Struct {
1034 continue
1036 ntyp = toType(ntyp).common()
1037 styp := (*structType)(unsafe.Pointer(ntyp))
1038 if nextCount[styp] > 0 {
1039 nextCount[styp] = 2 // exact multiple doesn't matter
1040 continue
1042 if nextCount == nil {
1043 nextCount = map[*structType]int{}
1045 nextCount[styp] = 1
1046 if count[t] > 1 {
1047 nextCount[styp] = 2 // exact multiple doesn't matter
1049 var index []int
1050 index = append(index, scan.index...)
1051 index = append(index, i)
1052 next = append(next, fieldScan{styp, index})
1055 if ok {
1056 break
1059 return
1062 // FieldByName returns the struct field with the given name
1063 // and a boolean to indicate if the field was found.
1064 func (t *structType) FieldByName(name string) (f StructField, present bool) {
1065 // Quick check for top-level name, or struct without embedded fields.
1066 hasEmbeds := false
1067 if name != "" {
1068 for i := range t.fields {
1069 tf := &t.fields[i]
1070 if *tf.name == name {
1071 return t.Field(i), true
1073 if tf.embedded() {
1074 hasEmbeds = true
1078 if !hasEmbeds {
1079 return
1081 return t.FieldByNameFunc(func(s string) bool { return s == name })
1084 // TypeOf returns the reflection Type that represents the dynamic type of i.
1085 // If i is a nil interface value, TypeOf returns nil.
1086 func TypeOf(i interface{}) Type {
1087 eface := *(*emptyInterface)(unsafe.Pointer(&i))
1088 return toType(eface.typ)
// ptrMap is the cache for PtrTo, mapping an element type to its
// already-constructed pointer type.
var ptrMap sync.Map // map[*rtype]*ptrType
1094 // PtrTo returns the pointer type with element t.
1095 // For example, if t represents type Foo, PtrTo(t) represents *Foo.
1096 func PtrTo(t Type) Type {
1097 return t.(*rtype).ptrTo()
1100 func (t *rtype) ptrTo() *rtype {
1101 if p := t.ptrToThis; p != nil {
1102 return p
1105 // Check the cache.
1106 if pi, ok := ptrMap.Load(t); ok {
1107 return &pi.(*ptrType).rtype
1110 s := "*" + *t.string
1112 canonicalTypeLock.RLock()
1113 r, ok := canonicalType[s]
1114 canonicalTypeLock.RUnlock()
1115 if ok {
1116 p := (*ptrType)(unsafe.Pointer(r.(*rtype)))
1117 pi, _ := ptrMap.LoadOrStore(t, p)
1118 return &pi.(*ptrType).rtype
1121 // Create a new ptrType starting with the description
1122 // of an *unsafe.Pointer.
1123 var iptr interface{} = (*unsafe.Pointer)(nil)
1124 prototype := *(**ptrType)(unsafe.Pointer(&iptr))
1125 pp := *prototype
1127 pp.string = &s
1128 pp.ptrToThis = nil
1130 // For the type structures linked into the binary, the
1131 // compiler provides a good hash of the string.
1132 // Create a good hash for the new string by using
1133 // the FNV-1 hash's mixing function to combine the
1134 // old hash and the new "*".
1135 // p.hash = fnv1(t.hash, '*')
1136 // This is the gccgo version.
1137 pp.hash = (t.hash << 4) + 9
1139 pp.uncommonType = nil
1140 pp.ptrToThis = nil
1141 pp.elem = t
1143 q := canonicalize(&pp.rtype)
1144 p := (*ptrType)(unsafe.Pointer(q.(*rtype)))
1146 pi, _ := ptrMap.LoadOrStore(t, p)
1147 return &pi.(*ptrType).rtype
// fnv1 incorporates the list of bytes into the hash x using the FNV-1 hash function.
func fnv1(x uint32, list ...byte) uint32 {
	for _, b := range list {
		// FNV-1: multiply by the 32-bit FNV prime, then XOR the byte.
		x = x*16777619 ^ uint32(b)
	}
	return x
}
1158 func (t *rtype) Implements(u Type) bool {
1159 if u == nil {
1160 panic("reflect: nil type passed to Type.Implements")
1162 if u.Kind() != Interface {
1163 panic("reflect: non-interface type passed to Type.Implements")
1165 return implements(u.(*rtype), t)
1168 func (t *rtype) AssignableTo(u Type) bool {
1169 if u == nil {
1170 panic("reflect: nil type passed to Type.AssignableTo")
1172 uu := u.(*rtype)
1173 return directlyAssignable(uu, t) || implements(uu, t)
1176 func (t *rtype) ConvertibleTo(u Type) bool {
1177 if u == nil {
1178 panic("reflect: nil type passed to Type.ConvertibleTo")
1180 uu := u.(*rtype)
1181 return convertOp(uu, t) != nil
1184 func (t *rtype) Comparable() bool {
1185 switch t.Kind() {
1186 case Bool, Int, Int8, Int16, Int32, Int64,
1187 Uint, Uint8, Uint16, Uint32, Uint64, Uintptr,
1188 Float32, Float64, Complex64, Complex128,
1189 Chan, Interface, Ptr, String, UnsafePointer:
1190 return true
1192 case Func, Map, Slice:
1193 return false
1195 case Array:
1196 return (*arrayType)(unsafe.Pointer(t)).elem.Comparable()
1198 case Struct:
1199 tt := (*structType)(unsafe.Pointer(t))
1200 for i := range tt.fields {
1201 if !tt.fields[i].typ.Comparable() {
1202 return false
1205 return true
1207 default:
1208 panic("reflect: impossible")
1212 // implements reports whether the type V implements the interface type T.
1213 func implements(T, V *rtype) bool {
1214 if T.Kind() != Interface {
1215 return false
1217 t := (*interfaceType)(unsafe.Pointer(T))
1218 if len(t.methods) == 0 {
1219 return true
1222 // The same algorithm applies in both cases, but the
1223 // method tables for an interface type and a concrete type
1224 // are different, so the code is duplicated.
1225 // In both cases the algorithm is a linear scan over the two
1226 // lists - T's methods and V's methods - simultaneously.
1227 // Since method tables are stored in a unique sorted order
1228 // (alphabetical, with no duplicate method names), the scan
1229 // through V's methods must hit a match for each of T's
1230 // methods along the way, or else V does not implement T.
1231 // This lets us run the scan in overall linear time instead of
1232 // the quadratic time a naive search would require.
1233 // See also ../runtime/iface.go.
1234 if V.Kind() == Interface {
1235 v := (*interfaceType)(unsafe.Pointer(V))
1236 i := 0
1237 for j := 0; j < len(v.methods); j++ {
1238 tm := &t.methods[i]
1239 vm := &v.methods[j]
1240 if *vm.name == *tm.name && (vm.pkgPath == tm.pkgPath || (vm.pkgPath != nil && tm.pkgPath != nil && *vm.pkgPath == *tm.pkgPath)) && toType(vm.typ).common() == toType(tm.typ).common() {
1241 if i++; i >= len(t.methods) {
1242 return true
1246 return false
1249 v := V.uncommon()
1250 if v == nil {
1251 return false
1253 i := 0
1254 for j := 0; j < len(v.methods); j++ {
1255 tm := &t.methods[i]
1256 vm := &v.methods[j]
1257 if *vm.name == *tm.name && (vm.pkgPath == tm.pkgPath || (vm.pkgPath != nil && tm.pkgPath != nil && *vm.pkgPath == *tm.pkgPath)) && toType(vm.mtyp).common() == toType(tm.typ).common() {
1258 if i++; i >= len(t.methods) {
1259 return true
1263 return false
1266 // directlyAssignable reports whether a value x of type V can be directly
1267 // assigned (using memmove) to a value of type T.
1268 // https://golang.org/doc/go_spec.html#Assignability
1269 // Ignoring the interface rules (implemented elsewhere)
1270 // and the ideal constant rules (no ideal constants at run time).
1271 func directlyAssignable(T, V *rtype) bool {
1272 // x's type V is identical to T?
1273 if T == V {
1274 return true
1277 // Otherwise at least one of T and V must not be defined
1278 // and they must have the same kind.
1279 if T.Name() != "" && V.Name() != "" || T.Kind() != V.Kind() {
1280 return false
1283 // x's type T and V must have identical underlying types.
1284 return haveIdenticalUnderlyingType(T, V, true)
1287 func haveIdenticalType(T, V Type, cmpTags bool) bool {
1288 if cmpTags {
1289 return T == V
1292 if T.Name() != V.Name() || T.Kind() != V.Kind() {
1293 return false
1296 return haveIdenticalUnderlyingType(T.common(), V.common(), false)
1299 func haveIdenticalUnderlyingType(T, V *rtype, cmpTags bool) bool {
1300 if T == V {
1301 return true
1304 kind := T.Kind()
1305 if kind != V.Kind() {
1306 return false
1309 // Non-composite types of equal kind have same underlying type
1310 // (the predefined instance of the type).
1311 if Bool <= kind && kind <= Complex128 || kind == String || kind == UnsafePointer {
1312 return true
1315 // Composite types.
1316 switch kind {
1317 case Array:
1318 return T.Len() == V.Len() && haveIdenticalType(T.Elem(), V.Elem(), cmpTags)
1320 case Chan:
1321 // Special case:
1322 // x is a bidirectional channel value, T is a channel type,
1323 // and x's type V and T have identical element types.
1324 if V.ChanDir() == BothDir && haveIdenticalType(T.Elem(), V.Elem(), cmpTags) {
1325 return true
1328 // Otherwise continue test for identical underlying type.
1329 return V.ChanDir() == T.ChanDir() && haveIdenticalType(T.Elem(), V.Elem(), cmpTags)
1331 case Func:
1332 t := (*funcType)(unsafe.Pointer(T))
1333 v := (*funcType)(unsafe.Pointer(V))
1334 if t.dotdotdot != v.dotdotdot || len(t.in) != len(v.in) || len(t.out) != len(v.out) {
1335 return false
1337 for i, typ := range t.in {
1338 if !haveIdenticalType(typ, v.in[i], cmpTags) {
1339 return false
1342 for i, typ := range t.out {
1343 if !haveIdenticalType(typ, v.out[i], cmpTags) {
1344 return false
1347 return true
1349 case Interface:
1350 t := (*interfaceType)(unsafe.Pointer(T))
1351 v := (*interfaceType)(unsafe.Pointer(V))
1352 if len(t.methods) == 0 && len(v.methods) == 0 {
1353 return true
1355 // Might have the same methods but still
1356 // need a run time conversion.
1357 return false
1359 case Map:
1360 return haveIdenticalType(T.Key(), V.Key(), cmpTags) && haveIdenticalType(T.Elem(), V.Elem(), cmpTags)
1362 case Ptr, Slice:
1363 return haveIdenticalType(T.Elem(), V.Elem(), cmpTags)
1365 case Struct:
1366 t := (*structType)(unsafe.Pointer(T))
1367 v := (*structType)(unsafe.Pointer(V))
1368 if len(t.fields) != len(v.fields) {
1369 return false
1371 for i := range t.fields {
1372 tf := &t.fields[i]
1373 vf := &v.fields[i]
1374 if tf.name != vf.name && (tf.name == nil || vf.name == nil || *tf.name != *vf.name) {
1375 return false
1377 if tf.pkgPath != vf.pkgPath && (tf.pkgPath == nil || vf.pkgPath == nil || *tf.pkgPath != *vf.pkgPath) {
1378 return false
1380 if !haveIdenticalType(tf.typ, vf.typ, cmpTags) {
1381 return false
1383 if cmpTags && tf.tag != vf.tag && (tf.tag == nil || vf.tag == nil || *tf.tag != *vf.tag) {
1384 return false
1386 if tf.offsetEmbed != vf.offsetEmbed {
1387 return false
1390 return true
1393 return false
1396 // The lookupCache caches ArrayOf, ChanOf, MapOf and SliceOf lookups.
1397 var lookupCache sync.Map // map[cacheKey]*rtype
1399 // A cacheKey is the key for use in the lookupCache.
1400 // Four values describe any of the types we are looking for:
1401 // type kind, one or two subtypes, and an extra integer.
1402 type cacheKey struct {
1403 kind Kind
1404 t1 *rtype
1405 t2 *rtype
1406 extra uintptr
// The funcLookupCache caches FuncOf lookups.
// FuncOf does not share the common lookupCache since cacheKey is not
// sufficient to represent functions unambiguously.
var funcLookupCache struct {
	sync.Mutex // Guards stores (but not loads) on m.

	// m is a map[uint32][]*rtype keyed by the hash calculated in FuncOf.
	// Elements of m are append-only and thus safe for concurrent reading.
	m sync.Map
}
1420 // ChanOf returns the channel type with the given direction and element type.
1421 // For example, if t represents int, ChanOf(RecvDir, t) represents <-chan int.
1423 // The gc runtime imposes a limit of 64 kB on channel element types.
1424 // If t's size is equal to or exceeds this limit, ChanOf panics.
1425 func ChanOf(dir ChanDir, t Type) Type {
1426 typ := t.(*rtype)
1428 // Look in cache.
1429 ckey := cacheKey{Chan, typ, nil, uintptr(dir)}
1430 if ch, ok := lookupCache.Load(ckey); ok {
1431 return ch.(*rtype)
1434 // This restriction is imposed by the gc compiler and the runtime.
1435 if typ.size >= 1<<16 {
1436 panic("reflect.ChanOf: element size too large")
1439 // Look in known types.
1440 // TODO: Precedence when constructing string.
1441 var s string
1442 switch dir {
1443 default:
1444 panic("reflect.ChanOf: invalid dir")
1445 case SendDir:
1446 s = "chan<- " + *typ.string
1447 case RecvDir:
1448 s = "<-chan " + *typ.string
1449 case BothDir:
1450 s = "chan " + *typ.string
1453 // Make a channel type.
1454 var ichan interface{} = (chan unsafe.Pointer)(nil)
1455 prototype := *(**chanType)(unsafe.Pointer(&ichan))
1456 ch := *prototype
1457 ch.dir = uintptr(dir)
1458 ch.string = &s
1460 // gccgo uses a different hash.
1461 // ch.hash = fnv1(typ.hash, 'c', byte(dir))
1462 ch.hash = 0
1463 if dir&SendDir != 0 {
1464 ch.hash += 1
1466 if dir&RecvDir != 0 {
1467 ch.hash += 2
1469 ch.hash += typ.hash << 2
1470 ch.hash <<= 3
1471 ch.hash += 15
1473 ch.elem = typ
1474 ch.uncommonType = nil
1475 ch.ptrToThis = nil
1477 // Canonicalize before storing in lookupCache
1478 ti := toType(&ch.rtype)
1479 lookupCache.Store(ckey, ti.(*rtype))
1480 return ti
// ismapkey reports whether the type supports the == operator and can
// therefore be used as a map key. Implemented in the runtime.
func ismapkey(*rtype) bool // implemented in runtime
1485 // MapOf returns the map type with the given key and element types.
1486 // For example, if k represents int and e represents string,
1487 // MapOf(k, e) represents map[int]string.
1489 // If the key type is not a valid map key type (that is, if it does
1490 // not implement Go's == operator), MapOf panics.
1491 func MapOf(key, elem Type) Type {
1492 ktyp := key.(*rtype)
1493 etyp := elem.(*rtype)
1495 if !ismapkey(ktyp) {
1496 panic("reflect.MapOf: invalid key type " + ktyp.String())
1499 // Look in cache.
1500 ckey := cacheKey{Map, ktyp, etyp, 0}
1501 if mt, ok := lookupCache.Load(ckey); ok {
1502 return mt.(Type)
1505 // Look in known types.
1506 s := "map[" + *ktyp.string + "]" + *etyp.string
1508 // Make a map type.
1509 var imap interface{} = (map[unsafe.Pointer]unsafe.Pointer)(nil)
1510 mt := **(**mapType)(unsafe.Pointer(&imap))
1511 mt.string = &s
1513 // gccgo uses a different hash
1514 // mt.hash = fnv1(etyp.hash, 'm', byte(ktyp.hash>>24), byte(ktyp.hash>>16), byte(ktyp.hash>>8), byte(ktyp.hash))
1515 mt.hash = ktyp.hash + etyp.hash + 2 + 14
1517 mt.key = ktyp
1518 mt.elem = etyp
1519 mt.uncommonType = nil
1520 mt.ptrToThis = nil
1522 mt.bucket = bucketOf(ktyp, etyp)
1523 if ktyp.size > maxKeySize {
1524 mt.keysize = uint8(ptrSize)
1525 mt.indirectkey = 1
1526 } else {
1527 mt.keysize = uint8(ktyp.size)
1528 mt.indirectkey = 0
1530 if etyp.size > maxValSize {
1531 mt.valuesize = uint8(ptrSize)
1532 mt.indirectvalue = 1
1533 } else {
1534 mt.valuesize = uint8(etyp.size)
1535 mt.indirectvalue = 0
1537 mt.bucketsize = uint16(mt.bucket.size)
1538 mt.reflexivekey = isReflexive(ktyp)
1539 mt.needkeyupdate = needKeyUpdate(ktyp)
1541 // Canonicalize before storing in lookupCache
1542 ti := toType(&mt.rtype)
1543 lookupCache.Store(ckey, ti.(*rtype))
1544 return ti
1547 // FuncOf returns the function type with the given argument and result types.
1548 // For example if k represents int and e represents string,
1549 // FuncOf([]Type{k}, []Type{e}, false) represents func(int) string.
1551 // The variadic argument controls whether the function is variadic. FuncOf
1552 // panics if the in[len(in)-1] does not represent a slice and variadic is
1553 // true.
1554 func FuncOf(in, out []Type, variadic bool) Type {
1555 if variadic && (len(in) == 0 || in[len(in)-1].Kind() != Slice) {
1556 panic("reflect.FuncOf: last arg of variadic func must be slice")
1559 // Make a func type.
1560 var ifunc interface{} = (func())(nil)
1561 prototype := *(**funcType)(unsafe.Pointer(&ifunc))
1562 ft := new(funcType)
1563 *ft = *prototype
1565 // Build a hash and minimally populate ft.
1566 var hash uint32
1567 var fin, fout []*rtype
1568 shift := uint(1)
1569 for _, in := range in {
1570 t := in.(*rtype)
1571 fin = append(fin, t)
1572 hash += t.hash << shift
1573 shift++
1575 shift = 2
1576 for _, out := range out {
1577 t := out.(*rtype)
1578 fout = append(fout, t)
1579 hash += t.hash << shift
1580 shift++
1582 if variadic {
1583 hash++
1585 hash <<= 4
1586 hash += 8
1587 ft.hash = hash
1588 ft.in = fin
1589 ft.out = fout
1590 ft.dotdotdot = variadic
1592 // Look in cache.
1593 if ts, ok := funcLookupCache.m.Load(hash); ok {
1594 for _, t := range ts.([]*rtype) {
1595 if haveIdenticalUnderlyingType(&ft.rtype, t, true) {
1596 return t
1601 // Not in cache, lock and retry.
1602 funcLookupCache.Lock()
1603 defer funcLookupCache.Unlock()
1604 if ts, ok := funcLookupCache.m.Load(hash); ok {
1605 for _, t := range ts.([]*rtype) {
1606 if haveIdenticalUnderlyingType(&ft.rtype, t, true) {
1607 return t
1612 addToCache := func(tt *rtype) Type {
1613 var rts []*rtype
1614 if rti, ok := funcLookupCache.m.Load(hash); ok {
1615 rts = rti.([]*rtype)
1617 funcLookupCache.m.Store(hash, append(rts, tt))
1618 return tt
1621 str := funcStr(ft)
1623 // Populate the remaining fields of ft and store in cache.
1624 ft.string = &str
1625 ft.uncommonType = nil
1626 ft.ptrToThis = nil
1628 // Canonicalize before storing in funcLookupCache
1629 tc := toType(&ft.rtype)
1630 return addToCache(tc.(*rtype))
1633 // funcStr builds a string representation of a funcType.
1634 func funcStr(ft *funcType) string {
1635 repr := make([]byte, 0, 64)
1636 repr = append(repr, "func("...)
1637 for i, t := range ft.in {
1638 if i > 0 {
1639 repr = append(repr, ", "...)
1641 if ft.dotdotdot && i == len(ft.in)-1 {
1642 repr = append(repr, "..."...)
1643 repr = append(repr, *(*sliceType)(unsafe.Pointer(t)).elem.string...)
1644 } else {
1645 repr = append(repr, *t.string...)
1648 repr = append(repr, ')')
1649 if l := len(ft.out); l == 1 {
1650 repr = append(repr, ' ')
1651 } else if l > 1 {
1652 repr = append(repr, " ("...)
1654 for i, t := range ft.out {
1655 if i > 0 {
1656 repr = append(repr, ", "...)
1658 repr = append(repr, *t.string...)
1660 if len(ft.out) > 1 {
1661 repr = append(repr, ')')
1663 return string(repr)
1666 // isReflexive reports whether the == operation on the type is reflexive.
1667 // That is, x == x for all values x of type t.
1668 func isReflexive(t *rtype) bool {
1669 switch t.Kind() {
1670 case Bool, Int, Int8, Int16, Int32, Int64, Uint, Uint8, Uint16, Uint32, Uint64, Uintptr, Chan, Ptr, String, UnsafePointer:
1671 return true
1672 case Float32, Float64, Complex64, Complex128, Interface:
1673 return false
1674 case Array:
1675 tt := (*arrayType)(unsafe.Pointer(t))
1676 return isReflexive(tt.elem)
1677 case Struct:
1678 tt := (*structType)(unsafe.Pointer(t))
1679 for _, f := range tt.fields {
1680 if !isReflexive(f.typ) {
1681 return false
1684 return true
1685 default:
1686 // Func, Map, Slice, Invalid
1687 panic("isReflexive called on non-key type " + t.String())
1691 // needKeyUpdate reports whether map overwrites require the key to be copied.
1692 func needKeyUpdate(t *rtype) bool {
1693 switch t.Kind() {
1694 case Bool, Int, Int8, Int16, Int32, Int64, Uint, Uint8, Uint16, Uint32, Uint64, Uintptr, Chan, Ptr, UnsafePointer:
1695 return false
1696 case Float32, Float64, Complex64, Complex128, Interface, String:
1697 // Float keys can be updated from +0 to -0.
1698 // String keys can be updated to use a smaller backing store.
1699 // Interfaces might have floats of strings in them.
1700 return true
1701 case Array:
1702 tt := (*arrayType)(unsafe.Pointer(t))
1703 return needKeyUpdate(tt.elem)
1704 case Struct:
1705 tt := (*structType)(unsafe.Pointer(t))
1706 for _, f := range tt.fields {
1707 if needKeyUpdate(f.typ) {
1708 return true
1711 return false
1712 default:
1713 // Func, Map, Slice, Invalid
1714 panic("needKeyUpdate called on non-key type " + t.String())
// Make sure these routines stay in sync with ../../runtime/map.go!
// These types exist only for GC, so we only fill out GC relevant info.
// Currently, that's just size and the GC program. We also fill in string
// for possible debugging use.
const (
	bucketSize uintptr = 8   // entries per hashmap bucket
	maxKeySize uintptr = 128 // keys larger than this are stored indirectly
	maxValSize uintptr = 128 // values larger than this are stored indirectly
)
1728 func bucketOf(ktyp, etyp *rtype) *rtype {
1729 // See comment on hmap.overflow in ../runtime/map.go.
1730 var kind uint8
1731 if ktyp.kind&kindNoPointers != 0 && etyp.kind&kindNoPointers != 0 &&
1732 ktyp.size <= maxKeySize && etyp.size <= maxValSize {
1733 kind = kindNoPointers
1736 if ktyp.size > maxKeySize {
1737 ktyp = PtrTo(ktyp).(*rtype)
1739 if etyp.size > maxValSize {
1740 etyp = PtrTo(etyp).(*rtype)
1743 // Prepare GC data if any.
1744 // A bucket is at most bucketSize*(1+maxKeySize+maxValSize)+2*ptrSize bytes,
1745 // or 2072 bytes, or 259 pointer-size words, or 33 bytes of pointer bitmap.
1746 // Note that since the key and value are known to be <= 128 bytes,
1747 // they're guaranteed to have bitmaps instead of GC programs.
1748 var gcdata *byte
1749 var ptrdata uintptr
1751 size := bucketSize
1752 size = align(size, uintptr(ktyp.fieldAlign))
1753 size += bucketSize * ktyp.size
1754 size = align(size, uintptr(etyp.fieldAlign))
1755 size += bucketSize * etyp.size
1757 maxAlign := uintptr(ktyp.fieldAlign)
1758 if maxAlign < uintptr(etyp.fieldAlign) {
1759 maxAlign = uintptr(etyp.fieldAlign)
1761 if maxAlign > ptrSize {
1762 size = align(size, maxAlign)
1763 size += align(ptrSize, maxAlign) - ptrSize
1764 } else if maxAlign < ptrSize {
1765 size = align(size, ptrSize)
1766 maxAlign = ptrSize
1769 ovoff := size
1770 size += ptrSize
1772 if kind != kindNoPointers {
1773 nptr := size / ptrSize
1774 mask := make([]byte, (nptr+7)/8)
1775 psize := bucketSize
1776 psize = align(psize, uintptr(ktyp.fieldAlign))
1777 base := psize / ptrSize
1779 if ktyp.kind&kindNoPointers == 0 {
1780 if ktyp.kind&kindGCProg != 0 {
1781 panic("reflect: unexpected GC program in MapOf")
1783 kmask := (*[16]byte)(unsafe.Pointer(ktyp.gcdata))
1784 for i := uintptr(0); i < ktyp.ptrdata/ptrSize; i++ {
1785 if (kmask[i/8]>>(i%8))&1 != 0 {
1786 for j := uintptr(0); j < bucketSize; j++ {
1787 word := base + j*ktyp.size/ptrSize + i
1788 mask[word/8] |= 1 << (word % 8)
1793 psize += bucketSize * ktyp.size
1794 psize = align(psize, uintptr(etyp.fieldAlign))
1795 base = psize / ptrSize
1797 if etyp.kind&kindNoPointers == 0 {
1798 if etyp.kind&kindGCProg != 0 {
1799 panic("reflect: unexpected GC program in MapOf")
1801 emask := (*[16]byte)(unsafe.Pointer(etyp.gcdata))
1802 for i := uintptr(0); i < etyp.ptrdata/ptrSize; i++ {
1803 if (emask[i/8]>>(i%8))&1 != 0 {
1804 for j := uintptr(0); j < bucketSize; j++ {
1805 word := base + j*etyp.size/ptrSize + i
1806 mask[word/8] |= 1 << (word % 8)
1812 word := ovoff / ptrSize
1813 mask[word/8] |= 1 << (word % 8)
1814 gcdata = &mask[0]
1815 ptrdata = (word + 1) * ptrSize
1817 // overflow word must be last
1818 if ptrdata != size {
1819 panic("reflect: bad layout computation in MapOf")
1823 b := &rtype{
1824 align: int8(maxAlign),
1825 fieldAlign: uint8(maxAlign),
1826 size: size,
1827 kind: kind,
1828 ptrdata: ptrdata,
1829 gcdata: gcdata,
1831 s := "bucket(" + *ktyp.string + "," + *etyp.string + ")"
1832 b.string = &s
1833 return b
1836 // SliceOf returns the slice type with element type t.
1837 // For example, if t represents int, SliceOf(t) represents []int.
1838 func SliceOf(t Type) Type {
1839 typ := t.(*rtype)
1841 // Look in cache.
1842 ckey := cacheKey{Slice, typ, nil, 0}
1843 if slice, ok := lookupCache.Load(ckey); ok {
1844 return slice.(Type)
1847 // Look in known types.
1848 s := "[]" + *typ.string
1850 // Make a slice type.
1851 var islice interface{} = ([]unsafe.Pointer)(nil)
1852 prototype := *(**sliceType)(unsafe.Pointer(&islice))
1853 slice := *prototype
1854 slice.string = &s
1856 // gccgo uses a different hash.
1857 // slice.hash = fnv1(typ.hash, '[')
1858 slice.hash = typ.hash + 1 + 13
1860 slice.elem = typ
1861 slice.uncommonType = nil
1862 slice.ptrToThis = nil
1864 // Canonicalize before storing in lookupCache
1865 ti := toType(&slice.rtype)
1866 lookupCache.Store(ckey, ti.(*rtype))
1867 return ti
// The structLookupCache caches StructOf lookups.
// StructOf does not share the common lookupCache since we need to pin
// the memory associated with *structTypeFixedN.
var structLookupCache struct {
	sync.Mutex // Guards stores (but not loads) on m.

	// m is a map[uint32][]Type keyed by the hash calculated in StructOf.
	// Elements in m are append-only and thus safe for concurrent reading.
	m sync.Map
}
// isLetter returns true if a given 'rune' is classified as a Letter.
func isLetter(ch rune) bool {
	return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' || ch >= utf8.RuneSelf && unicode.IsLetter(ch)
}

// isValidFieldName checks if a string is a valid (struct) field name or not.
//
// According to the language spec, a field name should be an identifier.
//
// identifier = letter { letter | unicode_digit } .
// letter = unicode_letter | "_" .
func isValidFieldName(fieldName string) bool {
	for i, c := range fieldName {
		if i == 0 && !isLetter(c) {
			return false
		}

		if !(isLetter(c) || unicode.IsDigit(c)) {
			return false
		}
	}

	// The empty string is not a valid identifier.
	return len(fieldName) > 0
}
1906 // StructOf returns the struct type containing fields.
1907 // The Offset and Index fields are ignored and computed as they would be
1908 // by the compiler.
1910 // StructOf currently does not generate wrapper methods for embedded
1911 // fields and panics if passed unexported StructFields.
1912 // These limitations may be lifted in a future version.
1913 func StructOf(fields []StructField) Type {
1914 var (
1915 hash = uint32(12)
1916 size uintptr
1917 typalign int8
1918 comparable = true
1919 hashable = true
1921 fs = make([]structField, len(fields))
1922 repr = make([]byte, 0, 64)
1923 fset = map[string]struct{}{} // fields' names
1925 hasPtr = false // records whether at least one struct-field is a pointer
1926 hasGCProg = false // records whether a struct-field type has a GCProg
1929 lastzero := uintptr(0)
1930 repr = append(repr, "struct {"...)
1931 for i, field := range fields {
1932 if field.Name == "" {
1933 panic("reflect.StructOf: field " + strconv.Itoa(i) + " has no name")
1935 if !isValidFieldName(field.Name) {
1936 panic("reflect.StructOf: field " + strconv.Itoa(i) + " has invalid name")
1938 if field.Type == nil {
1939 panic("reflect.StructOf: field " + strconv.Itoa(i) + " has no type")
1941 f := runtimeStructField(field)
1942 ft := f.typ
1943 if ft.kind&kindGCProg != 0 {
1944 hasGCProg = true
1946 if ft.pointers() {
1947 hasPtr = true
1950 // Update string and hash
1951 name := *f.name
1952 hash = (hash << 1) + ft.hash
1953 if !f.embedded() {
1954 repr = append(repr, (" " + name)...)
1955 } else {
1956 // Embedded field
1957 repr = append(repr, " ?"...)
1958 if f.typ.Kind() == Ptr {
1959 // Embedded ** and *interface{} are illegal
1960 elem := ft.Elem()
1961 if k := elem.Kind(); k == Ptr || k == Interface {
1962 panic("reflect.StructOf: illegal embedded field type " + ft.String())
1964 name = elem.String()
1965 } else {
1966 name = ft.String()
1969 switch f.typ.Kind() {
1970 case Interface:
1971 ift := (*interfaceType)(unsafe.Pointer(ft))
1972 if len(ift.methods) > 0 {
1973 panic("reflect.StructOf: embedded field with methods not implemented")
1975 case Ptr:
1976 ptr := (*ptrType)(unsafe.Pointer(ft))
1977 if unt := ptr.uncommon(); unt != nil {
1978 if len(unt.methods) > 0 {
1979 panic("reflect.StructOf: embedded field with methods not implemented")
1982 if unt := ptr.elem.uncommon(); unt != nil {
1983 if len(unt.methods) > 0 {
1984 panic("reflect.StructOf: embedded field with methods not implemented")
1987 default:
1988 if unt := ft.uncommon(); unt != nil {
1989 if len(unt.methods) > 0 {
1990 panic("reflect.StructOf: embedded field with methods not implemented")
1995 if _, dup := fset[name]; dup {
1996 panic("reflect.StructOf: duplicate field " + name)
1998 fset[name] = struct{}{}
2000 repr = append(repr, (" " + *ft.string)...)
2001 if f.tag != nil {
2002 repr = append(repr, (" " + strconv.Quote(*f.tag))...)
2004 if i < len(fields)-1 {
2005 repr = append(repr, ';')
2008 comparable = comparable && (ft.equalfn != nil)
2009 hashable = hashable && (ft.hashfn != nil)
2011 offset := align(size, uintptr(ft.fieldAlign))
2012 if int8(ft.fieldAlign) > typalign {
2013 typalign = int8(ft.fieldAlign)
2015 size = offset + ft.size
2016 f.offsetEmbed |= offset << 1
2018 if ft.size == 0 {
2019 lastzero = size
2022 fs[i] = f
2025 if size > 0 && lastzero == size {
2026 // This is a non-zero sized struct that ends in a
2027 // zero-sized field. We add an extra byte of padding,
2028 // to ensure that taking the address of the final
2029 // zero-sized field can't manufacture a pointer to the
2030 // next object in the heap. See issue 9401.
2031 size++
2034 if len(fs) > 0 {
2035 repr = append(repr, ' ')
2037 repr = append(repr, '}')
2038 hash <<= 2
2039 str := string(repr)
2041 // Round the size up to be a multiple of the alignment.
2042 size = align(size, uintptr(typalign))
2044 // Make the struct type.
2045 var istruct interface{} = struct{}{}
2046 prototype := *(**structType)(unsafe.Pointer(&istruct))
2047 typ := new(structType)
2048 *typ = *prototype
2049 typ.fields = fs
2051 // Look in cache.
2052 if ts, ok := structLookupCache.m.Load(hash); ok {
2053 for _, st := range ts.([]Type) {
2054 t := st.common()
2055 if haveIdenticalUnderlyingType(&typ.rtype, t, true) {
2056 return t
2061 // Not in cache, lock and retry.
2062 structLookupCache.Lock()
2063 defer structLookupCache.Unlock()
2064 if ts, ok := structLookupCache.m.Load(hash); ok {
2065 for _, st := range ts.([]Type) {
2066 t := st.common()
2067 if haveIdenticalUnderlyingType(&typ.rtype, t, true) {
2068 return t
2073 addToCache := func(t Type) Type {
2074 var ts []Type
2075 if ti, ok := structLookupCache.m.Load(hash); ok {
2076 ts = ti.([]Type)
2078 structLookupCache.m.Store(hash, append(ts, t))
2079 return t
2082 typ.string = &str
2083 typ.hash = hash
2084 typ.size = size
2085 typ.align = typalign
2086 typ.fieldAlign = uint8(typalign)
2087 if !hasPtr {
2088 typ.kind |= kindNoPointers
2089 } else {
2090 typ.kind &^= kindNoPointers
2093 if hasGCProg {
2094 lastPtrField := 0
2095 for i, ft := range fs {
2096 if ft.typ.pointers() {
2097 lastPtrField = i
2100 prog := []byte{0, 0, 0, 0} // will be length of prog
2101 for i, ft := range fs {
2102 if i > lastPtrField {
2103 // gcprog should not include anything for any field after
2104 // the last field that contains pointer data
2105 break
2107 // FIXME(sbinet) handle padding, fields smaller than a word
2108 elemGC := (*[1 << 30]byte)(unsafe.Pointer(ft.typ.gcdata))[:]
2109 elemPtrs := ft.typ.ptrdata / ptrSize
2110 switch {
2111 case ft.typ.kind&kindGCProg == 0 && ft.typ.ptrdata != 0:
2112 // Element is small with pointer mask; use as literal bits.
2113 mask := elemGC
2114 // Emit 120-bit chunks of full bytes (max is 127 but we avoid using partial bytes).
2115 var n uintptr
2116 for n := elemPtrs; n > 120; n -= 120 {
2117 prog = append(prog, 120)
2118 prog = append(prog, mask[:15]...)
2119 mask = mask[15:]
2121 prog = append(prog, byte(n))
2122 prog = append(prog, mask[:(n+7)/8]...)
2123 case ft.typ.kind&kindGCProg != 0:
2124 // Element has GC program; emit one element.
2125 elemProg := elemGC[4 : 4+*(*uint32)(unsafe.Pointer(&elemGC[0]))-1]
2126 prog = append(prog, elemProg...)
2128 // Pad from ptrdata to size.
2129 elemWords := ft.typ.size / ptrSize
2130 if elemPtrs < elemWords {
2131 // Emit literal 0 bit, then repeat as needed.
2132 prog = append(prog, 0x01, 0x00)
2133 if elemPtrs+1 < elemWords {
2134 prog = append(prog, 0x81)
2135 prog = appendVarint(prog, elemWords-elemPtrs-1)
2139 *(*uint32)(unsafe.Pointer(&prog[0])) = uint32(len(prog) - 4)
2140 typ.kind |= kindGCProg
2141 typ.gcdata = &prog[0]
2142 } else {
2143 typ.kind &^= kindGCProg
2144 bv := new(bitVector)
2145 addTypeBits(bv, 0, typ.common())
2146 if len(bv.data) > 0 {
2147 typ.gcdata = &bv.data[0]
2150 typ.ptrdata = typeptrdata(typ.common())
2152 if hashable {
2153 typ.hashfn = func(p unsafe.Pointer, seed uintptr) uintptr {
2154 o := seed
2155 for _, ft := range typ.fields {
2156 pi := add(p, ft.offset(), "&x.field safe")
2157 o = ft.typ.hashfn(pi, o)
2159 return o
2161 } else {
2162 typ.hashfn = nil
2165 if comparable {
2166 typ.equalfn = func(p, q unsafe.Pointer) bool {
2167 for _, ft := range typ.fields {
2168 pi := add(p, ft.offset(), "&x.field safe")
2169 qi := add(q, ft.offset(), "&x.field safe")
2170 if !ft.typ.equalfn(pi, qi) {
2171 return false
2174 return true
2176 } else {
2177 typ.equalfn = nil
2180 typ.kind &^= kindDirectIface
2181 typ.uncommonType = nil
2182 typ.ptrToThis = nil
2184 // Canonicalize before storing in structLookupCache
2185 ti := toType(&typ.rtype)
2186 return addToCache(ti.(*rtype))
2189 func runtimeStructField(field StructField) structField {
2190 if field.PkgPath != "" {
2191 panic("reflect.StructOf: StructOf does not allow unexported fields")
2194 // Best-effort check for misuse.
2195 // Since PkgPath is empty, not much harm done if Unicode lowercase slips through.
2196 c := field.Name[0]
2197 if 'a' <= c && c <= 'z' || c == '_' {
2198 panic("reflect.StructOf: field \"" + field.Name + "\" is unexported but missing PkgPath")
2201 offsetEmbed := uintptr(0)
2202 if field.Anonymous {
2203 offsetEmbed |= 1
2206 s := field.Name
2207 name := &s
2209 var tag *string
2210 if field.Tag != "" {
2211 st := string(field.Tag)
2212 tag = &st
2215 return structField{
2216 name: name,
2217 pkgPath: nil,
2218 typ: field.Type.common(),
2219 tag: tag,
2220 offsetEmbed: offsetEmbed,
2224 // typeptrdata returns the length in bytes of the prefix of t
2225 // containing pointer data. Anything after this offset is scalar data.
2226 // keep in sync with ../cmd/compile/internal/gc/reflect.go
2227 func typeptrdata(t *rtype) uintptr {
2228 if !t.pointers() {
2229 return 0
2231 switch t.Kind() {
2232 case Struct:
2233 st := (*structType)(unsafe.Pointer(t))
2234 // find the last field that has pointers.
2235 field := 0
2236 for i := range st.fields {
2237 ft := st.fields[i].typ
2238 if ft.pointers() {
2239 field = i
2242 f := st.fields[field]
2243 return f.offset() + f.typ.ptrdata
2245 default:
2246 panic("reflect.typeptrdata: unexpected type, " + t.String())
// maxPtrmaskBytes is the largest pointer bitmask ArrayOf will build
// directly; larger arrays fall back to emitting a GC program.
// See cmd/compile/internal/gc/reflect.go for derivation of constant.
const maxPtrmaskBytes = 2048
// ArrayOf returns the array type with the given count and element type.
// For example, if t represents int, ArrayOf(5, t) represents [5]int.
//
// If the resulting type would be larger than the available address space,
// ArrayOf panics.
func ArrayOf(count int, elem Type) Type {
	typ := elem.(*rtype)

	// Look in cache.
	ckey := cacheKey{Array, typ, nil, uintptr(count)}
	if array, ok := lookupCache.Load(ckey); ok {
		return array.(Type)
	}

	// Look in known types.
	s := "[" + strconv.Itoa(count) + "]" + *typ.string

	// Make an array type.
	// Start from the prototype [1]unsafe.Pointer and patch every field.
	var iarray interface{} = [1]unsafe.Pointer{}
	prototype := *(**arrayType)(unsafe.Pointer(&iarray))
	array := *prototype
	array.string = &s

	// gccgo uses a different hash.
	// array.hash = fnv1(typ.hash, '[')
	// for n := uint32(count); n > 0; n >>= 8 {
	// 	array.hash = fnv1(array.hash, byte(n))
	// }
	// array.hash = fnv1(array.hash, ']')
	array.hash = typ.hash + 1 + 13

	array.elem = typ
	array.ptrToThis = nil
	if typ.size > 0 {
		// Overflow check: count*size must fit in a uintptr.
		max := ^uintptr(0) / typ.size
		if uintptr(count) > max {
			panic("reflect.ArrayOf: array size would exceed virtual address space")
		}
	}
	array.size = typ.size * uintptr(count)
	if count > 0 && typ.ptrdata != 0 {
		// Pointer data ends inside the last element, at its ptrdata offset.
		array.ptrdata = typ.size*uintptr(count-1) + typ.ptrdata
	}
	array.align = typ.align
	array.fieldAlign = typ.fieldAlign
	array.uncommonType = nil
	array.len = uintptr(count)
	array.slice = SliceOf(elem).(*rtype)

	// Choose the GC metadata representation: none, the element's own,
	// a direct pointer bitmask, or a GC program.
	array.kind &^= kindNoPointers
	switch {
	case typ.kind&kindNoPointers != 0 || array.size == 0:
		// No pointers.
		array.kind |= kindNoPointers
		array.gcdata = nil
		array.ptrdata = 0

	case count == 1:
		// In memory, 1-element array looks just like the element.
		array.kind |= typ.kind & kindGCProg
		array.gcdata = typ.gcdata
		array.ptrdata = typ.ptrdata

	case typ.kind&kindGCProg == 0 && array.size <= maxPtrmaskBytes*8*ptrSize:
		// Element is small with pointer mask; array is still small.
		// Create direct pointer mask by turning each 1 bit in elem
		// into count 1 bits in larger mask.
		mask := make([]byte, (array.ptrdata/ptrSize+7)/8)
		elemMask := (*[1 << 30]byte)(unsafe.Pointer(typ.gcdata))[:]
		elemWords := typ.size / ptrSize
		for j := uintptr(0); j < typ.ptrdata/ptrSize; j++ {
			if (elemMask[j/8]>>(j%8))&1 != 0 {
				for i := uintptr(0); i < array.len; i++ {
					k := i*elemWords + j
					mask[k/8] |= 1 << (k % 8)
				}
			}
		}
		array.gcdata = &mask[0]

	default:
		// Create program that emits one element
		// and then repeats to make the array.
		prog := []byte{0, 0, 0, 0} // will be length of prog
		elemGC := (*[1 << 30]byte)(unsafe.Pointer(typ.gcdata))[:]
		elemPtrs := typ.ptrdata / ptrSize
		if typ.kind&kindGCProg == 0 {
			// Element is small with pointer mask; use as literal bits.
			mask := elemGC
			// Emit 120-bit chunks of full bytes (max is 127 but we avoid using partial bytes).
			var n uintptr
			for n = elemPtrs; n > 120; n -= 120 {
				prog = append(prog, 120)
				prog = append(prog, mask[:15]...)
				mask = mask[15:]
			}
			prog = append(prog, byte(n))
			prog = append(prog, mask[:(n+7)/8]...)
		} else {
			// Element has GC program; emit one element.
			// The element's prog is length-prefixed (uint32) and
			// terminated by a 0 byte, which we strip here.
			elemProg := elemGC[4 : 4+*(*uint32)(unsafe.Pointer(&elemGC[0]))-1]
			prog = append(prog, elemProg...)
		}
		// Pad from ptrdata to size.
		elemWords := typ.size / ptrSize
		if elemPtrs < elemWords {
			// Emit literal 0 bit, then repeat as needed.
			prog = append(prog, 0x01, 0x00)
			if elemPtrs+1 < elemWords {
				// 0x81 = repeat-1-bit opcode; varint count follows.
				prog = append(prog, 0x81)
				prog = appendVarint(prog, elemWords-elemPtrs-1)
			}
		}
		// Repeat count-1 times.
		if elemWords < 0x80 {
			prog = append(prog, byte(elemWords|0x80))
		} else {
			prog = append(prog, 0x80)
			prog = appendVarint(prog, elemWords)
		}
		prog = appendVarint(prog, uintptr(count)-1)
		prog = append(prog, 0)
		// Backpatch the length prefix now that the program is complete.
		*(*uint32)(unsafe.Pointer(&prog[0])) = uint32(len(prog) - 4)
		array.kind |= kindGCProg
		array.gcdata = &prog[0]
		array.ptrdata = array.size // overestimate but ok; must match program
	}

	array.kind &^= kindDirectIface

	esize := typ.size

	// Arrays are comparable/hashable iff their element type is; the
	// derived functions apply the element's function per element.
	if typ.equalfn == nil {
		array.equalfn = nil
	} else {
		eequal := typ.equalfn
		array.equalfn = func(p, q unsafe.Pointer) bool {
			for i := 0; i < count; i++ {
				pi := arrayAt(p, i, esize, "i < count")
				qi := arrayAt(q, i, esize, "i < count")
				if !eequal(pi, qi) {
					return false
				}
			}
			return true
		}
	}

	if typ.hashfn == nil {
		array.hashfn = nil
	} else {
		ehash := typ.hashfn
		array.hashfn = func(ptr unsafe.Pointer, seed uintptr) uintptr {
			o := seed
			for i := 0; i < count; i++ {
				o = ehash(arrayAt(ptr, i, esize, "i < count"), o)
			}
			return o
		}
	}

	// Canonicalize before storing in lookupCache
	ti := toType(&array.rtype)
	lookupCache.Store(ckey, ti.(*rtype))
	return ti
}
// appendVarint appends v to x using the GC program varint encoding:
// seven value bits per byte, least significant group first, with the
// high bit set on every byte except the last.
func appendVarint(x []byte, v uintptr) []byte {
	for v >= 0x80 {
		x = append(x, byte(v&0x7f|0x80))
		v >>= 7
	}
	return append(x, byte(v))
}
// toType converts from a *rtype to a Type that can be returned
// to the client of package reflect. In gc, the only concern is that
// a nil *rtype must be replaced by a nil Type, but in gccgo this
// function takes care of ensuring that multiple *rtype for the same
// type are coalesced into a single Type.

// canonicalType maps a type's raw string representation to the single
// canonical Type value used for it, so Type values compare equal with ==.
var canonicalType = make(map[string]Type)

// canonicalTypeLock guards all access to canonicalType.
var canonicalTypeLock sync.RWMutex
2437 func canonicalize(t Type) Type {
2438 if t == nil {
2439 return nil
2441 s := t.rawString()
2442 canonicalTypeLock.RLock()
2443 if r, ok := canonicalType[s]; ok {
2444 canonicalTypeLock.RUnlock()
2445 return r
2447 canonicalTypeLock.RUnlock()
2448 canonicalTypeLock.Lock()
2449 if r, ok := canonicalType[s]; ok {
2450 canonicalTypeLock.Unlock()
2451 return r
2453 canonicalType[s] = t
2454 canonicalTypeLock.Unlock()
2455 return t
2458 func toType(p *rtype) Type {
2459 if p == nil {
2460 return nil
2462 return canonicalize(p)
// ifaceIndir reports whether t is stored indirectly in an interface value.
// A type is stored directly only when its kindDirectIface bit is set.
func ifaceIndir(t *rtype) bool {
	return t.kind&kindDirectIface == 0
}
// Layout matches runtime.gobitvector (well enough).
type bitVector struct {
	n    uint32 // number of bits
	data []byte
}

// append a bit to the bitmap.
func (bv *bitVector) append(bit uint8) {
	// Extend the backing slice each time a fresh byte is needed.
	if bv.n%8 == 0 {
		bv.data = append(bv.data, 0)
	}
	byteIdx, bitIdx := bv.n/8, bv.n%8
	bv.data[byteIdx] |= bit << bitIdx
	bv.n++
}
2485 func addTypeBits(bv *bitVector, offset uintptr, t *rtype) {
2486 if t.kind&kindNoPointers != 0 {
2487 return
2490 switch Kind(t.kind & kindMask) {
2491 case Chan, Func, Map, Ptr, Slice, String, UnsafePointer:
2492 // 1 pointer at start of representation
2493 for bv.n < uint32(offset/uintptr(ptrSize)) {
2494 bv.append(0)
2496 bv.append(1)
2498 case Interface:
2499 // 2 pointers
2500 for bv.n < uint32(offset/uintptr(ptrSize)) {
2501 bv.append(0)
2503 bv.append(1)
2504 bv.append(1)
2506 case Array:
2507 // repeat inner type
2508 tt := (*arrayType)(unsafe.Pointer(t))
2509 for i := 0; i < int(tt.len); i++ {
2510 addTypeBits(bv, offset+uintptr(i)*tt.elem.size, tt.elem)
2513 case Struct:
2514 // apply fields
2515 tt := (*structType)(unsafe.Pointer(t))
2516 for i := range tt.fields {
2517 f := &tt.fields[i]
2518 addTypeBits(bv, offset+f.offset(), f.typ)