reflect: fix StructOf hash and string
[official-gcc.git] / libgo / go / reflect / type.go
blob07fe4d001c0e5a1a32c0ab083927fbb1a53eb567
1 // Copyright 2009 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
5 // Package reflect implements run-time reflection, allowing a program to
6 // manipulate objects with arbitrary types. The typical use is to take a value
7 // with static type interface{} and extract its dynamic type information by
8 // calling TypeOf, which returns a Type.
9 //
10 // A call to ValueOf returns a Value representing the run-time data.
11 // Zero takes a Type and returns a Value representing a zero value
12 // for that type.
14 // See "The Laws of Reflection" for an introduction to reflection in Go:
15 // https://golang.org/doc/articles/laws_of_reflection.html
16 package reflect
18 import (
19 "strconv"
20 "sync"
21 "unicode"
22 "unicode/utf8"
23 "unsafe"
// Type is the representation of a Go type.
//
// Not all methods apply to all kinds of types. Restrictions,
// if any, are noted in the documentation for each method.
// Use the Kind method to find out the kind of type before
// calling kind-specific methods. Calling a method
// inappropriate to the kind of type causes a run-time panic.
//
// Type values are comparable, such as with the == operator,
// so they can be used as map keys.
// Two Type values are equal if they represent identical types.
type Type interface {
	// Methods applicable to all types.

	// Align returns the alignment in bytes of a value of
	// this type when allocated in memory.
	Align() int

	// FieldAlign returns the alignment in bytes of a value of
	// this type when used as a field in a struct.
	FieldAlign() int

	// Method returns the i'th method in the type's method set.
	// It panics if i is not in the range [0, NumMethod()).
	//
	// For a non-interface type T or *T, the returned Method's Type and Func
	// fields describe a function whose first argument is the receiver.
	//
	// For an interface type, the returned Method's Type field gives the
	// method signature, without a receiver, and the Func field is nil.
	Method(int) Method

	// MethodByName returns the method with that name in the type's
	// method set and a boolean indicating if the method was found.
	//
	// For a non-interface type T or *T, the returned Method's Type and Func
	// fields describe a function whose first argument is the receiver.
	//
	// For an interface type, the returned Method's Type field gives the
	// method signature, without a receiver, and the Func field is nil.
	MethodByName(string) (Method, bool)

	// NumMethod returns the number of exported methods in the type's method set.
	NumMethod() int

	// Name returns the type's name within its package.
	// It returns an empty string for unnamed types.
	Name() string

	// PkgPath returns a named type's package path, that is, the import path
	// that uniquely identifies the package, such as "encoding/base64".
	// If the type was predeclared (string, error) or unnamed (*T, struct{}, []int),
	// the package path will be the empty string.
	PkgPath() string

	// Size returns the number of bytes needed to store
	// a value of the given type; it is analogous to unsafe.Sizeof.
	Size() uintptr

	// String returns a string representation of the type.
	// The string representation may use shortened package names
	// (e.g., base64 instead of "encoding/base64") and is not
	// guaranteed to be unique among types. To test for type identity,
	// compare the Types directly.
	String() string

	// Used internally by gccgo--the string retaining quoting.
	rawString() string

	// Kind returns the specific kind of this type.
	Kind() Kind

	// Implements reports whether the type implements the interface type u.
	Implements(u Type) bool

	// AssignableTo reports whether a value of the type is assignable to type u.
	AssignableTo(u Type) bool

	// ConvertibleTo reports whether a value of the type is convertible to type u.
	ConvertibleTo(u Type) bool

	// Comparable reports whether values of this type are comparable.
	Comparable() bool

	// Methods applicable only to some types, depending on Kind.
	// The methods allowed for each kind are:
	//
	//	Int*, Uint*, Float*, Complex*: Bits
	//	Array: Elem, Len
	//	Chan: ChanDir, Elem
	//	Func: In, NumIn, Out, NumOut, IsVariadic.
	//	Map: Key, Elem
	//	Ptr: Elem
	//	Slice: Elem
	//	Struct: Field, FieldByIndex, FieldByName, FieldByNameFunc, NumField

	// Bits returns the size of the type in bits.
	// It panics if the type's Kind is not one of the
	// sized or unsized Int, Uint, Float, or Complex kinds.
	Bits() int

	// ChanDir returns a channel type's direction.
	// It panics if the type's Kind is not Chan.
	ChanDir() ChanDir

	// IsVariadic reports whether a function type's final input parameter
	// is a "..." parameter. If so, t.In(t.NumIn() - 1) returns the parameter's
	// implicit actual type []T.
	//
	// For concreteness, if t represents func(x int, y ... float64), then
	//
	//	t.NumIn() == 2
	//	t.In(0) is the reflect.Type for "int"
	//	t.In(1) is the reflect.Type for "[]float64"
	//	t.IsVariadic() == true
	//
	// IsVariadic panics if the type's Kind is not Func.
	IsVariadic() bool

	// Elem returns a type's element type.
	// It panics if the type's Kind is not Array, Chan, Map, Ptr, or Slice.
	Elem() Type

	// Field returns a struct type's i'th field.
	// It panics if the type's Kind is not Struct.
	// It panics if i is not in the range [0, NumField()).
	Field(i int) StructField

	// FieldByIndex returns the nested field corresponding
	// to the index sequence. It is equivalent to calling Field
	// successively for each index i.
	// It panics if the type's Kind is not Struct.
	FieldByIndex(index []int) StructField

	// FieldByName returns the struct field with the given name
	// and a boolean indicating if the field was found.
	FieldByName(name string) (StructField, bool)

	// FieldByNameFunc returns the struct field with a name
	// that satisfies the match function and a boolean indicating if
	// the field was found.
	//
	// FieldByNameFunc considers the fields in the struct itself
	// and then the fields in any anonymous structs, in breadth first order,
	// stopping at the shallowest nesting depth containing one or more
	// fields satisfying the match function. If multiple fields at that depth
	// satisfy the match function, they cancel each other
	// and FieldByNameFunc returns no match.
	// This behavior mirrors Go's handling of name lookup in
	// structs containing anonymous fields.
	FieldByNameFunc(match func(string) bool) (StructField, bool)

	// In returns the type of a function type's i'th input parameter.
	// It panics if the type's Kind is not Func.
	// It panics if i is not in the range [0, NumIn()).
	In(i int) Type

	// Key returns a map type's key type.
	// It panics if the type's Kind is not Map.
	Key() Type

	// Len returns an array type's length.
	// It panics if the type's Kind is not Array.
	Len() int

	// NumField returns a struct type's field count.
	// It panics if the type's Kind is not Struct.
	NumField() int

	// NumIn returns a function type's input parameter count.
	// It panics if the type's Kind is not Func.
	NumIn() int

	// NumOut returns a function type's output parameter count.
	// It panics if the type's Kind is not Func.
	NumOut() int

	// Out returns the type of a function type's i'th output parameter.
	// It panics if the type's Kind is not Func.
	// It panics if i is not in the range [0, NumOut()).
	Out(i int) Type

	// common and uncommon are internal accessors used by gccgo's
	// reflect implementation; they keep Type from being implemented
	// outside this package.
	common() *rtype
	uncommon() *uncommonType
}
212 // BUG(rsc): FieldByName and related functions consider struct field names to be equal
213 // if the names are equal, even if they are unexported names originating
214 // in different packages. The practical effect of this is that the result of
215 // t.FieldByName("x") is not well defined if the struct type t contains
216 // multiple fields named x (embedded from different packages).
217 // FieldByName may return one of the fields named x or may report that there are none.
218 // See https://golang.org/issue/4876 for more details.
/*
 * These data structures are known to the compiler (../../cmd/internal/gc/reflect.go).
 * A few are known to ../runtime/type.go to convey to debuggers.
 * They are also known to ../runtime/type.go.
 */
// A Kind represents the specific kind of type that a Type represents.
// The zero Kind is not a valid kind.
type Kind uint

// The kinds are ordered so that arithmetic kinds (Int..Complex128)
// form a contiguous range, which Bits relies on.
const (
	Invalid Kind = iota
	Bool
	Int
	Int8
	Int16
	Int32
	Int64
	Uint
	Uint8
	Uint16
	Uint32
	Uint64
	Uintptr
	Float32
	Float64
	Complex64
	Complex128
	Array
	Chan
	Func
	Interface
	Map
	Ptr
	Slice
	String
	Struct
	UnsafePointer
)
// rtype is the common implementation of most values.
// It is embedded in other, public struct types, but always
// with a unique tag like `reflect:"array"` or `reflect:"ptr"`
// so that code cannot convert from, say, *arrayType to *ptrType.
//
// rtype must be kept in sync with ../runtime/type.go:/^type._type.
type rtype struct {
	size       uintptr
	ptrdata    uintptr // size of memory prefix holding all pointers
	hash       uint32  // hash of type; avoids computation in hash tables
	kind       uint8   // enumeration for C
	align      int8    // alignment of variable with this type
	fieldAlign uint8   // alignment of struct field with this type
	_          uint8   // unused/padding

	hashfn  func(unsafe.Pointer, uintptr) uintptr     // hash function
	equalfn func(unsafe.Pointer, unsafe.Pointer) bool // equality function

	gcdata        *byte   // garbage collection data
	string        *string // string form; unnecessary but undeniably useful
	*uncommonType         // (relatively) uncommon fields
	ptrToThis     *rtype  // type for pointer to this type, if used in binary or has methods
}
// method describes a single method on a non-interface type.
type method struct {
	name    *string        // name of method
	pkgPath *string        // nil for exported Names; otherwise import path
	mtyp    *rtype         // method type (without receiver)
	typ     *rtype         // .(*FuncType) underneath (with receiver)
	tfn     unsafe.Pointer // fn used for normal method call
}

// uncommonType is present only for types with names or methods
// (if T is a named type, the uncommonTypes for T and *T have methods).
// Using a pointer to this struct reduces the overall size required
// to describe an unnamed type with no methods.
type uncommonType struct {
	name    *string  // name of type
	pkgPath *string  // import path; nil for built-in types like int, string
	methods []method // methods associated with type
}
// ChanDir represents a channel type's direction.
type ChanDir int

const (
	RecvDir ChanDir             = 1 << iota // <-chan
	SendDir                                 // chan<-
	BothDir = RecvDir | SendDir             // chan
)
// arrayType represents a fixed array type.
type arrayType struct {
	rtype `reflect:"array"`
	elem  *rtype // array element type
	slice *rtype // slice type
	len   uintptr
}

// chanType represents a channel type.
type chanType struct {
	rtype `reflect:"chan"`
	elem  *rtype  // channel element type
	dir   uintptr // channel direction (ChanDir)
}

// funcType represents a function type.
type funcType struct {
	rtype     `reflect:"func"`
	dotdotdot bool     // last input parameter is ...
	in        []*rtype // input parameter types
	out       []*rtype // output parameter types
}
// imethod represents a method on an interface type
type imethod struct {
	name    *string // name of method
	pkgPath *string // nil for exported Names; otherwise import path
	typ     *rtype  // .(*FuncType) underneath
}

// interfaceType represents an interface type.
type interfaceType struct {
	rtype   `reflect:"interface"`
	methods []imethod // sorted by hash
}

// mapType represents a map type.
type mapType struct {
	rtype         `reflect:"map"`
	key           *rtype // map key type
	elem          *rtype // map element (value) type
	bucket        *rtype // internal bucket structure
	hmap          *rtype // internal map header
	keysize       uint8  // size of key slot
	indirectkey   uint8  // store ptr to key instead of key itself
	valuesize     uint8  // size of value slot
	indirectvalue uint8  // store ptr to value instead of value itself
	bucketsize    uint16 // size of bucket
	reflexivekey  bool   // true if k==k for all keys
	needkeyupdate bool   // true if we need to update key on an overwrite
}
// ptrType represents a pointer type.
type ptrType struct {
	rtype `reflect:"ptr"`
	elem  *rtype // pointer element (pointed at) type
}

// sliceType represents a slice type.
type sliceType struct {
	rtype `reflect:"slice"`
	elem  *rtype // slice element type
}

// structField describes a single struct field.
type structField struct {
	name       *string // name is always non-empty
	pkgPath    *string // nil for exported Names; otherwise import path
	typ        *rtype  // type of field
	tag        *string // nil if no tag
	offsetAnon uintptr // byte offset of field<<1 | isAnonymous
}

// offset returns the field's byte offset within the struct
// (the anonymous bit is stored in the low bit of offsetAnon).
func (f *structField) offset() uintptr {
	return f.offsetAnon >> 1
}

// anon reports whether the field is an embedded (anonymous) field.
func (f *structField) anon() bool {
	return f.offsetAnon&1 != 0
}

// structType represents a struct type.
type structType struct {
	rtype  `reflect:"struct"`
	fields []structField // sorted by offset
}
/*
 * The compiler knows the exact layout of all the data structures above.
 * The compiler does not know about the data structures and methods below.
 */
// Method represents a single method.
type Method struct {
	// Name is the method name.
	// PkgPath is the package path that qualifies a lower case (unexported)
	// method name. It is empty for upper case (exported) method names.
	// The combination of PkgPath and Name uniquely identifies a method
	// in a method set.
	// See https://golang.org/ref/spec#Uniqueness_of_identifiers
	Name    string
	PkgPath string

	Type  Type  // method type
	Func  Value // func with receiver as first argument
	Index int   // index for Type.Method
}

// Flag bits stored in rtype.kind alongside the Kind value
// (which occupies the low five bits, see kindMask).
const (
	kindDirectIface = 1 << 5
	kindGCProg      = 1 << 6 // Type.gc points to GC program
	kindNoPointers  = 1 << 7
	kindMask        = (1 << 5) - 1
)
// String returns the name of k, or "kindN" for out-of-range values.
func (k Kind) String() string {
	if int(k) < len(kindNames) {
		return kindNames[k]
	}
	return "kind" + strconv.Itoa(int(k))
}

// kindNames maps each Kind to its printable name; indexed by Kind value.
var kindNames = []string{
	Invalid:       "invalid",
	Bool:          "bool",
	Int:           "int",
	Int8:          "int8",
	Int16:         "int16",
	Int32:         "int32",
	Int64:         "int64",
	Uint:          "uint",
	Uint8:         "uint8",
	Uint16:        "uint16",
	Uint32:        "uint32",
	Uint64:        "uint64",
	Uintptr:       "uintptr",
	Float32:       "float32",
	Float64:       "float64",
	Complex64:     "complex64",
	Complex128:    "complex128",
	Array:         "array",
	Chan:          "chan",
	Func:          "func",
	Interface:     "interface",
	Map:           "map",
	Ptr:           "ptr",
	Slice:         "slice",
	String:        "string",
	Struct:        "struct",
	UnsafePointer: "unsafe.Pointer",
}
// uncommon returns the receiver itself; rtype embeds *uncommonType,
// so this satisfies Type.uncommon.
func (t *uncommonType) uncommon() *uncommonType {
	return t
}

// PkgPath returns the type's import path, or "" if the receiver is nil
// or the type has no package path.
func (t *uncommonType) PkgPath() string {
	if t == nil || t.pkgPath == nil {
		return ""
	}
	return *t.pkgPath
}

// Name returns the type's name, or "" if the receiver is nil
// or the type is unnamed.
func (t *uncommonType) Name() string {
	if t == nil || t.name == nil {
		return ""
	}
	return *t.name
}
482 func (t *rtype) rawString() string { return *t.string }
484 func (t *rtype) String() string {
485 // For gccgo, strip out quoted strings.
486 s := *t.string
487 var q bool
488 r := make([]byte, len(s))
489 j := 0
490 for i := 0; i < len(s); i++ {
491 if s[i] == '\t' {
492 q = !q
493 } else if !q {
494 r[j] = s[i]
498 return string(r[:j])
// Size returns the number of bytes needed to store a value of this type.
func (t *rtype) Size() uintptr { return t.size }

// Bits returns the size of the type in bits.
// It panics for nil or non-arithmetic types.
func (t *rtype) Bits() int {
	if t == nil {
		panic("reflect: Bits of nil Type")
	}
	k := t.Kind()
	// Int..Complex128 is the contiguous range of arithmetic kinds.
	if k < Int || k > Complex128 {
		panic("reflect: Bits of non-arithmetic Type " + t.String())
	}
	return int(t.size) * 8
}

// Align returns the alignment of a value of this type in memory.
func (t *rtype) Align() int { return int(t.align) }

// FieldAlign returns the alignment of this type when used as a struct field.
func (t *rtype) FieldAlign() int { return int(t.fieldAlign) }

// Kind extracts the Kind from the low bits of the kind byte.
func (t *rtype) Kind() Kind { return Kind(t.kind & kindMask) }

// pointers reports whether values of this type contain pointers.
func (t *rtype) pointers() bool { return t.kind&kindNoPointers == 0 }

// common returns the receiver itself, satisfying Type.common.
func (t *rtype) common() *rtype { return t }
// methodCache caches the filtered exported-method slice per type.
var methodCache sync.Map // map[*rtype][]method

// exportedMethods returns the type's exported methods, computing and
// caching the filtered slice on first use.
func (t *rtype) exportedMethods() []method {
	methodsi, found := methodCache.Load(t)
	if found {
		return methodsi.([]method)
	}

	ut := t.uncommon()
	if ut == nil {
		return nil
	}
	allm := ut.methods
	// Fast path: if every method is exported, reuse the slice as-is.
	allExported := true
	for _, m := range allm {
		if m.pkgPath != nil {
			allExported = false
			break
		}
	}
	var methods []method
	if allExported {
		methods = allm
	} else {
		methods = make([]method, 0, len(allm))
		for _, m := range allm {
			if m.pkgPath == nil {
				methods = append(methods, m)
			}
		}
		// Clamp capacity so callers cannot append into the cached slice.
		methods = methods[:len(methods):len(methods)]
	}

	// LoadOrStore makes concurrent first computations converge on one slice.
	methodsi, _ = methodCache.LoadOrStore(t, methods)
	return methodsi.([]method)
}
// NumMethod returns the number of exported methods in the type's method set.
// For interface types it counts all interface methods.
func (t *rtype) NumMethod() int {
	if t.Kind() == Interface {
		tt := (*interfaceType)(unsafe.Pointer(t))
		return tt.NumMethod()
	}
	if t.uncommonType == nil {
		return 0 // avoid methodCache synchronization
	}
	return len(t.exportedMethods())
}

// Method returns the i'th exported method in the type's method set.
// It panics if i is out of range for non-interface types.
func (t *rtype) Method(i int) (m Method) {
	if t.Kind() == Interface {
		tt := (*interfaceType)(unsafe.Pointer(t))
		return tt.Method(i)
	}
	methods := t.exportedMethods()
	if i < 0 || i >= len(methods) {
		panic("reflect: Method index out of range")
	}
	p := methods[i]
	if p.name != nil {
		m.Name = *p.name
	}
	fl := flag(Func)
	mt := p.typ
	m.Type = toType(mt)
	// Build a Value holding a pointer to the method's function pointer;
	// flagMethodFn marks it as a gccgo method function.
	x := new(unsafe.Pointer)
	*x = unsafe.Pointer(&p.tfn)
	m.Func = Value{mt, unsafe.Pointer(x), fl | flagIndir | flagMethodFn}
	m.Index = i
	return m
}
// MethodByName returns the exported method with the given name and a
// boolean reporting whether it was found.
func (t *rtype) MethodByName(name string) (m Method, ok bool) {
	if t.Kind() == Interface {
		tt := (*interfaceType)(unsafe.Pointer(t))
		return tt.MethodByName(name)
	}
	ut := t.uncommon()
	if ut == nil {
		return Method{}, false
	}
	utmethods := ut.methods
	// eidx tracks the index within the exported-only method list, since
	// t.Method indexes into exportedMethods, not ut.methods.
	var eidx int
	for i := 0; i < len(utmethods); i++ {
		p := utmethods[i]
		if p.pkgPath == nil {
			if p.name != nil && *p.name == name {
				return t.Method(eidx), true
			}
			eidx++
		}
	}
	return Method{}, false
}
// PkgPath returns the type's import path, delegating to the embedded
// uncommonType (which handles the nil case).
func (t *rtype) PkgPath() string {
	return t.uncommonType.PkgPath()
}

// Name returns the type's name within its package.
func (t *rtype) Name() string {
	return t.uncommonType.Name()
}

// ChanDir returns a channel type's direction.
// It panics if the type's Kind is not Chan.
func (t *rtype) ChanDir() ChanDir {
	if t.Kind() != Chan {
		panic("reflect: ChanDir of non-chan type")
	}
	tt := (*chanType)(unsafe.Pointer(t))
	return ChanDir(tt.dir)
}

// IsVariadic reports whether a function type's final input parameter
// is a "..." parameter. It panics if the type's Kind is not Func.
func (t *rtype) IsVariadic() bool {
	if t.Kind() != Func {
		panic("reflect: IsVariadic of non-func type")
	}
	tt := (*funcType)(unsafe.Pointer(t))
	return tt.dotdotdot
}
// Elem returns the type's element type.
// It panics if the type's Kind is not Array, Chan, Map, Ptr, or Slice.
func (t *rtype) Elem() Type {
	switch t.Kind() {
	case Array:
		tt := (*arrayType)(unsafe.Pointer(t))
		return toType(tt.elem)
	case Chan:
		tt := (*chanType)(unsafe.Pointer(t))
		return toType(tt.elem)
	case Map:
		tt := (*mapType)(unsafe.Pointer(t))
		return toType(tt.elem)
	case Ptr:
		tt := (*ptrType)(unsafe.Pointer(t))
		return toType(tt.elem)
	case Slice:
		tt := (*sliceType)(unsafe.Pointer(t))
		return toType(tt.elem)
	}
	panic("reflect: Elem of invalid type")
}
// Field returns a struct type's i'th field.
// It panics if the type's Kind is not Struct.
func (t *rtype) Field(i int) StructField {
	if t.Kind() != Struct {
		panic("reflect: Field of non-struct type")
	}
	tt := (*structType)(unsafe.Pointer(t))
	return tt.Field(i)
}

// FieldByIndex returns the nested field corresponding to index.
// It panics if the type's Kind is not Struct.
func (t *rtype) FieldByIndex(index []int) StructField {
	if t.Kind() != Struct {
		panic("reflect: FieldByIndex of non-struct type")
	}
	tt := (*structType)(unsafe.Pointer(t))
	return tt.FieldByIndex(index)
}

// FieldByName returns the struct field with the given name.
// It panics if the type's Kind is not Struct.
func (t *rtype) FieldByName(name string) (StructField, bool) {
	if t.Kind() != Struct {
		panic("reflect: FieldByName of non-struct type")
	}
	tt := (*structType)(unsafe.Pointer(t))
	return tt.FieldByName(name)
}

// FieldByNameFunc returns the struct field whose name satisfies match.
// It panics if the type's Kind is not Struct.
func (t *rtype) FieldByNameFunc(match func(string) bool) (StructField, bool) {
	if t.Kind() != Struct {
		panic("reflect: FieldByNameFunc of non-struct type")
	}
	tt := (*structType)(unsafe.Pointer(t))
	return tt.FieldByNameFunc(match)
}

// In returns the type of a function type's i'th input parameter.
// It panics if the type's Kind is not Func.
func (t *rtype) In(i int) Type {
	if t.Kind() != Func {
		panic("reflect: In of non-func type")
	}
	tt := (*funcType)(unsafe.Pointer(t))
	return toType(tt.in[i])
}

// Key returns a map type's key type.
// It panics if the type's Kind is not Map.
func (t *rtype) Key() Type {
	if t.Kind() != Map {
		panic("reflect: Key of non-map type")
	}
	tt := (*mapType)(unsafe.Pointer(t))
	return toType(tt.key)
}

// Len returns an array type's length.
// It panics if the type's Kind is not Array.
func (t *rtype) Len() int {
	if t.Kind() != Array {
		panic("reflect: Len of non-array type")
	}
	tt := (*arrayType)(unsafe.Pointer(t))
	return int(tt.len)
}

// NumField returns a struct type's field count.
// It panics if the type's Kind is not Struct.
func (t *rtype) NumField() int {
	if t.Kind() != Struct {
		panic("reflect: NumField of non-struct type")
	}
	tt := (*structType)(unsafe.Pointer(t))
	return len(tt.fields)
}

// NumIn returns a function type's input parameter count.
// It panics if the type's Kind is not Func.
func (t *rtype) NumIn() int {
	if t.Kind() != Func {
		panic("reflect: NumIn of non-func type")
	}
	tt := (*funcType)(unsafe.Pointer(t))
	return len(tt.in)
}

// NumOut returns a function type's output parameter count.
// It panics if the type's Kind is not Func.
func (t *rtype) NumOut() int {
	if t.Kind() != Func {
		panic("reflect: NumOut of non-func type")
	}
	tt := (*funcType)(unsafe.Pointer(t))
	return len(tt.out)
}

// Out returns the type of a function type's i'th output parameter.
// It panics if the type's Kind is not Func.
func (t *rtype) Out(i int) Type {
	if t.Kind() != Func {
		panic("reflect: Out of non-func type")
	}
	tt := (*funcType)(unsafe.Pointer(t))
	return toType(tt.out[i])
}
// add returns p+x.
//
// The whySafe string is ignored, so that the function still inlines
// as efficiently as p+x, but all call sites should use the string to
// record why the addition is safe, which is to say why the addition
// does not cause x to advance to the very end of p's allocation
// and therefore point incorrectly at the next block in memory.
func add(p unsafe.Pointer, x uintptr, whySafe string) unsafe.Pointer {
	return unsafe.Pointer(uintptr(p) + x)
}
762 func (d ChanDir) String() string {
763 switch d {
764 case SendDir:
765 return "chan<-"
766 case RecvDir:
767 return "<-chan"
768 case BothDir:
769 return "chan"
771 return "ChanDir" + strconv.Itoa(int(d))
// Method returns the i'th method in the type's method set.
// Out-of-range indices return the zero Method rather than panicking.
func (t *interfaceType) Method(i int) (m Method) {
	if i < 0 || i >= len(t.methods) {
		return
	}
	p := &t.methods[i]
	m.Name = *p.name
	if p.pkgPath != nil {
		m.PkgPath = *p.pkgPath
	}
	m.Type = toType(p.typ)
	m.Index = i
	// Func is left nil: interface methods have no receiver-bound function.
	return
}

// NumMethod returns the number of interface methods in the type's method set.
func (t *interfaceType) NumMethod() int { return len(t.methods) }
// MethodByName returns the method with the given name in the type's
// method set and a boolean reporting whether it was found.
func (t *interfaceType) MethodByName(name string) (m Method, ok bool) {
	if t == nil {
		return
	}
	var p *imethod
	for i := range t.methods {
		p = &t.methods[i]
		if *p.name == name {
			return t.Method(i), true
		}
	}
	return
}
// A StructField describes a single field in a struct.
type StructField struct {
	// Name is the field name.
	Name string
	// PkgPath is the package path that qualifies a lower case (unexported)
	// field name. It is empty for upper case (exported) field names.
	// See https://golang.org/ref/spec#Uniqueness_of_identifiers
	PkgPath string

	Type      Type      // field type
	Tag       StructTag // field tag string
	Offset    uintptr   // offset within struct, in bytes
	Index     []int     // index sequence for Type.FieldByIndex
	Anonymous bool      // is an embedded field
}

// A StructTag is the tag string in a struct field.
//
// By convention, tag strings are a concatenation of
// optionally space-separated key:"value" pairs.
// Each key is a non-empty string consisting of non-control
// characters other than space (U+0020 ' '), quote (U+0022 '"'),
// and colon (U+003A ':'). Each value is quoted using U+0022 '"'
// characters and Go string literal syntax.
type StructTag string
// Get returns the value associated with key in the tag string.
// If there is no such key in the tag, Get returns the empty string.
// If the tag does not have the conventional format, the value
// returned by Get is unspecified. To determine whether a tag is
// explicitly set to the empty string, use Lookup.
func (tag StructTag) Get(key string) string {
	v, _ := tag.Lookup(key)
	return v
}
843 // Lookup returns the value associated with key in the tag string.
844 // If the key is present in the tag the value (which may be empty)
845 // is returned. Otherwise the returned value will be the empty string.
846 // The ok return value reports whether the value was explicitly set in
847 // the tag string. If the tag does not have the conventional format,
848 // the value returned by Lookup is unspecified.
849 func (tag StructTag) Lookup(key string) (value string, ok bool) {
850 // When modifying this code, also update the validateStructTag code
851 // in cmd/vet/structtag.go.
853 for tag != "" {
854 // Skip leading space.
855 i := 0
856 for i < len(tag) && tag[i] == ' ' {
859 tag = tag[i:]
860 if tag == "" {
861 break
864 // Scan to colon. A space, a quote or a control character is a syntax error.
865 // Strictly speaking, control chars include the range [0x7f, 0x9f], not just
866 // [0x00, 0x1f], but in practice, we ignore the multi-byte control characters
867 // as it is simpler to inspect the tag's bytes than the tag's runes.
868 i = 0
869 for i < len(tag) && tag[i] > ' ' && tag[i] != ':' && tag[i] != '"' && tag[i] != 0x7f {
872 if i == 0 || i+1 >= len(tag) || tag[i] != ':' || tag[i+1] != '"' {
873 break
875 name := string(tag[:i])
876 tag = tag[i+1:]
878 // Scan quoted string to find value.
879 i = 1
880 for i < len(tag) && tag[i] != '"' {
881 if tag[i] == '\\' {
886 if i >= len(tag) {
887 break
889 qvalue := string(tag[:i+1])
890 tag = tag[i+1:]
892 if key == name {
893 value, err := strconv.Unquote(qvalue)
894 if err != nil {
895 break
897 return value, true
900 return "", false
// Field returns the i'th struct field.
// It panics if i is out of range.
func (t *structType) Field(i int) (f StructField) {
	if i < 0 || i >= len(t.fields) {
		panic("reflect: Field index out of bounds")
	}
	p := &t.fields[i]
	f.Type = toType(p.typ)
	f.Name = *p.name
	f.Anonymous = p.anon()
	if p.pkgPath != nil {
		f.PkgPath = *p.pkgPath
	}
	if p.tag != nil {
		f.Tag = StructTag(*p.tag)
	}
	f.Offset = p.offset()

	// NOTE(rsc): This is the only allocation in the interface
	// presented by a reflect.Type. It would be nice to avoid,
	// at least in the common cases, but we need to make sure
	// that misbehaving clients of reflect cannot affect other
	// uses of reflect. One possibility is CL 5371098, but we
	// postponed that ugliness until there is a demonstrated
	// need for the performance. This is issue 2320.
	f.Index = []int{i}
	return
}
// TODO(gri): Should there be an error/bool indicator if the index
// is wrong for FieldByIndex?

// FieldByIndex returns the nested field corresponding to index.
func (t *structType) FieldByIndex(index []int) (f StructField) {
	f.Type = toType(&t.rtype)
	for i, x := range index {
		if i > 0 {
			// Step through an embedded pointer-to-struct transparently.
			ft := f.Type
			if ft.Kind() == Ptr && ft.Elem().Kind() == Struct {
				ft = ft.Elem()
			}
			f.Type = ft
		}
		f = f.Type.Field(x)
	}
	return
}
// A fieldScan represents an item on the fieldByNameFunc scan work list.
type fieldScan struct {
	typ   *structType
	index []int // index path from the root struct to typ
}
// FieldByNameFunc returns the struct field with a name that satisfies the
// match function and a boolean to indicate if the field was found.
func (t *structType) FieldByNameFunc(match func(string) bool) (result StructField, ok bool) {
	// This uses the same condition that the Go language does: there must be a unique instance
	// of the match at a given depth level. If there are multiple instances of a match at the
	// same depth, they annihilate each other and inhibit any possible match at a lower level.
	// The algorithm is breadth first search, one depth level at a time.

	// The current and next slices are work queues:
	// current lists the fields to visit on this depth level,
	// and next lists the fields on the next lower level.
	current := []fieldScan{}
	next := []fieldScan{{typ: t}}

	// nextCount records the number of times an embedded type has been
	// encountered and considered for queueing in the 'next' slice.
	// We only queue the first one, but we increment the count on each.
	// If a struct type T can be reached more than once at a given depth level,
	// then it annihilates itself and need not be considered at all when we
	// process that next depth level.
	var nextCount map[*structType]int

	// visited records the structs that have been considered already.
	// Embedded pointer fields can create cycles in the graph of
	// reachable embedded types; visited avoids following those cycles.
	// It also avoids duplicated effort: if we didn't find the field in an
	// embedded type T at level 2, we won't find it in one at level 4 either.
	visited := map[*structType]bool{}

	for len(next) > 0 {
		current, next = next, current[:0]
		count := nextCount
		nextCount = nil

		// Process all the fields at this depth, now listed in 'current'.
		// The loop queues embedded fields found in 'next', for processing during the next
		// iteration. The multiplicity of the 'current' field counts is recorded
		// in 'count'; the multiplicity of the 'next' field counts is recorded in 'nextCount'.
		for _, scan := range current {
			t := scan.typ
			if visited[t] {
				// We've looked through this type before, at a higher level.
				// That higher level would shadow the lower level we're now at,
				// so this one can't be useful to us. Ignore it.
				continue
			}
			visited[t] = true
			for i := range t.fields {
				f := &t.fields[i]
				// Find name and (for anonymous field) type for field f.
				fname := *f.name
				var ntyp *rtype
				if f.anon() {
					// Anonymous field of type T or *T.
					ntyp = f.typ
					if ntyp.Kind() == Ptr {
						ntyp = ntyp.Elem().common()
					}
				}

				// Does it match?
				if match(fname) {
					// Potential match
					if count[t] > 1 || ok {
						// Name appeared multiple times at this level: annihilate.
						return StructField{}, false
					}
					result = t.Field(i)
					result.Index = nil
					result.Index = append(result.Index, scan.index...)
					result.Index = append(result.Index, i)
					ok = true
					continue
				}

				// Queue embedded struct fields for processing with next level,
				// but only if we haven't seen a match yet at this level and only
				// if the embedded types haven't already been queued.
				if ok || ntyp == nil || ntyp.Kind() != Struct {
					continue
				}
				ntyp = toType(ntyp).common()
				styp := (*structType)(unsafe.Pointer(ntyp))
				if nextCount[styp] > 0 {
					nextCount[styp] = 2 // exact multiple doesn't matter
					continue
				}
				if nextCount == nil {
					nextCount = map[*structType]int{}
				}
				nextCount[styp] = 1
				if count[t] > 1 {
					nextCount[styp] = 2 // exact multiple doesn't matter
				}
				var index []int
				index = append(index, scan.index...)
				index = append(index, i)
				next = append(next, fieldScan{styp, index})
			}
		}
		if ok {
			break
		}
	}
	return
}
// FieldByName returns the struct field with the given name
// and a boolean to indicate if the field was found.
func (t *structType) FieldByName(name string) (f StructField, present bool) {
	// Quick check for top-level name, or struct without anonymous fields.
	hasAnon := false
	if name != "" {
		for i := range t.fields {
			tf := &t.fields[i]
			if *tf.name == name {
				return t.Field(i), true
			}
			if tf.anon() {
				hasAnon = true
			}
		}
	}
	if !hasAnon {
		return
	}
	// Fall back to the breadth-first search through embedded fields.
	return t.FieldByNameFunc(func(s string) bool { return s == name })
}
// TypeOf returns the reflection Type that represents the dynamic type of i.
// If i is a nil interface value, TypeOf returns nil.
func TypeOf(i interface{}) Type {
	// Reinterpret the interface header to reach its type descriptor.
	eface := *(*emptyInterface)(unsafe.Pointer(&i))
	// NOTE(review): presumably toType maps a nil descriptor to a nil Type,
	// which yields the documented nil-interface behavior — confirm in toType.
	return toType(eface.typ)
}
// ptrMap is the cache for PtrTo.
var ptrMap sync.Map // map[*rtype]*ptrType

// PtrTo returns the pointer type with element t.
// For example, if t represents type Foo, PtrTo(t) represents *Foo.
func PtrTo(t Type) Type {
	return t.(*rtype).ptrTo()
}
// ptrTo returns the *rtype describing a pointer to t, constructing and
// caching a new descriptor if one is not already known.
func (t *rtype) ptrTo() *rtype {
	// Fast path: the compiler already emitted the pointer type.
	if p := t.ptrToThis; p != nil {
		return p
	}

	// Check the cache.
	if pi, ok := ptrMap.Load(t); ok {
		return &pi.(*ptrType).rtype
	}

	s := "*" + *t.string

	// If a canonical type with this string already exists
	// (gccgo-specific canonicalization), reuse it.
	canonicalTypeLock.RLock()
	r, ok := canonicalType[s]
	canonicalTypeLock.RUnlock()
	if ok {
		p := (*ptrType)(unsafe.Pointer(r.(*rtype)))
		// LoadOrStore keeps the first stored value if another
		// goroutine raced us here.
		pi, _ := ptrMap.LoadOrStore(t, p)
		return &pi.(*ptrType).rtype
	}

	// Create a new ptrType starting with the description
	// of an *unsafe.Pointer.
	var iptr interface{} = (*unsafe.Pointer)(nil)
	prototype := *(**ptrType)(unsafe.Pointer(&iptr))
	pp := *prototype

	pp.string = &s
	pp.ptrToThis = nil

	// For the type structures linked into the binary, the
	// compiler provides a good hash of the string.
	// Create a good hash for the new string by using
	// the FNV-1 hash's mixing function to combine the
	// old hash and the new "*".
	// p.hash = fnv1(t.hash, '*')
	// This is the gccgo version.
	pp.hash = (t.hash << 4) + 9

	pp.uncommonType = nil
	pp.ptrToThis = nil
	pp.elem = t

	// Canonicalize the new descriptor before publishing it.
	q := canonicalize(&pp.rtype)
	p := (*ptrType)(unsafe.Pointer(q.(*rtype)))

	pi, _ := ptrMap.LoadOrStore(t, p)
	return &pi.(*ptrType).rtype
}
// fnv1 incorporates the list of bytes into the hash x using the FNV-1
// hash function: for each byte, multiply by the FNV prime, then XOR in
// the byte. With no bytes, x is returned unchanged.
func fnv1(x uint32, list ...byte) uint32 {
	const prime = 16777619
	for i := 0; i < len(list); i++ {
		x = x*prime ^ uint32(list[i])
	}
	return x
}
1159 func (t *rtype) Implements(u Type) bool {
1160 if u == nil {
1161 panic("reflect: nil type passed to Type.Implements")
1163 if u.Kind() != Interface {
1164 panic("reflect: non-interface type passed to Type.Implements")
1166 return implements(u.(*rtype), t)
1169 func (t *rtype) AssignableTo(u Type) bool {
1170 if u == nil {
1171 panic("reflect: nil type passed to Type.AssignableTo")
1173 uu := u.(*rtype)
1174 return directlyAssignable(uu, t) || implements(uu, t)
1177 func (t *rtype) ConvertibleTo(u Type) bool {
1178 if u == nil {
1179 panic("reflect: nil type passed to Type.ConvertibleTo")
1181 uu := u.(*rtype)
1182 return convertOp(uu, t) != nil
// Comparable reports whether values of this type are comparable
// with the == operator.
func (t *rtype) Comparable() bool {
	switch t.Kind() {
	case Bool, Int, Int8, Int16, Int32, Int64,
		Uint, Uint8, Uint16, Uint32, Uint64, Uintptr,
		Float32, Float64, Complex64, Complex128,
		Chan, Interface, Ptr, String, UnsafePointer:
		return true

	case Func, Map, Slice:
		// The language defines these kinds as never comparable.
		return false

	case Array:
		// An array is comparable exactly when its element type is.
		return (*arrayType)(unsafe.Pointer(t)).elem.Comparable()

	case Struct:
		// A struct is comparable exactly when all of its fields are.
		tt := (*structType)(unsafe.Pointer(t))
		for i := range tt.fields {
			if !tt.fields[i].typ.Comparable() {
				return false
			}
		}
		return true

	default:
		panic("reflect: impossible")
	}
}
// implements reports whether the type V implements the interface type T.
func implements(T, V *rtype) bool {
	if T.Kind() != Interface {
		return false
	}
	t := (*interfaceType)(unsafe.Pointer(T))
	if len(t.methods) == 0 {
		// Every type implements the empty interface.
		return true
	}

	// The same algorithm applies in both cases, but the
	// method tables for an interface type and a concrete type
	// are different, so the code is duplicated.
	// In both cases the algorithm is a linear scan over the two
	// lists - T's methods and V's methods - simultaneously.
	// Since method tables are stored in a unique sorted order
	// (alphabetical, with no duplicate method names), the scan
	// through V's methods must hit a match for each of T's
	// methods along the way, or else V does not implement T.
	// This lets us run the scan in overall linear time instead of
	// the quadratic time a naive search would require.
	// See also ../runtime/iface.go.
	if V.Kind() == Interface {
		v := (*interfaceType)(unsafe.Pointer(V))
		i := 0
		for j := 0; j < len(v.methods); j++ {
			tm := &t.methods[i]
			vm := &v.methods[j]
			// Names, package paths (both nil or equal strings), and
			// canonicalized method types must all match.
			if *vm.name == *tm.name && (vm.pkgPath == tm.pkgPath || (vm.pkgPath != nil && tm.pkgPath != nil && *vm.pkgPath == *tm.pkgPath)) && toType(vm.typ).common() == toType(tm.typ).common() {
				if i++; i >= len(t.methods) {
					// All of T's methods were matched in order.
					return true
				}
			}
		}
		return false
	}

	// Concrete type: scan its uncommon (method) data instead.
	v := V.uncommon()
	if v == nil {
		// V has no methods at all, but T has at least one.
		return false
	}
	i := 0
	for j := 0; j < len(v.methods); j++ {
		tm := &t.methods[i]
		vm := &v.methods[j]
		if *vm.name == *tm.name && (vm.pkgPath == tm.pkgPath || (vm.pkgPath != nil && tm.pkgPath != nil && *vm.pkgPath == *tm.pkgPath)) && toType(vm.mtyp).common() == toType(tm.typ).common() {
			if i++; i >= len(t.methods) {
				return true
			}
		}
	}
	return false
}
1267 // directlyAssignable reports whether a value x of type V can be directly
1268 // assigned (using memmove) to a value of type T.
1269 // https://golang.org/doc/go_spec.html#Assignability
1270 // Ignoring the interface rules (implemented elsewhere)
1271 // and the ideal constant rules (no ideal constants at run time).
1272 func directlyAssignable(T, V *rtype) bool {
1273 // x's type V is identical to T?
1274 if T == V {
1275 return true
1278 // Otherwise at least one of T and V must be unnamed
1279 // and they must have the same kind.
1280 if T.Name() != "" && V.Name() != "" || T.Kind() != V.Kind() {
1281 return false
1284 // x's type T and V must have identical underlying types.
1285 return haveIdenticalUnderlyingType(T, V, true)
1288 func haveIdenticalType(T, V Type, cmpTags bool) bool {
1289 if cmpTags {
1290 return T == V
1293 if T.Name() != V.Name() || T.Kind() != V.Kind() {
1294 return false
1297 return haveIdenticalUnderlyingType(T.common(), V.common(), false)
// haveIdenticalUnderlyingType reports whether T and V have identical
// underlying types, comparing struct tags only when cmpTags is true.
func haveIdenticalUnderlyingType(T, V *rtype, cmpTags bool) bool {
	if T == V {
		return true
	}

	kind := T.Kind()
	if kind != V.Kind() {
		return false
	}

	// Non-composite types of equal kind have same underlying type
	// (the predefined instance of the type).
	if Bool <= kind && kind <= Complex128 || kind == String || kind == UnsafePointer {
		return true
	}

	// Composite types.
	switch kind {
	case Array:
		return T.Len() == V.Len() && haveIdenticalType(T.Elem(), V.Elem(), cmpTags)

	case Chan:
		// Special case:
		// x is a bidirectional channel value, T is a channel type,
		// and x's type V and T have identical element types.
		if V.ChanDir() == BothDir && haveIdenticalType(T.Elem(), V.Elem(), cmpTags) {
			return true
		}

		// Otherwise continue test for identical underlying type.
		return V.ChanDir() == T.ChanDir() && haveIdenticalType(T.Elem(), V.Elem(), cmpTags)

	case Func:
		t := (*funcType)(unsafe.Pointer(T))
		v := (*funcType)(unsafe.Pointer(V))
		// Variadicity and parameter/result counts must match exactly.
		if t.dotdotdot != v.dotdotdot || len(t.in) != len(v.in) || len(t.out) != len(v.out) {
			return false
		}
		for i, typ := range t.in {
			if !haveIdenticalType(typ, v.in[i], cmpTags) {
				return false
			}
		}
		for i, typ := range t.out {
			if !haveIdenticalType(typ, v.out[i], cmpTags) {
				return false
			}
		}
		return true

	case Interface:
		t := (*interfaceType)(unsafe.Pointer(T))
		v := (*interfaceType)(unsafe.Pointer(V))
		if len(t.methods) == 0 && len(v.methods) == 0 {
			return true
		}
		// Might have the same methods but still
		// need a run time conversion.
		return false

	case Map:
		return haveIdenticalType(T.Key(), V.Key(), cmpTags) && haveIdenticalType(T.Elem(), V.Elem(), cmpTags)

	case Ptr, Slice:
		return haveIdenticalType(T.Elem(), V.Elem(), cmpTags)

	case Struct:
		t := (*structType)(unsafe.Pointer(T))
		v := (*structType)(unsafe.Pointer(V))
		if len(t.fields) != len(v.fields) {
			return false
		}
		for i := range t.fields {
			tf := &t.fields[i]
			vf := &v.fields[i]
			// Name, pkgPath, and tag are *string: equal pointers or
			// equal pointed-to strings both count as a match.
			if tf.name != vf.name && (tf.name == nil || vf.name == nil || *tf.name != *vf.name) {
				return false
			}
			if tf.pkgPath != vf.pkgPath && (tf.pkgPath == nil || vf.pkgPath == nil || *tf.pkgPath != *vf.pkgPath) {
				return false
			}
			if !haveIdenticalType(tf.typ, vf.typ, cmpTags) {
				return false
			}
			if cmpTags && tf.tag != vf.tag && (tf.tag == nil || vf.tag == nil || *tf.tag != *vf.tag) {
				return false
			}
			// Offset and anonymous flag are packed into offsetAnon.
			if tf.offsetAnon != vf.offsetAnon {
				return false
			}
		}
		return true
	}

	return false
}
// The lookupCache caches ArrayOf, ChanOf, MapOf and SliceOf lookups.
var lookupCache sync.Map // map[cacheKey]*rtype

// A cacheKey is the key for use in the lookupCache.
// Four values describe any of the types we are looking for:
// type kind, one or two subtypes, and an extra integer.
type cacheKey struct {
	kind  Kind
	t1    *rtype
	t2    *rtype
	extra uintptr // e.g. channel direction or array length
}

// The funcLookupCache caches FuncOf lookups.
// FuncOf does not share the common lookupCache since cacheKey is not
// sufficient to represent functions unambiguously.
var funcLookupCache struct {
	sync.Mutex // Guards stores (but not loads) on m.

	// m is a map[uint32][]*rtype keyed by the hash calculated in FuncOf.
	// Elements of m are append-only and thus safe for concurrent reading.
	m sync.Map
}
// ChanOf returns the channel type with the given direction and element type.
// For example, if t represents int, ChanOf(RecvDir, t) represents <-chan int.
//
// The gc runtime imposes a limit of 64 kB on channel element types.
// If t's size is equal to or exceeds this limit, ChanOf panics.
func ChanOf(dir ChanDir, t Type) Type {
	typ := t.(*rtype)

	// Look in cache.
	ckey := cacheKey{Chan, typ, nil, uintptr(dir)}
	if ch, ok := lookupCache.Load(ckey); ok {
		return ch.(*rtype)
	}

	// This restriction is imposed by the gc compiler and the runtime.
	if typ.size >= 1<<16 {
		panic("reflect.ChanOf: element size too large")
	}

	// Look in known types.
	// TODO: Precedence when constructing string.
	var s string
	switch dir {
	default:
		panic("reflect.ChanOf: invalid dir")
	case SendDir:
		s = "chan<- " + *typ.string
	case RecvDir:
		s = "<-chan " + *typ.string
	case BothDir:
		s = "chan " + *typ.string
	}

	// Make a channel type by copying the prototype descriptor
	// of chan unsafe.Pointer.
	var ichan interface{} = (chan unsafe.Pointer)(nil)
	prototype := *(**chanType)(unsafe.Pointer(&ichan))
	ch := *prototype
	ch.dir = uintptr(dir)
	ch.string = &s

	// gccgo uses a different hash.
	// ch.hash = fnv1(typ.hash, 'c', byte(dir))
	ch.hash = 0
	if dir&SendDir != 0 {
		ch.hash += 1
	}
	if dir&RecvDir != 0 {
		ch.hash += 2
	}
	ch.hash += typ.hash << 2
	ch.hash <<= 3
	ch.hash += 15

	ch.elem = typ
	ch.uncommonType = nil
	ch.ptrToThis = nil

	// Canonicalize before storing in lookupCache
	ti := toType(&ch.rtype)
	lookupCache.Store(ckey, ti.(*rtype))
	return ti
}
// ismapkey reports whether the runtime can use the type as a map key.
func ismapkey(*rtype) bool // implemented in runtime

// MapOf returns the map type with the given key and element types.
// For example, if k represents int and e represents string,
// MapOf(k, e) represents map[int]string.
//
// If the key type is not a valid map key type (that is, if it does
// not implement Go's == operator), MapOf panics.
func MapOf(key, elem Type) Type {
	ktyp := key.(*rtype)
	etyp := elem.(*rtype)

	if !ismapkey(ktyp) {
		panic("reflect.MapOf: invalid key type " + ktyp.String())
	}

	// Look in cache.
	ckey := cacheKey{Map, ktyp, etyp, 0}
	if mt, ok := lookupCache.Load(ckey); ok {
		return mt.(Type)
	}

	// Look in known types.
	s := "map[" + *ktyp.string + "]" + *etyp.string

	// Make a map type by copying the prototype descriptor of
	// map[unsafe.Pointer]unsafe.Pointer.
	var imap interface{} = (map[unsafe.Pointer]unsafe.Pointer)(nil)
	mt := **(**mapType)(unsafe.Pointer(&imap))
	mt.string = &s

	// gccgo uses a different hash
	// mt.hash = fnv1(etyp.hash, 'm', byte(ktyp.hash>>24), byte(ktyp.hash>>16), byte(ktyp.hash>>8), byte(ktyp.hash))
	mt.hash = ktyp.hash + etyp.hash + 2 + 14

	mt.key = ktyp
	mt.elem = etyp
	mt.uncommonType = nil
	mt.ptrToThis = nil

	mt.bucket = bucketOf(ktyp, etyp)
	// Large keys/values are stored indirectly (as pointers) in buckets;
	// see maxKeySize/maxValSize below.
	if ktyp.size > maxKeySize {
		mt.keysize = uint8(ptrSize)
		mt.indirectkey = 1
	} else {
		mt.keysize = uint8(ktyp.size)
		mt.indirectkey = 0
	}
	if etyp.size > maxValSize {
		mt.valuesize = uint8(ptrSize)
		mt.indirectvalue = 1
	} else {
		mt.valuesize = uint8(etyp.size)
		mt.indirectvalue = 0
	}
	mt.bucketsize = uint16(mt.bucket.size)
	mt.reflexivekey = isReflexive(ktyp)
	mt.needkeyupdate = needKeyUpdate(ktyp)

	// Canonicalize before storing in lookupCache
	ti := toType(&mt.rtype)
	lookupCache.Store(ckey, ti.(*rtype))
	return ti
}
// FuncOf returns the function type with the given argument and result types.
// For example if k represents int and e represents string,
// FuncOf([]Type{k}, []Type{e}, false) represents func(int) string.
//
// The variadic argument controls whether the function is variadic. FuncOf
// panics if the in[len(in)-1] does not represent a slice and variadic is
// true.
func FuncOf(in, out []Type, variadic bool) Type {
	if variadic && (len(in) == 0 || in[len(in)-1].Kind() != Slice) {
		panic("reflect.FuncOf: last arg of variadic func must be slice")
	}

	// Make a func type by copying the prototype descriptor of func().
	var ifunc interface{} = (func())(nil)
	prototype := *(**funcType)(unsafe.Pointer(&ifunc))
	ft := new(funcType)
	*ft = *prototype

	// Build a hash and minimally populate ft.
	// The shift values mix in parameter/result positions so that
	// reordered signatures hash differently.
	var hash uint32
	var fin, fout []*rtype
	shift := uint(1)
	for _, in := range in {
		t := in.(*rtype)
		fin = append(fin, t)
		hash += t.hash << shift
		shift++
	}
	shift = 2
	for _, out := range out {
		t := out.(*rtype)
		fout = append(fout, t)
		hash += t.hash << shift
		shift++
	}
	if variadic {
		hash++
	}
	hash <<= 4
	hash += 8
	ft.hash = hash
	ft.in = fin
	ft.out = fout
	ft.dotdotdot = variadic

	// Look in cache.
	if ts, ok := funcLookupCache.m.Load(hash); ok {
		for _, t := range ts.([]*rtype) {
			if haveIdenticalUnderlyingType(&ft.rtype, t, true) {
				return t
			}
		}
	}

	// Not in cache, lock and retry.
	funcLookupCache.Lock()
	defer funcLookupCache.Unlock()
	if ts, ok := funcLookupCache.m.Load(hash); ok {
		for _, t := range ts.([]*rtype) {
			if haveIdenticalUnderlyingType(&ft.rtype, t, true) {
				return t
			}
		}
	}

	// addToCache appends tt to the hash bucket while the lock is held.
	addToCache := func(tt *rtype) Type {
		var rts []*rtype
		if rti, ok := funcLookupCache.m.Load(hash); ok {
			rts = rti.([]*rtype)
		}
		funcLookupCache.m.Store(hash, append(rts, tt))
		return tt
	}

	str := funcStr(ft)

	// Populate the remaining fields of ft and store in cache.
	ft.string = &str
	ft.uncommonType = nil
	ft.ptrToThis = nil

	// Canonicalize before storing in funcLookupCache
	tc := toType(&ft.rtype)
	return addToCache(tc.(*rtype))
}
// funcStr builds a string representation of a funcType,
// e.g. "func(int, ...string) (bool, error)".
func funcStr(ft *funcType) string {
	repr := make([]byte, 0, 64)
	repr = append(repr, "func("...)
	for i, t := range ft.in {
		if i > 0 {
			repr = append(repr, ", "...)
		}
		if ft.dotdotdot && i == len(ft.in)-1 {
			// Render the final variadic parameter as "...Elem"
			// rather than "[]Elem".
			repr = append(repr, "..."...)
			repr = append(repr, *(*sliceType)(unsafe.Pointer(t)).elem.string...)
		} else {
			repr = append(repr, *t.string...)
		}
	}
	repr = append(repr, ')')
	// A single result is " T"; multiple results are " (T1, T2)".
	if l := len(ft.out); l == 1 {
		repr = append(repr, ' ')
	} else if l > 1 {
		repr = append(repr, " ("...)
	}
	for i, t := range ft.out {
		if i > 0 {
			repr = append(repr, ", "...)
		}
		repr = append(repr, *t.string...)
	}
	if len(ft.out) > 1 {
		repr = append(repr, ')')
	}
	return string(repr)
}
// isReflexive reports whether the == operation on the type is reflexive.
// That is, x == x for all values x of type t.
// (Floating-point NaN values are the classic counterexample: NaN != NaN.)
func isReflexive(t *rtype) bool {
	switch t.Kind() {
	case Bool, Int, Int8, Int16, Int32, Int64, Uint, Uint8, Uint16, Uint32, Uint64, Uintptr, Chan, Ptr, String, UnsafePointer:
		return true
	case Float32, Float64, Complex64, Complex128, Interface:
		// Floats may hold NaN; interfaces may hold floats.
		return false
	case Array:
		tt := (*arrayType)(unsafe.Pointer(t))
		return isReflexive(tt.elem)
	case Struct:
		// Reflexive only if every field's type is reflexive.
		tt := (*structType)(unsafe.Pointer(t))
		for _, f := range tt.fields {
			if !isReflexive(f.typ) {
				return false
			}
		}
		return true
	default:
		// Func, Map, Slice, Invalid
		panic("isReflexive called on non-key type " + t.String())
	}
}
// needKeyUpdate reports whether map overwrites require the key to be copied.
func needKeyUpdate(t *rtype) bool {
	switch t.Kind() {
	case Bool, Int, Int8, Int16, Int32, Int64, Uint, Uint8, Uint16, Uint32, Uint64, Uintptr, Chan, Ptr, UnsafePointer:
		return false
	case Float32, Float64, Complex64, Complex128, Interface, String:
		// Float keys can be updated from +0 to -0.
		// String keys can be updated to use a smaller backing store.
		// Interfaces might have floats or strings in them.
		return true
	case Array:
		tt := (*arrayType)(unsafe.Pointer(t))
		return needKeyUpdate(tt.elem)
	case Struct:
		// An update is needed if any field's type needs one.
		tt := (*structType)(unsafe.Pointer(t))
		for _, f := range tt.fields {
			if needKeyUpdate(f.typ) {
				return true
			}
		}
		return false
	default:
		// Func, Map, Slice, Invalid
		panic("needKeyUpdate called on non-key type " + t.String())
	}
}
// Make sure these routines stay in sync with ../../runtime/hashmap.go!
// These types exist only for GC, so we only fill out GC relevant info.
// Currently, that's just size and the GC program. We also fill in string
// for possible debugging use.
const (
	bucketSize uintptr = 8   // entries per hashmap bucket
	maxKeySize uintptr = 128 // keys larger than this are stored indirectly
	maxValSize uintptr = 128 // values larger than this are stored indirectly
)
// bucketOf constructs the GC-only rtype describing a hashmap bucket
// holding keys of type ktyp and values of type etyp. The layout is
// 8 tophash bytes, then bucketSize keys, bucketSize values, and a
// trailing overflow pointer.
func bucketOf(ktyp, etyp *rtype) *rtype {
	// See comment on hmap.overflow in ../runtime/hashmap.go.
	var kind uint8
	if ktyp.kind&kindNoPointers != 0 && etyp.kind&kindNoPointers != 0 &&
		ktyp.size <= maxKeySize && etyp.size <= maxValSize {
		kind = kindNoPointers
	}

	// Oversized keys/values are stored as pointers to the real data.
	if ktyp.size > maxKeySize {
		ktyp = PtrTo(ktyp).(*rtype)
	}
	if etyp.size > maxValSize {
		etyp = PtrTo(etyp).(*rtype)
	}

	// Prepare GC data if any.
	// A bucket is at most bucketSize*(1+maxKeySize+maxValSize)+2*ptrSize bytes,
	// or 2072 bytes, or 259 pointer-size words, or 33 bytes of pointer bitmap.
	// Note that since the key and value are known to be <= 128 bytes,
	// they're guaranteed to have bitmaps instead of GC programs.
	var gcdata *byte
	var ptrdata uintptr

	// Compute the bucket size: tophash array, keys, values.
	size := bucketSize
	size = align(size, uintptr(ktyp.fieldAlign))
	size += bucketSize * ktyp.size
	size = align(size, uintptr(etyp.fieldAlign))
	size += bucketSize * etyp.size

	maxAlign := uintptr(ktyp.fieldAlign)
	if maxAlign < uintptr(etyp.fieldAlign) {
		maxAlign = uintptr(etyp.fieldAlign)
	}
	if maxAlign > ptrSize {
		size = align(size, maxAlign)
		size += align(ptrSize, maxAlign) - ptrSize
	} else if maxAlign < ptrSize {
		size = align(size, ptrSize)
		maxAlign = ptrSize
	}

	// The overflow pointer sits at the very end of the bucket.
	ovoff := size
	size += ptrSize

	if kind != kindNoPointers {
		// Build a pointer bitmap for the whole bucket, replicating
		// the key and value bitmaps bucketSize times each.
		nptr := size / ptrSize
		mask := make([]byte, (nptr+7)/8)
		psize := bucketSize
		psize = align(psize, uintptr(ktyp.fieldAlign))
		base := psize / ptrSize

		if ktyp.kind&kindNoPointers == 0 {
			if ktyp.kind&kindGCProg != 0 {
				panic("reflect: unexpected GC program in MapOf")
			}
			kmask := (*[16]byte)(unsafe.Pointer(ktyp.gcdata))
			for i := uintptr(0); i < ktyp.ptrdata/ptrSize; i++ {
				if (kmask[i/8]>>(i%8))&1 != 0 {
					// Copy this pointer bit into all bucketSize key slots.
					for j := uintptr(0); j < bucketSize; j++ {
						word := base + j*ktyp.size/ptrSize + i
						mask[word/8] |= 1 << (word % 8)
					}
				}
			}
		}
		psize += bucketSize * ktyp.size
		psize = align(psize, uintptr(etyp.fieldAlign))
		base = psize / ptrSize

		if etyp.kind&kindNoPointers == 0 {
			if etyp.kind&kindGCProg != 0 {
				panic("reflect: unexpected GC program in MapOf")
			}
			emask := (*[16]byte)(unsafe.Pointer(etyp.gcdata))
			for i := uintptr(0); i < etyp.ptrdata/ptrSize; i++ {
				if (emask[i/8]>>(i%8))&1 != 0 {
					// Copy this pointer bit into all bucketSize value slots.
					for j := uintptr(0); j < bucketSize; j++ {
						word := base + j*etyp.size/ptrSize + i
						mask[word/8] |= 1 << (word % 8)
					}
				}
			}
		}

		// Mark the trailing overflow pointer.
		word := ovoff / ptrSize
		mask[word/8] |= 1 << (word % 8)
		gcdata = &mask[0]
		ptrdata = (word + 1) * ptrSize

		// overflow word must be last
		if ptrdata != size {
			panic("reflect: bad layout computation in MapOf")
		}
	}

	b := &rtype{
		align:      int8(maxAlign),
		fieldAlign: uint8(maxAlign),
		size:       size,
		kind:       kind,
		ptrdata:    ptrdata,
		gcdata:     gcdata,
	}
	// Fill in string for possible debugging use.
	s := "bucket(" + *ktyp.string + "," + *etyp.string + ")"
	b.string = &s
	return b
}
// SliceOf returns the slice type with element type t.
// For example, if t represents int, SliceOf(t) represents []int.
func SliceOf(t Type) Type {
	typ := t.(*rtype)

	// Look in cache.
	ckey := cacheKey{Slice, typ, nil, 0}
	if slice, ok := lookupCache.Load(ckey); ok {
		return slice.(Type)
	}

	// Look in known types.
	s := "[]" + *typ.string

	// Make a slice type by copying the prototype descriptor
	// of []unsafe.Pointer.
	var islice interface{} = ([]unsafe.Pointer)(nil)
	prototype := *(**sliceType)(unsafe.Pointer(&islice))
	slice := *prototype
	slice.string = &s

	// gccgo uses a different hash.
	// slice.hash = fnv1(typ.hash, '[')
	slice.hash = typ.hash + 1 + 13

	slice.elem = typ
	slice.uncommonType = nil
	slice.ptrToThis = nil

	// Canonicalize before storing in lookupCache
	ti := toType(&slice.rtype)
	lookupCache.Store(ckey, ti.(*rtype))
	return ti
}
// The structLookupCache caches StructOf lookups.
// StructOf does not share the common lookupCache since we need to pin
// the memory associated with *structTypeFixedN.
var structLookupCache struct {
	sync.Mutex // Guards stores (but not loads) on m.

	// m is a map[uint32][]Type keyed by the hash calculated in StructOf.
	// Elements in m are append-only and thus safe for concurrent reading.
	m sync.Map
}
// isLetter returns true if a given 'rune' is classified as a Letter.
// ASCII letters and '_' are handled inline; anything beyond ASCII is
// delegated to the Unicode tables.
func isLetter(ch rune) bool {
	switch {
	case 'a' <= ch && ch <= 'z', 'A' <= ch && ch <= 'Z':
		return true
	case ch == '_':
		return true
	default:
		return ch >= utf8.RuneSelf && unicode.IsLetter(ch)
	}
}
1887 // isValidFieldName checks if a string is a valid (struct) field name or not.
1889 // According to the language spec, a field name should be an identifier.
1891 // identifier = letter { letter | unicode_digit } .
1892 // letter = unicode_letter | "_" .
1893 func isValidFieldName(fieldName string) bool {
1894 for i, c := range fieldName {
1895 if i == 0 && !isLetter(c) {
1896 return false
1899 if !(isLetter(c) || unicode.IsDigit(c)) {
1900 return false
1904 return len(fieldName) > 0
1907 // StructOf returns the struct type containing fields.
1908 // The Offset and Index fields are ignored and computed as they would be
1909 // by the compiler.
1911 // StructOf currently does not generate wrapper methods for embedded fields.
1912 // This limitation may be lifted in a future version.
1913 func StructOf(fields []StructField) Type {
1914 var (
1915 hash = uint32(12)
1916 size uintptr
1917 typalign int8
1918 comparable = true
1919 hashable = true
1921 fs = make([]structField, len(fields))
1922 repr = make([]byte, 0, 64)
1923 fset = map[string]struct{}{} // fields' names
1925 hasPtr = false // records whether at least one struct-field is a pointer
1926 hasGCProg = false // records whether a struct-field type has a GCProg
1929 lastzero := uintptr(0)
1930 repr = append(repr, "struct {"...)
1931 for i, field := range fields {
1932 if field.Name == "" {
1933 panic("reflect.StructOf: field " + strconv.Itoa(i) + " has no name")
1935 if !isValidFieldName(field.Name) {
1936 panic("reflect.StructOf: field " + strconv.Itoa(i) + " has invalid name")
1938 if field.Type == nil {
1939 panic("reflect.StructOf: field " + strconv.Itoa(i) + " has no type")
1941 f := runtimeStructField(field)
1942 ft := f.typ
1943 if ft.kind&kindGCProg != 0 {
1944 hasGCProg = true
1946 if ft.pointers() {
1947 hasPtr = true
1950 // Update string and hash
1951 name := *f.name
1952 hash = (hash << 1) + ft.hash
1953 if !f.anon() {
1954 repr = append(repr, (" " + name)...)
1955 } else {
1956 // Embedded field
1957 repr = append(repr, " ?"...)
1958 if f.typ.Kind() == Ptr {
1959 // Embedded ** and *interface{} are illegal
1960 elem := ft.Elem()
1961 if k := elem.Kind(); k == Ptr || k == Interface {
1962 panic("reflect.StructOf: illegal anonymous field type " + ft.String())
1964 name = elem.String()
1965 } else {
1966 name = ft.String()
1969 switch f.typ.Kind() {
1970 case Interface:
1971 ift := (*interfaceType)(unsafe.Pointer(ft))
1972 if len(ift.methods) > 0 {
1973 panic("reflect.StructOf: embedded field with methods not implemented")
1975 case Ptr:
1976 ptr := (*ptrType)(unsafe.Pointer(ft))
1977 if unt := ptr.uncommon(); unt != nil {
1978 if len(unt.methods) > 0 {
1979 panic("reflect.StructOf: embedded field with methods not implemented")
1982 if unt := ptr.elem.uncommon(); unt != nil {
1983 if len(unt.methods) > 0 {
1984 panic("reflect.StructOf: embedded field with methods not implemented")
1987 default:
1988 if unt := ft.uncommon(); unt != nil {
1989 if len(unt.methods) > 0 {
1990 panic("reflect.StructOf: embedded field with methods not implemented")
1995 if _, dup := fset[name]; dup {
1996 panic("reflect.StructOf: duplicate field " + name)
1998 fset[name] = struct{}{}
2000 repr = append(repr, (" " + *ft.string)...)
2001 if f.tag != nil {
2002 repr = append(repr, (" " + strconv.Quote(*f.tag))...)
2004 if i < len(fields)-1 {
2005 repr = append(repr, ';')
2008 comparable = comparable && (ft.equalfn != nil)
2009 hashable = hashable && (ft.hashfn != nil)
2011 offset := align(size, uintptr(ft.fieldAlign))
2012 if int8(ft.fieldAlign) > typalign {
2013 typalign = int8(ft.fieldAlign)
2015 size = offset + ft.size
2016 f.offsetAnon |= offset << 1
2018 if ft.size == 0 {
2019 lastzero = size
2022 fs[i] = f
2025 if size > 0 && lastzero == size {
2026 // This is a non-zero sized struct that ends in a
2027 // zero-sized field. We add an extra byte of padding,
2028 // to ensure that taking the address of the final
2029 // zero-sized field can't manufacture a pointer to the
2030 // next object in the heap. See issue 9401.
2031 size++
2034 if len(fs) > 0 {
2035 repr = append(repr, ' ')
2037 repr = append(repr, '}')
2038 hash <<= 2
2039 str := string(repr)
2041 // Round the size up to be a multiple of the alignment.
2042 size = align(size, uintptr(typalign))
2044 // Make the struct type.
2045 var istruct interface{} = struct{}{}
2046 prototype := *(**structType)(unsafe.Pointer(&istruct))
2047 typ := new(structType)
2048 *typ = *prototype
2049 typ.fields = fs
2051 // Look in cache.
2052 if ts, ok := structLookupCache.m.Load(hash); ok {
2053 for _, st := range ts.([]Type) {
2054 t := st.common()
2055 if haveIdenticalUnderlyingType(&typ.rtype, t, true) {
2056 return t
2061 // Not in cache, lock and retry.
2062 structLookupCache.Lock()
2063 defer structLookupCache.Unlock()
2064 if ts, ok := structLookupCache.m.Load(hash); ok {
2065 for _, st := range ts.([]Type) {
2066 t := st.common()
2067 if haveIdenticalUnderlyingType(&typ.rtype, t, true) {
2068 return t
2073 addToCache := func(t Type) Type {
2074 var ts []Type
2075 if ti, ok := structLookupCache.m.Load(hash); ok {
2076 ts = ti.([]Type)
2078 structLookupCache.m.Store(hash, append(ts, t))
2079 return t
2082 typ.string = &str
2083 typ.hash = hash
2084 typ.size = size
2085 typ.align = typalign
2086 typ.fieldAlign = uint8(typalign)
2087 if !hasPtr {
2088 typ.kind |= kindNoPointers
2089 } else {
2090 typ.kind &^= kindNoPointers
2093 if hasGCProg {
2094 lastPtrField := 0
2095 for i, ft := range fs {
2096 if ft.typ.pointers() {
2097 lastPtrField = i
2100 prog := []byte{0, 0, 0, 0} // will be length of prog
2101 for i, ft := range fs {
2102 if i > lastPtrField {
2103 // gcprog should not include anything for any field after
2104 // the last field that contains pointer data
2105 break
2107 // FIXME(sbinet) handle padding, fields smaller than a word
2108 elemGC := (*[1 << 30]byte)(unsafe.Pointer(ft.typ.gcdata))[:]
2109 elemPtrs := ft.typ.ptrdata / ptrSize
2110 switch {
2111 case ft.typ.kind&kindGCProg == 0 && ft.typ.ptrdata != 0:
2112 // Element is small with pointer mask; use as literal bits.
2113 mask := elemGC
2114 // Emit 120-bit chunks of full bytes (max is 127 but we avoid using partial bytes).
2115 var n uintptr
2116 for n := elemPtrs; n > 120; n -= 120 {
2117 prog = append(prog, 120)
2118 prog = append(prog, mask[:15]...)
2119 mask = mask[15:]
2121 prog = append(prog, byte(n))
2122 prog = append(prog, mask[:(n+7)/8]...)
2123 case ft.typ.kind&kindGCProg != 0:
2124 // Element has GC program; emit one element.
2125 elemProg := elemGC[4 : 4+*(*uint32)(unsafe.Pointer(&elemGC[0]))-1]
2126 prog = append(prog, elemProg...)
2128 // Pad from ptrdata to size.
2129 elemWords := ft.typ.size / ptrSize
2130 if elemPtrs < elemWords {
2131 // Emit literal 0 bit, then repeat as needed.
2132 prog = append(prog, 0x01, 0x00)
2133 if elemPtrs+1 < elemWords {
2134 prog = append(prog, 0x81)
2135 prog = appendVarint(prog, elemWords-elemPtrs-1)
2139 *(*uint32)(unsafe.Pointer(&prog[0])) = uint32(len(prog) - 4)
2140 typ.kind |= kindGCProg
2141 typ.gcdata = &prog[0]
2142 } else {
2143 typ.kind &^= kindGCProg
2144 bv := new(bitVector)
2145 addTypeBits(bv, 0, typ.common())
2146 if len(bv.data) > 0 {
2147 typ.gcdata = &bv.data[0]
2150 typ.ptrdata = typeptrdata(typ.common())
2152 if hashable {
2153 typ.hashfn = func(p unsafe.Pointer, seed uintptr) uintptr {
2154 o := seed
2155 for _, ft := range typ.fields {
2156 pi := add(p, ft.offset(), "&x.field safe")
2157 o = ft.typ.hashfn(pi, o)
2159 return o
2161 } else {
2162 typ.hashfn = nil
2165 if comparable {
2166 typ.equalfn = func(p, q unsafe.Pointer) bool {
2167 for _, ft := range typ.fields {
2168 pi := add(p, ft.offset(), "&x.field safe")
2169 qi := add(q, ft.offset(), "&x.field safe")
2170 if !ft.typ.equalfn(pi, qi) {
2171 return false
2174 return true
2176 } else {
2177 typ.equalfn = nil
2180 typ.kind &^= kindDirectIface
2181 typ.uncommonType = nil
2182 typ.ptrToThis = nil
2184 // Canonicalize before storing in structLookupCache
2185 ti := toType(&typ.rtype)
2186 return addToCache(ti.(*rtype))
2189 func runtimeStructField(field StructField) structField {
2190 if field.PkgPath != "" {
2191 panic("reflect.StructOf: StructOf does not allow unexported fields")
2194 // Best-effort check for misuse.
2195 // Since PkgPath is empty, not much harm done if Unicode lowercase slips through.
2196 c := field.Name[0]
2197 if 'a' <= c && c <= 'z' || c == '_' {
2198 panic("reflect.StructOf: field \"" + field.Name + "\" is unexported but missing PkgPath")
2201 offsetAnon := uintptr(0)
2202 if field.Anonymous {
2203 offsetAnon |= 1
2206 s := field.Name
2207 name := &s
2209 var tag *string
2210 if field.Tag != "" {
2211 st := string(field.Tag)
2212 tag = &st
2215 return structField{
2216 name: name,
2217 pkgPath: nil,
2218 typ: field.Type.common(),
2219 tag: tag,
2220 offsetAnon: offsetAnon,
// typeptrdata returns the length in bytes of the prefix of t
// containing pointer data. Anything after this offset is scalar data.
// keep in sync with ../cmd/compile/internal/gc/reflect.go
func typeptrdata(t *rtype) uintptr {
	if !t.pointers() {
		return 0
	}
	switch t.Kind() {
	case Struct:
		st := (*structType)(unsafe.Pointer(t))
		// find the last field that has pointers.
		field := 0
		for i := range st.fields {
			ft := st.fields[i].typ
			if ft.pointers() {
				field = i
			}
		}
		f := st.fields[field]
		// Pointer data ends where that field's own pointer data ends.
		return f.offset() + f.typ.ptrdata

	default:
		// Only struct types are expected here; other kinds come from
		// the compiler with ptrdata already filled in.
		panic("reflect.typeptrdata: unexpected type, " + t.String())
	}
}
// See cmd/compile/internal/gc/reflect.go for derivation of constant.
// Types whose pointer bitmap would exceed this many bytes are described
// by a GC program instead of a plain pointer mask (see ArrayOf).
const maxPtrmaskBytes = 2048
// ArrayOf returns the array type with the given count and element type.
// For example, if t represents int, ArrayOf(5, t) represents [5]int.
//
// If the resulting type would be larger than the available address space,
// ArrayOf panics.
func ArrayOf(count int, elem Type) Type {
	typ := elem.(*rtype)

	// Look in cache.
	ckey := cacheKey{Array, typ, nil, uintptr(count)}
	if array, ok := lookupCache.Load(ckey); ok {
		return array.(Type)
	}

	// Look in known types.
	s := "[" + strconv.Itoa(count) + "]" + *typ.string

	// Make an array type from the [1]unsafe.Pointer prototype.
	var iarray interface{} = [1]unsafe.Pointer{}
	prototype := *(**arrayType)(unsafe.Pointer(&iarray))
	array := *prototype
	array.string = &s

	// gccgo uses a different hash.
	// array.hash = fnv1(typ.hash, '[')
	// for n := uint32(count); n > 0; n >>= 8 {
	// 	array.hash = fnv1(array.hash, byte(n))
	// }
	// array.hash = fnv1(array.hash, ']')
	array.hash = typ.hash + 1 + 13

	array.elem = typ
	array.ptrToThis = nil
	if typ.size > 0 {
		// Guard against uintptr overflow of size*count.
		max := ^uintptr(0) / typ.size
		if uintptr(count) > max {
			panic("reflect.ArrayOf: array size would exceed virtual address space")
		}
	}
	array.size = typ.size * uintptr(count)
	// Pointer data ends within the last element: full elements before it
	// plus the element's own pointer-data prefix.
	if count > 0 && typ.ptrdata != 0 {
		array.ptrdata = typ.size*uintptr(count-1) + typ.ptrdata
	}
	array.align = typ.align
	array.fieldAlign = typ.fieldAlign
	array.uncommonType = nil
	array.len = uintptr(count)
	array.slice = SliceOf(elem).(*rtype)

	array.kind &^= kindNoPointers
	switch {
	case typ.kind&kindNoPointers != 0 || array.size == 0:
		// No pointers.
		array.kind |= kindNoPointers
		array.gcdata = nil
		array.ptrdata = 0

	case count == 1:
		// In memory, 1-element array looks just like the element.
		array.kind |= typ.kind & kindGCProg
		array.gcdata = typ.gcdata
		array.ptrdata = typ.ptrdata

	case typ.kind&kindGCProg == 0 && array.size <= maxPtrmaskBytes*8*ptrSize:
		// Element is small with pointer mask; array is still small.
		// Create direct pointer mask by turning each 1 bit in elem
		// into count 1 bits in larger mask.
		mask := make([]byte, (array.ptrdata/ptrSize+7)/8)
		elemMask := (*[1 << 30]byte)(unsafe.Pointer(typ.gcdata))[:]
		elemWords := typ.size / ptrSize
		for j := uintptr(0); j < typ.ptrdata/ptrSize; j++ {
			if (elemMask[j/8]>>(j%8))&1 != 0 {
				// Replicate this pointer bit into every element slot.
				for i := uintptr(0); i < array.len; i++ {
					k := i*elemWords + j
					mask[k/8] |= 1 << (k % 8)
				}
			}
		}
		array.gcdata = &mask[0]

	default:
		// Create program that emits one element
		// and then repeats to make the array.
		prog := []byte{0, 0, 0, 0} // will be length of prog
		elemGC := (*[1 << 30]byte)(unsafe.Pointer(typ.gcdata))[:]
		elemPtrs := typ.ptrdata / ptrSize
		if typ.kind&kindGCProg == 0 {
			// Element is small with pointer mask; use as literal bits.
			mask := elemGC
			// Emit 120-bit chunks of full bytes (max is 127 but we avoid using partial bytes).
			var n uintptr
			for n = elemPtrs; n > 120; n -= 120 {
				prog = append(prog, 120)
				prog = append(prog, mask[:15]...)
				mask = mask[15:]
			}
			prog = append(prog, byte(n))
			prog = append(prog, mask[:(n+7)/8]...)
		} else {
			// Element has GC program; emit one element.
			elemProg := elemGC[4 : 4+*(*uint32)(unsafe.Pointer(&elemGC[0]))-1]
			prog = append(prog, elemProg...)
		}
		// Pad from ptrdata to size.
		elemWords := typ.size / ptrSize
		if elemPtrs < elemWords {
			// Emit literal 0 bit, then repeat as needed.
			prog = append(prog, 0x01, 0x00)
			if elemPtrs+1 < elemWords {
				prog = append(prog, 0x81)
				prog = appendVarint(prog, elemWords-elemPtrs-1)
			}
		}
		// Repeat count-1 times.
		if elemWords < 0x80 {
			prog = append(prog, byte(elemWords|0x80))
		} else {
			prog = append(prog, 0x80)
			prog = appendVarint(prog, elemWords)
		}
		prog = appendVarint(prog, uintptr(count)-1)
		prog = append(prog, 0)
		// First 4 bytes hold the program length, excluding themselves.
		*(*uint32)(unsafe.Pointer(&prog[0])) = uint32(len(prog) - 4)
		array.kind |= kindGCProg
		array.gcdata = &prog[0]
		array.ptrdata = array.size // overestimate but ok; must match program
	}

	array.kind &^= kindDirectIface

	esize := typ.size

	// Element-wise equality, only if the element type is comparable.
	if typ.equalfn == nil {
		array.equalfn = nil
	} else {
		eequal := typ.equalfn
		array.equalfn = func(p, q unsafe.Pointer) bool {
			for i := 0; i < count; i++ {
				pi := arrayAt(p, i, esize, "i < count")
				qi := arrayAt(q, i, esize, "i < count")
				if !eequal(pi, qi) {
					return false
				}
			}
			return true
		}
	}

	// Element-wise hash chaining, only if the element type is hashable.
	if typ.hashfn == nil {
		array.hashfn = nil
	} else {
		ehash := typ.hashfn
		array.hashfn = func(ptr unsafe.Pointer, seed uintptr) uintptr {
			o := seed
			for i := 0; i < count; i++ {
				o = ehash(arrayAt(ptr, i, esize, "i < count"), o)
			}
			return o
		}
	}

	// Canonicalize before storing in lookupCache
	ti := toType(&array.rtype)
	lookupCache.Store(ckey, ti.(*rtype))
	return ti
}
// appendVarint appends v to x in the GC program varint encoding:
// little-endian base-128 groups, with the high bit set on every
// byte except the last.
func appendVarint(x []byte, v uintptr) []byte {
	for v >= 0x80 {
		x = append(x, byte(v)|0x80)
		v >>= 7
	}
	return append(x, byte(v))
}
2428 // toType converts from a *rtype to a Type that can be returned
2429 // to the client of package reflect. In gc, the only concern is that
2430 // a nil *rtype must be replaced by a nil Type, but in gccgo this
2431 // function takes care of ensuring that multiple *rtype for the same
2432 // type are coalesced into a single Type.
2433 var canonicalType = make(map[string]Type)
2435 var canonicalTypeLock sync.RWMutex
2437 func canonicalize(t Type) Type {
2438 if t == nil {
2439 return nil
2441 s := t.rawString()
2442 canonicalTypeLock.RLock()
2443 if r, ok := canonicalType[s]; ok {
2444 canonicalTypeLock.RUnlock()
2445 return r
2447 canonicalTypeLock.RUnlock()
2448 canonicalTypeLock.Lock()
2449 if r, ok := canonicalType[s]; ok {
2450 canonicalTypeLock.Unlock()
2451 return r
2453 canonicalType[s] = t
2454 canonicalTypeLock.Unlock()
2455 return t
2458 func toType(p *rtype) Type {
2459 if p == nil {
2460 return nil
2462 return canonicalize(p)
2465 // ifaceIndir reports whether t is stored indirectly in an interface value.
2466 func ifaceIndir(t *rtype) bool {
2467 return t.kind&kindDirectIface == 0
// Layout matches runtime.gobitvector (well enough).
type bitVector struct {
	n    uint32 // number of bits
	data []byte // bit i is stored at data[i/8], position i%8
}

// append records one more bit at the end of the bitmap, growing the
// backing slice whenever a new byte is started.
func (bv *bitVector) append(bit uint8) {
	idx := bv.n / 8
	if bv.n%8 == 0 {
		// Starting a fresh byte.
		bv.data = append(bv.data, 0)
	}
	bv.data[idx] |= bit << (bv.n % 8)
	bv.n++
}
2485 func addTypeBits(bv *bitVector, offset uintptr, t *rtype) {
2486 if t.kind&kindNoPointers != 0 {
2487 return
2490 switch Kind(t.kind & kindMask) {
2491 case Chan, Func, Map, Ptr, Slice, String, UnsafePointer:
2492 // 1 pointer at start of representation
2493 for bv.n < uint32(offset/uintptr(ptrSize)) {
2494 bv.append(0)
2496 bv.append(1)
2498 case Interface:
2499 // 2 pointers
2500 for bv.n < uint32(offset/uintptr(ptrSize)) {
2501 bv.append(0)
2503 bv.append(1)
2504 bv.append(1)
2506 case Array:
2507 // repeat inner type
2508 tt := (*arrayType)(unsafe.Pointer(t))
2509 for i := 0; i < int(tt.len); i++ {
2510 addTypeBits(bv, offset+uintptr(i)*tt.elem.size, tt.elem)
2513 case Struct:
2514 // apply fields
2515 tt := (*structType)(unsafe.Pointer(t))
2516 for i := range tt.fields {
2517 f := &tt.fields[i]
2518 addTypeBits(bv, offset+f.offset(), f.typ)