1 // Copyright 2009 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
5 // Package reflect implements run-time reflection, allowing a program to
6 // manipulate objects with arbitrary types. The typical use is to take a value
7 // with static type interface{} and extract its dynamic type information by
8 // calling TypeOf, which returns a Type.
10 // A call to ValueOf returns a Value representing the run-time data.
11 // Zero takes a Type and returns a Value representing a zero value for that type.
14 // See "The Laws of Reflection" for an introduction to reflection in Go:
15 // https://golang.org/doc/articles/laws_of_reflection.html
26 // Type is the representation of a Go type.
28 // Not all methods apply to all kinds of types. Restrictions,
29 // if any, are noted in the documentation for each method.
30 // Use the Kind method to find out the kind of type before
31 // calling kind-specific methods. Calling a method
32 // inappropriate to the kind of type causes a run-time panic.
34 // Type values are comparable, such as with the == operator,
35 // so they can be used as map keys.
36 // Two Type values are equal if they represent identical types.
38 // Methods applicable to all types.
40 // Align returns the alignment in bytes of a value of
41 // this type when allocated in memory.
44 // FieldAlign returns the alignment in bytes of a value of
45 // this type when used as a field in a struct.
48 // Method returns the i'th method in the type's method set.
49 // It panics if i is not in the range [0, NumMethod()).
51 // For a non-interface type T or *T, the returned Method's Type and Func
52 // fields describe a function whose first argument is the receiver.
54 // For an interface type, the returned Method's Type field gives the
55 // method signature, without a receiver, and the Func field is nil.
58 // MethodByName returns the method with that name in the type's
59 // method set and a boolean indicating if the method was found.
61 // For a non-interface type T or *T, the returned Method's Type and Func
62 // fields describe a function whose first argument is the receiver.
64 // For an interface type, the returned Method's Type field gives the
65 // method signature, without a receiver, and the Func field is nil.
66 MethodByName(string) (Method
, bool)
68 // NumMethod returns the number of exported methods in the type's method set.
71 // Name returns the type's name within its package.
72 // It returns an empty string for unnamed types.
75 // PkgPath returns a named type's package path, that is, the import path
76 // that uniquely identifies the package, such as "encoding/base64".
77 // If the type was predeclared (string, error) or unnamed (*T, struct{}, []int),
78 // the package path will be the empty string.
81 // Size returns the number of bytes needed to store
82 // a value of the given type; it is analogous to unsafe.Sizeof.
85 // String returns a string representation of the type.
86 // The string representation may use shortened package names
87 // (e.g., base64 instead of "encoding/base64") and is not
88 // guaranteed to be unique among types. To test for type identity,
89 // compare the Types directly.
92 // Used internally by gccgo--the string retaining quoting.
95 // Kind returns the specific kind of this type.
98 // Implements reports whether the type implements the interface type u.
99 Implements(u Type
) bool
101 // AssignableTo reports whether a value of the type is assignable to type u.
102 AssignableTo(u Type
) bool
104 // ConvertibleTo reports whether a value of the type is convertible to type u.
105 ConvertibleTo(u Type
) bool
107 // Comparable reports whether values of this type are comparable.
110 // Methods applicable only to some types, depending on Kind.
111 // The methods allowed for each kind are:
113 // Int*, Uint*, Float*, Complex*: Bits
115 // Chan: ChanDir, Elem
116 // Func: In, NumIn, Out, NumOut, IsVariadic.
120 // Struct: Field, FieldByIndex, FieldByName, FieldByNameFunc, NumField
122 // Bits returns the size of the type in bits.
123 // It panics if the type's Kind is not one of the
124 // sized or unsized Int, Uint, Float, or Complex kinds.
127 // ChanDir returns a channel type's direction.
128 // It panics if the type's Kind is not Chan.
131 // IsVariadic reports whether a function type's final input parameter
132 // is a "..." parameter. If so, t.In(t.NumIn() - 1) returns the parameter's
133 // implicit actual type []T.
135 // For concreteness, if t represents func(x int, y ... float64), then
138 // t.In(0) is the reflect.Type for "int"
139 // t.In(1) is the reflect.Type for "[]float64"
140 // t.IsVariadic() == true
142 // IsVariadic panics if the type's Kind is not Func.
145 // Elem returns a type's element type.
146 // It panics if the type's Kind is not Array, Chan, Map, Ptr, or Slice.
149 // Field returns a struct type's i'th field.
150 // It panics if the type's Kind is not Struct.
151 // It panics if i is not in the range [0, NumField()).
152 Field(i
int) StructField
154 // FieldByIndex returns the nested field corresponding
155 // to the index sequence. It is equivalent to calling Field
156 // successively for each index i.
157 // It panics if the type's Kind is not Struct.
158 FieldByIndex(index
[]int) StructField
160 // FieldByName returns the struct field with the given name
161 // and a boolean indicating if the field was found.
162 FieldByName(name
string) (StructField
, bool)
164 // FieldByNameFunc returns the struct field with a name
165 // that satisfies the match function and a boolean indicating if
166 // the field was found.
168 // FieldByNameFunc considers the fields in the struct itself
169 // and then the fields in any anonymous structs, in breadth first order,
170 // stopping at the shallowest nesting depth containing one or more
171 // fields satisfying the match function. If multiple fields at that depth
172 // satisfy the match function, they cancel each other
173 // and FieldByNameFunc returns no match.
174 // This behavior mirrors Go's handling of name lookup in
175 // structs containing anonymous fields.
176 FieldByNameFunc(match
func(string) bool) (StructField
, bool)
178 // In returns the type of a function type's i'th input parameter.
179 // It panics if the type's Kind is not Func.
180 // It panics if i is not in the range [0, NumIn()).
183 // Key returns a map type's key type.
184 // It panics if the type's Kind is not Map.
187 // Len returns an array type's length.
188 // It panics if the type's Kind is not Array.
191 // NumField returns a struct type's field count.
192 // It panics if the type's Kind is not Struct.
195 // NumIn returns a function type's input parameter count.
196 // It panics if the type's Kind is not Func.
199 // NumOut returns a function type's output parameter count.
200 // It panics if the type's Kind is not Func.
203 // Out returns the type of a function type's i'th output parameter.
204 // It panics if the type's Kind is not Func.
205 // It panics if i is not in the range [0, NumOut()).
209 uncommon() *uncommonType
212 // BUG(rsc): FieldByName and related functions consider struct field names to be equal
213 // if the names are equal, even if they are unexported names originating
214 // in different packages. The practical effect of this is that the result of
215 // t.FieldByName("x") is not well defined if the struct type t contains
216 // multiple fields named x (embedded from different packages).
217 // FieldByName may return one of the fields named x or may report that there are none.
218 // See https://golang.org/issue/4876 for more details.
221 * These data structures are known to the compiler (../../cmd/internal/gc/reflect.go).
222 * A few are known to ../runtime/type.go to convey to debuggers.
223 * They are also known to ../runtime/type.go.
226 // A Kind represents the specific kind of type that a Type represents.
227 // The zero Kind is not a valid kind.
260 // rtype is the common implementation of most values.
261 // It is embedded in other, public struct types, but always
262 // with a unique tag like `reflect:"array"` or `reflect:"ptr"`
263 // so that code cannot convert from, say, *arrayType to *ptrType.
265 // rtype must be kept in sync with ../runtime/type.go:/^type._type.
268 ptrdata
uintptr // size of memory prefix holding all pointers
269 hash
uint32 // hash of type; avoids computation in hash tables
270 kind
uint8 // enumeration for C
271 align
int8 // alignment of variable with this type
272 fieldAlign
uint8 // alignment of struct field with this type
273 _
uint8 // unused/padding
275 hashfn
func(unsafe
.Pointer
, uintptr) uintptr // hash function
276 equalfn
func(unsafe
.Pointer
, unsafe
.Pointer
) bool // equality function
278 gcdata
*byte // garbage collection data
279 string *string // string form; unnecessary but undeniably useful
280 *uncommonType
// (relatively) uncommon fields
281 ptrToThis
*rtype
// type for pointer to this type, if used in binary or has methods
284 // Method on non-interface type
286 name
*string // name of method
287 pkgPath
*string // nil for exported Names; otherwise import path
288 mtyp
*rtype
// method type (without receiver)
289 typ
*rtype
// .(*FuncType) underneath (with receiver)
290 tfn unsafe
.Pointer
// fn used for normal method call
293 // uncommonType is present only for types with names or methods
294 // (if T is a named type, the uncommonTypes for T and *T have methods).
295 // Using a pointer to this struct reduces the overall size required
296 // to describe an unnamed type with no methods.
297 type uncommonType
struct {
298 name
*string // name of type
299 pkgPath
*string // import path; nil for built-in types like int, string
300 methods
[]method
// methods associated with type
303 // ChanDir represents a channel type's direction.
307 RecvDir ChanDir
= 1 << iota // <-chan
309 BothDir
= RecvDir | SendDir
// chan
312 // arrayType represents a fixed array type.
313 type arrayType
struct {
314 rtype
`reflect:"array"`
315 elem
*rtype
// array element type
316 slice
*rtype
// slice type
320 // chanType represents a channel type.
321 type chanType
struct {
322 rtype
`reflect:"chan"`
323 elem
*rtype
// channel element type
324 dir
uintptr // channel direction (ChanDir)
327 // funcType represents a function type.
328 type funcType
struct {
329 rtype
`reflect:"func"`
330 dotdotdot
bool // last input parameter is ...
331 in
[]*rtype
// input parameter types
332 out
[]*rtype
// output parameter types
335 // imethod represents a method on an interface type
336 type imethod
struct {
337 name
*string // name of method
338 pkgPath
*string // nil for exported Names; otherwise import path
339 typ
*rtype
// .(*FuncType) underneath
342 // interfaceType represents an interface type.
343 type interfaceType
struct {
344 rtype
`reflect:"interface"`
345 methods
[]imethod
// sorted by hash
348 // mapType represents a map type.
349 type mapType
struct {
350 rtype
`reflect:"map"`
351 key
*rtype
// map key type
352 elem
*rtype
// map element (value) type
353 bucket
*rtype
// internal bucket structure
354 hmap
*rtype
// internal map header
355 keysize
uint8 // size of key slot
356 indirectkey
uint8 // store ptr to key instead of key itself
357 valuesize
uint8 // size of value slot
358 indirectvalue
uint8 // store ptr to value instead of value itself
359 bucketsize
uint16 // size of bucket
360 reflexivekey
bool // true if k==k for all keys
361 needkeyupdate
bool // true if we need to update key on an overwrite
364 // ptrType represents a pointer type.
365 type ptrType
struct {
366 rtype
`reflect:"ptr"`
367 elem
*rtype
// pointer element (pointed at) type
370 // sliceType represents a slice type.
371 type sliceType
struct {
372 rtype
`reflect:"slice"`
373 elem
*rtype
// slice element type
377 type structField
struct {
378 name
*string // name is always non-empty
379 pkgPath
*string // nil for exported Names; otherwise import path
380 typ
*rtype
// type of field
381 tag
*string // nil if no tag
382 offsetAnon
uintptr // byte offset of field<<1 | isAnonymous
385 func (f
*structField
) offset() uintptr {
386 return f
.offsetAnon
>> 1
389 func (f
*structField
) anon() bool {
390 return f
.offsetAnon
&1 != 0
393 // structType represents a struct type.
394 type structType
struct {
395 rtype
`reflect:"struct"`
396 fields
[]structField
// sorted by offset
400 * The compiler knows the exact layout of all the data structures above.
401 * The compiler does not know about the data structures and methods below.
404 // Method represents a single method.
406 // Name is the method name.
407 // PkgPath is the package path that qualifies a lower case (unexported)
408 // method name. It is empty for upper case (exported) method names.
409 // The combination of PkgPath and Name uniquely identifies a method
411 // See https://golang.org/ref/spec#Uniqueness_of_identifiers
415 Type Type
// method type
416 Func Value
// func with receiver as first argument
417 Index
int // index for Type.Method
421 kindDirectIface
= 1 << 5
422 kindGCProg
= 1 << 6 // Type.gc points to GC program
423 kindNoPointers
= 1 << 7
424 kindMask
= (1 << 5) - 1
427 func (k Kind
) String() string {
428 if int(k
) < len(kindNames
) {
431 return "kind" + strconv
.Itoa(int(k
))
434 var kindNames
= []string{
450 Complex64
: "complex64",
451 Complex128
: "complex128",
455 Interface
: "interface",
461 UnsafePointer
: "unsafe.Pointer",
464 func (t
*uncommonType
) uncommon() *uncommonType
{
468 func (t
*uncommonType
) PkgPath() string {
469 if t
== nil || t
.pkgPath
== nil {
475 func (t
*uncommonType
) Name() string {
476 if t
== nil || t
.name
== nil {
482 func (t
*rtype
) rawString() string { return *t
.string }
484 func (t
*rtype
) String() string {
485 // For gccgo, strip out quoted strings.
488 r
:= make([]byte, len(s
))
490 for i
:= 0; i
< len(s
); i
++ {
501 func (t
*rtype
) Size() uintptr { return t
.size
}
503 func (t
*rtype
) Bits() int {
505 panic("reflect: Bits of nil Type")
508 if k
< Int || k
> Complex128
{
509 panic("reflect: Bits of non-arithmetic Type " + t
.String())
511 return int(t
.size
) * 8
514 func (t
*rtype
) Align() int { return int(t
.align
) }
516 func (t
*rtype
) FieldAlign() int { return int(t
.fieldAlign
) }
518 func (t
*rtype
) Kind() Kind
{ return Kind(t
.kind
& kindMask
) }
520 func (t
*rtype
) pointers() bool { return t
.kind
&kindNoPointers
== 0 }
522 func (t
*rtype
) common() *rtype
{ return t
}
// methodCache caches, per *rtype, the slice of exported methods
// computed by exportedMethods, so the filtering work is done once.
var methodCache sync.Map // map[*rtype][]method
526 func (t
*rtype
) exportedMethods() []method
{
527 methodsi
, found
:= methodCache
.Load(t
)
529 return methodsi
.([]method
)
538 for _
, m
:= range allm
{
539 if m
.pkgPath
!= nil {
548 methods
= make([]method
, 0, len(allm
))
549 for _
, m
:= range allm
{
550 if m
.pkgPath
== nil {
551 methods
= append(methods
, m
)
554 methods
= methods
[:len(methods
):len(methods
)]
557 methodsi
, _
= methodCache
.LoadOrStore(t
, methods
)
558 return methodsi
.([]method
)
561 func (t
*rtype
) NumMethod() int {
562 if t
.Kind() == Interface
{
563 tt
:= (*interfaceType
)(unsafe
.Pointer(t
))
564 return tt
.NumMethod()
566 if t
.uncommonType
== nil {
567 return 0 // avoid methodCache synchronization
569 return len(t
.exportedMethods())
572 func (t
*rtype
) Method(i
int) (m Method
) {
573 if t
.Kind() == Interface
{
574 tt
:= (*interfaceType
)(unsafe
.Pointer(t
))
577 methods
:= t
.exportedMethods()
578 if i
< 0 || i
>= len(methods
) {
579 panic("reflect: Method index out of range")
588 x
:= new(unsafe
.Pointer
)
589 *x
= unsafe
.Pointer(&p
.tfn
)
590 m
.Func
= Value
{mt
, unsafe
.Pointer(x
), fl | flagIndir | flagMethodFn
}
595 func (t
*rtype
) MethodByName(name
string) (m Method
, ok
bool) {
596 if t
.Kind() == Interface
{
597 tt
:= (*interfaceType
)(unsafe
.Pointer(t
))
598 return tt
.MethodByName(name
)
602 return Method
{}, false
604 utmethods
:= ut
.methods
606 for i
:= 0; i
< len(utmethods
); i
++ {
608 if p
.pkgPath
== nil {
609 if p
.name
!= nil && *p
.name
== name
{
610 return t
.Method(eidx
), true
615 return Method
{}, false
618 func (t
*rtype
) PkgPath() string {
619 return t
.uncommonType
.PkgPath()
622 func (t
*rtype
) Name() string {
623 return t
.uncommonType
.Name()
626 func (t
*rtype
) ChanDir() ChanDir
{
627 if t
.Kind() != Chan
{
628 panic("reflect: ChanDir of non-chan type")
630 tt
:= (*chanType
)(unsafe
.Pointer(t
))
631 return ChanDir(tt
.dir
)
634 func (t
*rtype
) IsVariadic() bool {
635 if t
.Kind() != Func
{
636 panic("reflect: IsVariadic of non-func type")
638 tt
:= (*funcType
)(unsafe
.Pointer(t
))
642 func (t
*rtype
) Elem() Type
{
645 tt
:= (*arrayType
)(unsafe
.Pointer(t
))
646 return toType(tt
.elem
)
648 tt
:= (*chanType
)(unsafe
.Pointer(t
))
649 return toType(tt
.elem
)
651 tt
:= (*mapType
)(unsafe
.Pointer(t
))
652 return toType(tt
.elem
)
654 tt
:= (*ptrType
)(unsafe
.Pointer(t
))
655 return toType(tt
.elem
)
657 tt
:= (*sliceType
)(unsafe
.Pointer(t
))
658 return toType(tt
.elem
)
660 panic("reflect: Elem of invalid type")
663 func (t
*rtype
) Field(i
int) StructField
{
664 if t
.Kind() != Struct
{
665 panic("reflect: Field of non-struct type")
667 tt
:= (*structType
)(unsafe
.Pointer(t
))
671 func (t
*rtype
) FieldByIndex(index
[]int) StructField
{
672 if t
.Kind() != Struct
{
673 panic("reflect: FieldByIndex of non-struct type")
675 tt
:= (*structType
)(unsafe
.Pointer(t
))
676 return tt
.FieldByIndex(index
)
679 func (t
*rtype
) FieldByName(name
string) (StructField
, bool) {
680 if t
.Kind() != Struct
{
681 panic("reflect: FieldByName of non-struct type")
683 tt
:= (*structType
)(unsafe
.Pointer(t
))
684 return tt
.FieldByName(name
)
687 func (t
*rtype
) FieldByNameFunc(match
func(string) bool) (StructField
, bool) {
688 if t
.Kind() != Struct
{
689 panic("reflect: FieldByNameFunc of non-struct type")
691 tt
:= (*structType
)(unsafe
.Pointer(t
))
692 return tt
.FieldByNameFunc(match
)
695 func (t
*rtype
) In(i
int) Type
{
696 if t
.Kind() != Func
{
697 panic("reflect: In of non-func type")
699 tt
:= (*funcType
)(unsafe
.Pointer(t
))
700 return toType(tt
.in
[i
])
703 func (t
*rtype
) Key() Type
{
705 panic("reflect: Key of non-map type")
707 tt
:= (*mapType
)(unsafe
.Pointer(t
))
708 return toType(tt
.key
)
711 func (t
*rtype
) Len() int {
712 if t
.Kind() != Array
{
713 panic("reflect: Len of non-array type")
715 tt
:= (*arrayType
)(unsafe
.Pointer(t
))
719 func (t
*rtype
) NumField() int {
720 if t
.Kind() != Struct
{
721 panic("reflect: NumField of non-struct type")
723 tt
:= (*structType
)(unsafe
.Pointer(t
))
724 return len(tt
.fields
)
727 func (t
*rtype
) NumIn() int {
728 if t
.Kind() != Func
{
729 panic("reflect: NumIn of non-func type")
731 tt
:= (*funcType
)(unsafe
.Pointer(t
))
735 func (t
*rtype
) NumOut() int {
736 if t
.Kind() != Func
{
737 panic("reflect: NumOut of non-func type")
739 tt
:= (*funcType
)(unsafe
.Pointer(t
))
743 func (t
*rtype
) Out(i
int) Type
{
744 if t
.Kind() != Func
{
745 panic("reflect: Out of non-func type")
747 tt
:= (*funcType
)(unsafe
.Pointer(t
))
748 return toType(tt
.out
[i
])
753 // The whySafe string is ignored, so that the function still inlines
754 // as efficiently as p+x, but all call sites should use the string to
755 // record why the addition is safe, which is to say why the addition
756 // does not cause x to advance to the very end of p's allocation
757 // and therefore point incorrectly at the next block in memory.
758 func add(p unsafe
.Pointer
, x
uintptr, whySafe
string) unsafe
.Pointer
{
759 return unsafe
.Pointer(uintptr(p
) + x
)
762 func (d ChanDir
) String() string {
771 return "ChanDir" + strconv
.Itoa(int(d
))
774 // Method returns the i'th method in the type's method set.
775 func (t
*interfaceType
) Method(i
int) (m Method
) {
776 if i
< 0 || i
>= len(t
.methods
) {
781 if p
.pkgPath
!= nil {
782 m
.PkgPath
= *p
.pkgPath
784 m
.Type
= toType(p
.typ
)
789 // NumMethod returns the number of interface methods in the type's method set.
790 func (t
*interfaceType
) NumMethod() int { return len(t
.methods
) }
792 // MethodByName returns the method with the given name in the type's method set.
793 func (t
*interfaceType
) MethodByName(name
string) (m Method
, ok
bool) {
798 for i
:= range t
.methods
{
801 return t
.Method(i
), true
807 // A StructField describes a single field in a struct.
808 type StructField
struct {
809 // Name is the field name.
811 // PkgPath is the package path that qualifies a lower case (unexported)
812 // field name. It is empty for upper case (exported) field names.
813 // See https://golang.org/ref/spec#Uniqueness_of_identifiers
816 Type Type
// field type
817 Tag StructTag
// field tag string
818 Offset
uintptr // offset within struct, in bytes
819 Index
[]int // index sequence for Type.FieldByIndex
820 Anonymous
bool // is an embedded field
823 // A StructTag is the tag string in a struct field.
825 // By convention, tag strings are a concatenation of
826 // optionally space-separated key:"value" pairs.
827 // Each key is a non-empty string consisting of non-control
828 // characters other than space (U+0020 ' '), quote (U+0022 '"'),
829 // and colon (U+003A ':'). Each value is quoted using U+0022 '"'
830 // characters and Go string literal syntax.
831 type StructTag
string
833 // Get returns the value associated with key in the tag string.
834 // If there is no such key in the tag, Get returns the empty string.
835 // If the tag does not have the conventional format, the value
836 // returned by Get is unspecified. To determine whether a tag is
837 // explicitly set to the empty string, use Lookup.
838 func (tag StructTag
) Get(key
string) string {
839 v
, _
:= tag
.Lookup(key
)
843 // Lookup returns the value associated with key in the tag string.
844 // If the key is present in the tag the value (which may be empty)
845 // is returned. Otherwise the returned value will be the empty string.
846 // The ok return value reports whether the value was explicitly set in
847 // the tag string. If the tag does not have the conventional format,
848 // the value returned by Lookup is unspecified.
849 func (tag StructTag
) Lookup(key
string) (value
string, ok
bool) {
850 // When modifying this code, also update the validateStructTag code
851 // in cmd/vet/structtag.go.
854 // Skip leading space.
856 for i
< len(tag
) && tag
[i
] == ' ' {
864 // Scan to colon. A space, a quote or a control character is a syntax error.
865 // Strictly speaking, control chars include the range [0x7f, 0x9f], not just
866 // [0x00, 0x1f], but in practice, we ignore the multi-byte control characters
867 // as it is simpler to inspect the tag's bytes than the tag's runes.
869 for i
< len(tag
) && tag
[i
] > ' ' && tag
[i
] != ':' && tag
[i
] != '"' && tag
[i
] != 0x7f {
872 if i
== 0 || i
+1 >= len(tag
) || tag
[i
] != ':' || tag
[i
+1] != '"' {
875 name
:= string(tag
[:i
])
878 // Scan quoted string to find value.
880 for i
< len(tag
) && tag
[i
] != '"' {
889 qvalue
:= string(tag
[:i
+1])
893 value
, err
:= strconv
.Unquote(qvalue
)
903 // Field returns the i'th struct field.
904 func (t
*structType
) Field(i
int) (f StructField
) {
905 if i
< 0 || i
>= len(t
.fields
) {
906 panic("reflect: Field index out of bounds")
909 f
.Type
= toType(p
.typ
)
911 f
.Anonymous
= p
.anon()
912 if p
.pkgPath
!= nil {
913 f
.PkgPath
= *p
.pkgPath
916 f
.Tag
= StructTag(*p
.tag
)
918 f
.Offset
= p
.offset()
920 // NOTE(rsc): This is the only allocation in the interface
921 // presented by a reflect.Type. It would be nice to avoid,
922 // at least in the common cases, but we need to make sure
923 // that misbehaving clients of reflect cannot affect other
924 // uses of reflect. One possibility is CL 5371098, but we
925 // postponed that ugliness until there is a demonstrated
926 // need for the performance. This is issue 2320.
931 // TODO(gri): Should there be an error/bool indicator if the index
932 // is wrong for FieldByIndex?
934 // FieldByIndex returns the nested field corresponding to index.
935 func (t
*structType
) FieldByIndex(index
[]int) (f StructField
) {
936 f
.Type
= toType(&t
.rtype
)
937 for i
, x
:= range index
{
940 if ft
.Kind() == Ptr
&& ft
.Elem().Kind() == Struct
{
950 // A fieldScan represents an item on the fieldByNameFunc scan work list.
951 type fieldScan
struct {
956 // FieldByNameFunc returns the struct field with a name that satisfies the
957 // match function and a boolean to indicate if the field was found.
958 func (t
*structType
) FieldByNameFunc(match
func(string) bool) (result StructField
, ok
bool) {
959 // This uses the same condition that the Go language does: there must be a unique instance
960 // of the match at a given depth level. If there are multiple instances of a match at the
961 // same depth, they annihilate each other and inhibit any possible match at a lower level.
962 // The algorithm is breadth first search, one depth level at a time.
964 // The current and next slices are work queues:
965 // current lists the fields to visit on this depth level,
966 // and next lists the fields on the next lower level.
967 current
:= []fieldScan
{}
968 next
:= []fieldScan
{{typ
: t
}}
970 // nextCount records the number of times an embedded type has been
971 // encountered and considered for queueing in the 'next' slice.
972 // We only queue the first one, but we increment the count on each.
973 // If a struct type T can be reached more than once at a given depth level,
974 // then it annihilates itself and need not be considered at all when we
975 // process that next depth level.
976 var nextCount
map[*structType
]int
978 // visited records the structs that have been considered already.
979 // Embedded pointer fields can create cycles in the graph of
980 // reachable embedded types; visited avoids following those cycles.
981 // It also avoids duplicated effort: if we didn't find the field in an
982 // embedded type T at level 2, we won't find it in one at level 4 either.
983 visited
:= map[*structType
]bool{}
986 current
, next
= next
, current
[:0]
990 // Process all the fields at this depth, now listed in 'current'.
991 // The loop queues embedded fields found in 'next', for processing during the next
992 // iteration. The multiplicity of the 'current' field counts is recorded
993 // in 'count'; the multiplicity of the 'next' field counts is recorded in 'nextCount'.
994 for _
, scan
:= range current
{
997 // We've looked through this type before, at a higher level.
998 // That higher level would shadow the lower level we're now at,
999 // so this one can't be useful to us. Ignore it.
1003 for i
:= range t
.fields
{
1005 // Find name and (for anonymous field) type for field f.
1009 // Anonymous field of type T or *T.
1011 if ntyp
.Kind() == Ptr
{
1012 ntyp
= ntyp
.Elem().common()
1019 if count
[t
] > 1 || ok
{
1020 // Name appeared multiple times at this level: annihilate.
1021 return StructField
{}, false
1025 result
.Index
= append(result
.Index
, scan
.index
...)
1026 result
.Index
= append(result
.Index
, i
)
1031 // Queue embedded struct fields for processing with next level,
1032 // but only if we haven't seen a match yet at this level and only
1033 // if the embedded types haven't already been queued.
1034 if ok || ntyp
== nil || ntyp
.Kind() != Struct
{
1037 ntyp
= toType(ntyp
).common()
1038 styp
:= (*structType
)(unsafe
.Pointer(ntyp
))
1039 if nextCount
[styp
] > 0 {
1040 nextCount
[styp
] = 2 // exact multiple doesn't matter
1043 if nextCount
== nil {
1044 nextCount
= map[*structType
]int{}
1048 nextCount
[styp
] = 2 // exact multiple doesn't matter
1051 index
= append(index
, scan
.index
...)
1052 index
= append(index
, i
)
1053 next
= append(next
, fieldScan
{styp
, index
})
1063 // FieldByName returns the struct field with the given name
1064 // and a boolean to indicate if the field was found.
1065 func (t
*structType
) FieldByName(name
string) (f StructField
, present
bool) {
1066 // Quick check for top-level name, or struct without anonymous fields.
1069 for i
:= range t
.fields
{
1071 if *tf
.name
== name
{
1072 return t
.Field(i
), true
1082 return t
.FieldByNameFunc(func(s
string) bool { return s
== name
})
1085 // TypeOf returns the reflection Type that represents the dynamic type of i.
1086 // If i is a nil interface value, TypeOf returns nil.
1087 func TypeOf(i
interface{}) Type
{
1088 eface
:= *(*emptyInterface
)(unsafe
.Pointer(&i
))
1089 return toType(eface
.typ
)
// ptrMap is the cache for PtrTo: it maps each element type to the
// pointer type previously constructed for it.
var ptrMap sync.Map // map[*rtype]*ptrType
1095 // PtrTo returns the pointer type with element t.
1096 // For example, if t represents type Foo, PtrTo(t) represents *Foo.
1097 func PtrTo(t Type
) Type
{
1098 return t
.(*rtype
).ptrTo()
1101 func (t
*rtype
) ptrTo() *rtype
{
1102 if p
:= t
.ptrToThis
; p
!= nil {
1107 if pi
, ok
:= ptrMap
.Load(t
); ok
{
1108 return &pi
.(*ptrType
).rtype
1111 s
:= "*" + *t
.string
1113 canonicalTypeLock
.RLock()
1114 r
, ok
:= canonicalType
[s
]
1115 canonicalTypeLock
.RUnlock()
1117 p
:= (*ptrType
)(unsafe
.Pointer(r
.(*rtype
)))
1118 pi
, _
:= ptrMap
.LoadOrStore(t
, p
)
1119 return &pi
.(*ptrType
).rtype
1122 // Create a new ptrType starting with the description
1123 // of an *unsafe.Pointer.
1124 var iptr
interface{} = (*unsafe
.Pointer
)(nil)
1125 prototype
:= *(**ptrType
)(unsafe
.Pointer(&iptr
))
1131 // For the type structures linked into the binary, the
1132 // compiler provides a good hash of the string.
1133 // Create a good hash for the new string by using
1134 // the FNV-1 hash's mixing function to combine the
1135 // old hash and the new "*".
1136 // p.hash = fnv1(t.hash, '*')
1137 // This is the gccgo version.
1138 pp
.hash
= (t
.hash
<< 4) + 9
1140 pp
.uncommonType
= nil
1144 q
:= canonicalize(&pp
.rtype
)
1145 p
:= (*ptrType
)(unsafe
.Pointer(q
.(*rtype
)))
1147 pi
, _
:= ptrMap
.LoadOrStore(t
, p
)
1148 return &pi
.(*ptrType
).rtype
1151 // fnv1 incorporates the list of bytes into the hash x using the FNV-1 hash function.
1152 func fnv1(x
uint32, list
...byte) uint32 {
1153 for _
, b
:= range list
{
1154 x
= x
*16777619 ^ uint32(b
)
1159 func (t
*rtype
) Implements(u Type
) bool {
1161 panic("reflect: nil type passed to Type.Implements")
1163 if u
.Kind() != Interface
{
1164 panic("reflect: non-interface type passed to Type.Implements")
1166 return implements(u
.(*rtype
), t
)
1169 func (t
*rtype
) AssignableTo(u Type
) bool {
1171 panic("reflect: nil type passed to Type.AssignableTo")
1174 return directlyAssignable(uu
, t
) ||
implements(uu
, t
)
1177 func (t
*rtype
) ConvertibleTo(u Type
) bool {
1179 panic("reflect: nil type passed to Type.ConvertibleTo")
1182 return convertOp(uu
, t
) != nil
1185 func (t
*rtype
) Comparable() bool {
1187 case Bool
, Int
, Int8
, Int16
, Int32
, Int64
,
1188 Uint
, Uint8
, Uint16
, Uint32
, Uint64
, Uintptr
,
1189 Float32
, Float64
, Complex64
, Complex128
,
1190 Chan
, Interface
, Ptr
, String
, UnsafePointer
:
1193 case Func
, Map
, Slice
:
1197 return (*arrayType
)(unsafe
.Pointer(t
)).elem
.Comparable()
1200 tt
:= (*structType
)(unsafe
.Pointer(t
))
1201 for i
:= range tt
.fields
{
1202 if !tt
.fields
[i
].typ
.Comparable() {
1209 panic("reflect: impossible")
1213 // implements reports whether the type V implements the interface type T.
1214 func implements(T
, V
*rtype
) bool {
1215 if T
.Kind() != Interface
{
1218 t
:= (*interfaceType
)(unsafe
.Pointer(T
))
1219 if len(t
.methods
) == 0 {
1223 // The same algorithm applies in both cases, but the
1224 // method tables for an interface type and a concrete type
1225 // are different, so the code is duplicated.
1226 // In both cases the algorithm is a linear scan over the two
1227 // lists - T's methods and V's methods - simultaneously.
1228 // Since method tables are stored in a unique sorted order
1229 // (alphabetical, with no duplicate method names), the scan
1230 // through V's methods must hit a match for each of T's
1231 // methods along the way, or else V does not implement T.
1232 // This lets us run the scan in overall linear time instead of
1233 // the quadratic time a naive search would require.
1234 // See also ../runtime/iface.go.
1235 if V
.Kind() == Interface
{
1236 v
:= (*interfaceType
)(unsafe
.Pointer(V
))
1238 for j
:= 0; j
< len(v
.methods
); j
++ {
1241 if *vm
.name
== *tm
.name
&& (vm
.pkgPath
== tm
.pkgPath ||
(vm
.pkgPath
!= nil && tm
.pkgPath
!= nil && *vm
.pkgPath
== *tm
.pkgPath
)) && toType(vm
.typ
).common() == toType(tm
.typ
).common() {
1242 if i
++; i
>= len(t
.methods
) {
1255 for j
:= 0; j
< len(v
.methods
); j
++ {
1258 if *vm
.name
== *tm
.name
&& (vm
.pkgPath
== tm
.pkgPath ||
(vm
.pkgPath
!= nil && tm
.pkgPath
!= nil && *vm
.pkgPath
== *tm
.pkgPath
)) && toType(vm
.mtyp
).common() == toType(tm
.typ
).common() {
1259 if i
++; i
>= len(t
.methods
) {
1267 // directlyAssignable reports whether a value x of type V can be directly
1268 // assigned (using memmove) to a value of type T.
1269 // https://golang.org/doc/go_spec.html#Assignability
1270 // Ignoring the interface rules (implemented elsewhere)
1271 // and the ideal constant rules (no ideal constants at run time).
1272 func directlyAssignable(T
, V
*rtype
) bool {
1273 // x's type V is identical to T?
1278 // Otherwise at least one of T and V must be unnamed
1279 // and they must have the same kind.
1280 if T
.Name() != "" && V
.Name() != "" || T
.Kind() != V
.Kind() {
1284 // x's type T and V must have identical underlying types.
1285 return haveIdenticalUnderlyingType(T
, V
, true)
1288 func haveIdenticalType(T
, V Type
, cmpTags
bool) bool {
1293 if T
.Name() != V
.Name() || T
.Kind() != V
.Kind() {
1297 return haveIdenticalUnderlyingType(T
.common(), V
.common(), false)
1300 func haveIdenticalUnderlyingType(T
, V
*rtype
, cmpTags
bool) bool {
1306 if kind
!= V
.Kind() {
1310 // Non-composite types of equal kind have same underlying type
1311 // (the predefined instance of the type).
1312 if Bool
<= kind
&& kind
<= Complex128 || kind
== String || kind
== UnsafePointer
{
1319 return T
.Len() == V
.Len() && haveIdenticalType(T
.Elem(), V
.Elem(), cmpTags
)
1323 // x is a bidirectional channel value, T is a channel type,
1324 // and x's type V and T have identical element types.
1325 if V
.ChanDir() == BothDir
&& haveIdenticalType(T
.Elem(), V
.Elem(), cmpTags
) {
1329 // Otherwise continue test for identical underlying type.
1330 return V
.ChanDir() == T
.ChanDir() && haveIdenticalType(T
.Elem(), V
.Elem(), cmpTags
)
1333 t
:= (*funcType
)(unsafe
.Pointer(T
))
1334 v
:= (*funcType
)(unsafe
.Pointer(V
))
1335 if t
.dotdotdot
!= v
.dotdotdot ||
len(t
.in
) != len(v
.in
) ||
len(t
.out
) != len(v
.out
) {
1338 for i
, typ
:= range t
.in
{
1339 if !haveIdenticalType(typ
, v
.in
[i
], cmpTags
) {
1343 for i
, typ
:= range t
.out
{
1344 if !haveIdenticalType(typ
, v
.out
[i
], cmpTags
) {
1351 t
:= (*interfaceType
)(unsafe
.Pointer(T
))
1352 v
:= (*interfaceType
)(unsafe
.Pointer(V
))
1353 if len(t
.methods
) == 0 && len(v
.methods
) == 0 {
1356 // Might have the same methods but still
1357 // need a run time conversion.
1361 return haveIdenticalType(T
.Key(), V
.Key(), cmpTags
) && haveIdenticalType(T
.Elem(), V
.Elem(), cmpTags
)
1364 return haveIdenticalType(T
.Elem(), V
.Elem(), cmpTags
)
1367 t
:= (*structType
)(unsafe
.Pointer(T
))
1368 v
:= (*structType
)(unsafe
.Pointer(V
))
1369 if len(t
.fields
) != len(v
.fields
) {
1372 for i
:= range t
.fields
{
1375 if tf
.name
!= vf
.name
&& (tf
.name
== nil || vf
.name
== nil ||
*tf
.name
!= *vf
.name
) {
1378 if tf
.pkgPath
!= vf
.pkgPath
&& (tf
.pkgPath
== nil || vf
.pkgPath
== nil ||
*tf
.pkgPath
!= *vf
.pkgPath
) {
1381 if !haveIdenticalType(tf
.typ
, vf
.typ
, cmpTags
) {
1384 if cmpTags
&& tf
.tag
!= vf
.tag
&& (tf
.tag
== nil || vf
.tag
== nil ||
*tf
.tag
!= *vf
.tag
) {
1387 if tf
.offsetAnon
!= vf
.offsetAnon
{
1397 // The lookupCache caches ArrayOf, ChanOf, MapOf and SliceOf lookups.
1398 var lookupCache sync
.Map
// map[cacheKey]*rtype
1400 // A cacheKey is the key for use in the lookupCache.
1401 // Four values describe any of the types we are looking for:
1402 // type kind, one or two subtypes, and an extra integer.
1403 type cacheKey
struct {
1410 // The funcLookupCache caches FuncOf lookups.
1411 // FuncOf does not share the common lookupCache since cacheKey is not
1412 // sufficient to represent functions unambiguously.
1413 var funcLookupCache
struct {
1414 sync
.Mutex
// Guards stores (but not loads) on m.
1416 // m is a map[uint32][]*rtype keyed by the hash calculated in FuncOf.
1417 // Elements of m are append-only and thus safe for concurrent reading.
1421 // ChanOf returns the channel type with the given direction and element type.
1422 // For example, if t represents int, ChanOf(RecvDir, t) represents <-chan int.
1424 // The gc runtime imposes a limit of 64 kB on channel element types.
1425 // If t's size is equal to or exceeds this limit, ChanOf panics.
1426 func ChanOf(dir ChanDir
, t Type
) Type
{
1430 ckey
:= cacheKey
{Chan
, typ
, nil, uintptr(dir
)}
1431 if ch
, ok
:= lookupCache
.Load(ckey
); ok
{
1435 // This restriction is imposed by the gc compiler and the runtime.
1436 if typ
.size
>= 1<<16 {
1437 panic("reflect.ChanOf: element size too large")
1440 // Look in known types.
1441 // TODO: Precedence when constructing string.
1445 panic("reflect.ChanOf: invalid dir")
1447 s
= "chan<- " + *typ
.string
1449 s
= "<-chan " + *typ
.string
1451 s
= "chan " + *typ
.string
1454 // Make a channel type.
1455 var ichan
interface{} = (chan unsafe
.Pointer
)(nil)
1456 prototype
:= *(**chanType
)(unsafe
.Pointer(&ichan
))
1458 ch
.dir
= uintptr(dir
)
1461 // gccgo uses a different hash.
1462 // ch.hash = fnv1(typ.hash, 'c', byte(dir))
1464 if dir
&SendDir
!= 0 {
1467 if dir
&RecvDir
!= 0 {
1470 ch
.hash
+= typ
.hash
<< 2
1475 ch
.uncommonType
= nil
1478 ti
, _
:= lookupCache
.LoadOrStore(ckey
, &ch
.rtype
)
1482 func ismapkey(*rtype
) bool // implemented in runtime
1484 // MapOf returns the map type with the given key and element types.
1485 // For example, if k represents int and e represents string,
1486 // MapOf(k, e) represents map[int]string.
1488 // If the key type is not a valid map key type (that is, if it does
1489 // not implement Go's == operator), MapOf panics.
1490 func MapOf(key
, elem Type
) Type
{
1491 ktyp
:= key
.(*rtype
)
1492 etyp
:= elem
.(*rtype
)
1494 if !ismapkey(ktyp
) {
1495 panic("reflect.MapOf: invalid key type " + ktyp
.String())
1499 ckey
:= cacheKey
{Map
, ktyp
, etyp
, 0}
1500 if mt
, ok
:= lookupCache
.Load(ckey
); ok
{
1504 // Look in known types.
1505 s
:= "map[" + *ktyp
.string + "]" + *etyp
.string
1508 var imap
interface{} = (map[unsafe
.Pointer
]unsafe
.Pointer
)(nil)
1509 mt
:= **(**mapType
)(unsafe
.Pointer(&imap
))
1512 // gccgo uses a different hash
1513 // mt.hash = fnv1(etyp.hash, 'm', byte(ktyp.hash>>24), byte(ktyp.hash>>16), byte(ktyp.hash>>8), byte(ktyp.hash))
1514 mt
.hash
= ktyp
.hash
+ etyp
.hash
+ 2 + 14
1518 mt
.uncommonType
= nil
1521 mt
.bucket
= bucketOf(ktyp
, etyp
)
1522 if ktyp
.size
> maxKeySize
{
1523 mt
.keysize
= uint8(ptrSize
)
1526 mt
.keysize
= uint8(ktyp
.size
)
1529 if etyp
.size
> maxValSize
{
1530 mt
.valuesize
= uint8(ptrSize
)
1531 mt
.indirectvalue
= 1
1533 mt
.valuesize
= uint8(etyp
.size
)
1534 mt
.indirectvalue
= 0
1536 mt
.bucketsize
= uint16(mt
.bucket
.size
)
1537 mt
.reflexivekey
= isReflexive(ktyp
)
1538 mt
.needkeyupdate
= needKeyUpdate(ktyp
)
1540 ti
, _
:= lookupCache
.LoadOrStore(ckey
, &mt
.rtype
)
1544 // FuncOf returns the function type with the given argument and result types.
1545 // For example if k represents int and e represents string,
1546 // FuncOf([]Type{k}, []Type{e}, false) represents func(int) string.
1548 // The variadic argument controls whether the function is variadic. FuncOf
1549 // panics if the in[len(in)-1] does not represent a slice and variadic is
1551 func FuncOf(in
, out
[]Type
, variadic
bool) Type
{
1552 if variadic
&& (len(in
) == 0 || in
[len(in
)-1].Kind() != Slice
) {
1553 panic("reflect.FuncOf: last arg of variadic func must be slice")
1556 // Make a func type.
1557 var ifunc
interface{} = (func())(nil)
1558 prototype
:= *(**funcType
)(unsafe
.Pointer(&ifunc
))
1562 // Build a hash and minimally populate ft.
1564 var fin
, fout
[]*rtype
1566 for _
, in
:= range in
{
1568 fin
= append(fin
, t
)
1569 hash
+= t
.hash
<< shift
1573 for _
, out
:= range out
{
1575 fout
= append(fout
, t
)
1576 hash
+= t
.hash
<< shift
1587 ft
.dotdotdot
= variadic
1590 if ts
, ok
:= funcLookupCache
.m
.Load(hash
); ok
{
1591 for _
, t
:= range ts
.([]*rtype
) {
1592 if haveIdenticalUnderlyingType(&ft
.rtype
, t
, true) {
1598 // Not in cache, lock and retry.
1599 funcLookupCache
.Lock()
1600 defer funcLookupCache
.Unlock()
1601 if ts
, ok
:= funcLookupCache
.m
.Load(hash
); ok
{
1602 for _
, t
:= range ts
.([]*rtype
) {
1603 if haveIdenticalUnderlyingType(&ft
.rtype
, t
, true) {
1609 addToCache
:= func(tt
*rtype
) Type
{
1611 if rti
, ok
:= funcLookupCache
.m
.Load(hash
); ok
{
1612 rts
= rti
.([]*rtype
)
1614 funcLookupCache
.m
.Store(hash
, append(rts
, tt
))
1620 // Populate the remaining fields of ft and store in cache.
1622 ft
.uncommonType
= nil
1624 return addToCache(&ft
.rtype
)
1627 // funcStr builds a string representation of a funcType.
1628 func funcStr(ft
*funcType
) string {
1629 repr
:= make([]byte, 0, 64)
1630 repr
= append(repr
, "func("...)
1631 for i
, t
:= range ft
.in
{
1633 repr
= append(repr
, ", "...)
1635 if ft
.dotdotdot
&& i
== len(ft
.in
)-1 {
1636 repr
= append(repr
, "..."...)
1637 repr
= append(repr
, *(*sliceType
)(unsafe
.Pointer(t
)).elem
.string...)
1639 repr
= append(repr
, *t
.string...)
1642 repr
= append(repr
, ')')
1643 if l
:= len(ft
.out
); l
== 1 {
1644 repr
= append(repr
, ' ')
1646 repr
= append(repr
, " ("...)
1648 for i
, t
:= range ft
.out
{
1650 repr
= append(repr
, ", "...)
1652 repr
= append(repr
, *t
.string...)
1654 if len(ft
.out
) > 1 {
1655 repr
= append(repr
, ')')
1660 // isReflexive reports whether the == operation on the type is reflexive.
1661 // That is, x == x for all values x of type t.
1662 func isReflexive(t
*rtype
) bool {
1664 case Bool
, Int
, Int8
, Int16
, Int32
, Int64
, Uint
, Uint8
, Uint16
, Uint32
, Uint64
, Uintptr
, Chan
, Ptr
, String
, UnsafePointer
:
1666 case Float32
, Float64
, Complex64
, Complex128
, Interface
:
1669 tt
:= (*arrayType
)(unsafe
.Pointer(t
))
1670 return isReflexive(tt
.elem
)
1672 tt
:= (*structType
)(unsafe
.Pointer(t
))
1673 for _
, f
:= range tt
.fields
{
1674 if !isReflexive(f
.typ
) {
1680 // Func, Map, Slice, Invalid
1681 panic("isReflexive called on non-key type " + t
.String())
1685 // needKeyUpdate reports whether map overwrites require the key to be copied.
1686 func needKeyUpdate(t
*rtype
) bool {
1688 case Bool
, Int
, Int8
, Int16
, Int32
, Int64
, Uint
, Uint8
, Uint16
, Uint32
, Uint64
, Uintptr
, Chan
, Ptr
, UnsafePointer
:
1690 case Float32
, Float64
, Complex64
, Complex128
, Interface
, String
:
1691 // Float keys can be updated from +0 to -0.
1692 // String keys can be updated to use a smaller backing store.
1693 // Interfaces might have floats of strings in them.
1696 tt
:= (*arrayType
)(unsafe
.Pointer(t
))
1697 return needKeyUpdate(tt
.elem
)
1699 tt
:= (*structType
)(unsafe
.Pointer(t
))
1700 for _
, f
:= range tt
.fields
{
1701 if needKeyUpdate(f
.typ
) {
1707 // Func, Map, Slice, Invalid
1708 panic("needKeyUpdate called on non-key type " + t
.String())
1712 // Make sure these routines stay in sync with ../../runtime/hashmap.go!
1713 // These types exist only for GC, so we only fill out GC relevant info.
1714 // Currently, that's just size and the GC program. We also fill in string
1715 // for possible debugging use.
1717 bucketSize
uintptr = 8
1718 maxKeySize
uintptr = 128
1719 maxValSize
uintptr = 128
1722 func bucketOf(ktyp
, etyp
*rtype
) *rtype
{
1723 // See comment on hmap.overflow in ../runtime/hashmap.go.
1725 if ktyp
.kind
&kindNoPointers
!= 0 && etyp
.kind
&kindNoPointers
!= 0 &&
1726 ktyp
.size
<= maxKeySize
&& etyp
.size
<= maxValSize
{
1727 kind
= kindNoPointers
1730 if ktyp
.size
> maxKeySize
{
1731 ktyp
= PtrTo(ktyp
).(*rtype
)
1733 if etyp
.size
> maxValSize
{
1734 etyp
= PtrTo(etyp
).(*rtype
)
1737 // Prepare GC data if any.
1738 // A bucket is at most bucketSize*(1+maxKeySize+maxValSize)+2*ptrSize bytes,
1739 // or 2072 bytes, or 259 pointer-size words, or 33 bytes of pointer bitmap.
1740 // Note that since the key and value are known to be <= 128 bytes,
1741 // they're guaranteed to have bitmaps instead of GC programs.
1746 size
= align(size
, uintptr(ktyp
.fieldAlign
))
1747 size
+= bucketSize
* ktyp
.size
1748 size
= align(size
, uintptr(etyp
.fieldAlign
))
1749 size
+= bucketSize
* etyp
.size
1751 maxAlign
:= uintptr(ktyp
.fieldAlign
)
1752 if maxAlign
< uintptr(etyp
.fieldAlign
) {
1753 maxAlign
= uintptr(etyp
.fieldAlign
)
1755 if maxAlign
> ptrSize
{
1756 size
= align(size
, maxAlign
)
1757 size
+= align(ptrSize
, maxAlign
) - ptrSize
1758 } else if maxAlign
< ptrSize
{
1759 size
= align(size
, ptrSize
)
1766 if kind
!= kindNoPointers
{
1767 nptr
:= size
/ ptrSize
1768 mask
:= make([]byte, (nptr
+7)/8)
1770 psize
= align(psize
, uintptr(ktyp
.fieldAlign
))
1771 base
:= psize
/ ptrSize
1773 if ktyp
.kind
&kindNoPointers
== 0 {
1774 if ktyp
.kind
&kindGCProg
!= 0 {
1775 panic("reflect: unexpected GC program in MapOf")
1777 kmask
:= (*[16]byte)(unsafe
.Pointer(ktyp
.gcdata
))
1778 for i
:= uintptr(0); i
< ktyp
.ptrdata
/ptrSize
; i
++ {
1779 if (kmask
[i
/8]>>(i%8
))&1 != 0 {
1780 for j
:= uintptr(0); j
< bucketSize
; j
++ {
1781 word
:= base
+ j
*ktyp
.size
/ptrSize
+ i
1782 mask
[word
/8] |
= 1 << (word
% 8)
1787 psize
+= bucketSize
* ktyp
.size
1788 psize
= align(psize
, uintptr(etyp
.fieldAlign
))
1789 base
= psize
/ ptrSize
1791 if etyp
.kind
&kindNoPointers
== 0 {
1792 if etyp
.kind
&kindGCProg
!= 0 {
1793 panic("reflect: unexpected GC program in MapOf")
1795 emask
:= (*[16]byte)(unsafe
.Pointer(etyp
.gcdata
))
1796 for i
:= uintptr(0); i
< etyp
.ptrdata
/ptrSize
; i
++ {
1797 if (emask
[i
/8]>>(i%8
))&1 != 0 {
1798 for j
:= uintptr(0); j
< bucketSize
; j
++ {
1799 word
:= base
+ j
*etyp
.size
/ptrSize
+ i
1800 mask
[word
/8] |
= 1 << (word
% 8)
1806 word
:= ovoff
/ ptrSize
1807 mask
[word
/8] |
= 1 << (word
% 8)
1809 ptrdata
= (word
+ 1) * ptrSize
1811 // overflow word must be last
1812 if ptrdata
!= size
{
1813 panic("reflect: bad layout computation in MapOf")
1818 align
: int8(maxAlign
),
1819 fieldAlign
: uint8(maxAlign
),
1825 s
:= "bucket(" + *ktyp
.string + "," + *etyp
.string + ")"
1830 // SliceOf returns the slice type with element type t.
1831 // For example, if t represents int, SliceOf(t) represents []int.
1832 func SliceOf(t Type
) Type
{
1836 ckey
:= cacheKey
{Slice
, typ
, nil, 0}
1837 if slice
, ok
:= lookupCache
.Load(ckey
); ok
{
1841 // Look in known types.
1842 s
:= "[]" + *typ
.string
1844 // Make a slice type.
1845 var islice
interface{} = ([]unsafe
.Pointer
)(nil)
1846 prototype
:= *(**sliceType
)(unsafe
.Pointer(&islice
))
1850 // gccgo uses a different hash.
1851 // slice.hash = fnv1(typ.hash, '[')
1852 slice
.hash
= typ
.hash
+ 1 + 13
1855 slice
.uncommonType
= nil
1856 slice
.ptrToThis
= nil
1858 ti
, _
:= lookupCache
.LoadOrStore(ckey
, &slice
.rtype
)
1862 // The structLookupCache caches StructOf lookups.
1863 // StructOf does not share the common lookupCache since we need to pin
1864 // the memory associated with *structTypeFixedN.
1865 var structLookupCache
struct {
1866 sync
.Mutex
// Guards stores (but not loads) on m.
1868 // m is a map[uint32][]Type keyed by the hash calculated in StructOf.
1869 // Elements in m are append-only and thus safe for concurrent reading.
// isLetter reports whether a given 'rune' is classified as a Letter
// in the sense of the Go spec's identifier grammar: an ASCII letter,
// an underscore, or any non-ASCII Unicode letter.
func isLetter(ch rune) bool {
	return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' || ch >= utf8.RuneSelf && unicode.IsLetter(ch)
}
1878 // isValidFieldName checks if a string is a valid (struct) field name or not.
1880 // According to the language spec, a field name should be an identifier.
1882 // identifier = letter { letter | unicode_digit } .
1883 // letter = unicode_letter | "_" .
1884 func isValidFieldName(fieldName
string) bool {
1885 for i
, c
:= range fieldName
{
1886 if i
== 0 && !isLetter(c
) {
1890 if !(isLetter(c
) || unicode
.IsDigit(c
)) {
1895 return len(fieldName
) > 0
1898 // StructOf returns the struct type containing fields.
1899 // The Offset and Index fields are ignored and computed as they would be
1902 // StructOf currently does not generate wrapper methods for embedded fields.
1903 // This limitation may be lifted in a future version.
1904 func StructOf(fields
[]StructField
) Type
{
1912 fs
= make([]structField
, len(fields
))
1913 repr
= make([]byte, 0, 64)
1914 fset
= map[string]struct{}{} // fields' names
1916 hasPtr
= false // records whether at least one struct-field is a pointer
1917 hasGCProg
= false // records whether a struct-field type has a GCProg
1920 lastzero
:= uintptr(0)
1921 repr
= append(repr
, "struct {"...)
1922 for i
, field
:= range fields
{
1923 if field
.Name
== "" {
1924 panic("reflect.StructOf: field " + strconv
.Itoa(i
) + " has no name")
1926 if !isValidFieldName(field
.Name
) {
1927 panic("reflect.StructOf: field " + strconv
.Itoa(i
) + " has invalid name")
1929 if field
.Type
== nil {
1930 panic("reflect.StructOf: field " + strconv
.Itoa(i
) + " has no type")
1932 f
:= runtimeStructField(field
)
1934 if ft
.kind
&kindGCProg
!= 0 {
1941 // Update string and hash
1943 hash
= (hash
<< 1) + ft
.hash
1945 repr
= append(repr
, (" " + name
)...)
1948 repr
= append(repr
, " ?"...)
1949 if f
.typ
.Kind() == Ptr
{
1950 // Embedded ** and *interface{} are illegal
1952 if k
:= elem
.Kind(); k
== Ptr || k
== Interface
{
1953 panic("reflect.StructOf: illegal anonymous field type " + ft
.String())
1955 name
= elem
.String()
1960 switch f
.typ
.Kind() {
1962 ift
:= (*interfaceType
)(unsafe
.Pointer(ft
))
1963 if len(ift
.methods
) > 0 {
1964 panic("reflect.StructOf: embedded field with methods not implemented")
1967 ptr
:= (*ptrType
)(unsafe
.Pointer(ft
))
1968 if unt
:= ptr
.uncommon(); unt
!= nil {
1969 if len(unt
.methods
) > 0 {
1970 panic("reflect.StructOf: embedded field with methods not implemented")
1973 if unt
:= ptr
.elem
.uncommon(); unt
!= nil {
1974 if len(unt
.methods
) > 0 {
1975 panic("reflect.StructOf: embedded field with methods not implemented")
1979 if unt
:= ft
.uncommon(); unt
!= nil {
1980 if len(unt
.methods
) > 0 {
1981 panic("reflect.StructOf: embedded field with methods not implemented")
1986 if _
, dup
:= fset
[name
]; dup
{
1987 panic("reflect.StructOf: duplicate field " + name
)
1989 fset
[name
] = struct{}{}
1991 repr
= append(repr
, (" " + ft
.String())...)
1993 repr
= append(repr
, (" " + strconv
.Quote(*f
.tag
))...)
1995 if i
< len(fields
)-1 {
1996 repr
= append(repr
, ';')
1999 comparable
= comparable
&& (ft
.equalfn
!= nil)
2000 hashable
= hashable
&& (ft
.hashfn
!= nil)
2002 offset
:= align(size
, uintptr(ft
.fieldAlign
))
2003 if int8(ft
.fieldAlign
) > typalign
{
2004 typalign
= int8(ft
.fieldAlign
)
2006 size
= offset
+ ft
.size
2007 f
.offsetAnon |
= offset
<< 1
2016 if size
> 0 && lastzero
== size
{
2017 // This is a non-zero sized struct that ends in a
2018 // zero-sized field. We add an extra byte of padding,
2019 // to ensure that taking the address of the final
2020 // zero-sized field can't manufacture a pointer to the
2021 // next object in the heap. See issue 9401.
2026 repr
= append(repr
, ' ')
2028 repr
= append(repr
, '}')
2032 // Round the size up to be a multiple of the alignment.
2033 size
= align(size
, uintptr(typalign
))
2035 // Make the struct type.
2036 var istruct
interface{} = struct{}{}
2037 prototype
:= *(**structType
)(unsafe
.Pointer(&istruct
))
2038 typ
:= new(structType
)
2043 if ts
, ok
:= structLookupCache
.m
.Load(hash
); ok
{
2044 for _
, st
:= range ts
.([]Type
) {
2046 if haveIdenticalUnderlyingType(&typ
.rtype
, t
, true) {
2052 // Not in cache, lock and retry.
2053 structLookupCache
.Lock()
2054 defer structLookupCache
.Unlock()
2055 if ts
, ok
:= structLookupCache
.m
.Load(hash
); ok
{
2056 for _
, st
:= range ts
.([]Type
) {
2058 if haveIdenticalUnderlyingType(&typ
.rtype
, t
, true) {
2064 addToCache
:= func(t Type
) Type
{
2066 if ti
, ok
:= structLookupCache
.m
.Load(hash
); ok
{
2069 structLookupCache
.m
.Store(hash
, append(ts
, t
))
2076 typ
.align
= typalign
2077 typ
.fieldAlign
= uint8(typalign
)
2079 typ
.kind |
= kindNoPointers
2081 typ
.kind
&^= kindNoPointers
2086 for i
, ft
:= range fs
{
2087 if ft
.typ
.pointers() {
2091 prog
:= []byte{0, 0, 0, 0} // will be length of prog
2092 for i
, ft
:= range fs
{
2093 if i
> lastPtrField
{
2094 // gcprog should not include anything for any field after
2095 // the last field that contains pointer data
2098 // FIXME(sbinet) handle padding, fields smaller than a word
2099 elemGC
:= (*[1 << 30]byte)(unsafe
.Pointer(ft
.typ
.gcdata
))[:]
2100 elemPtrs
:= ft
.typ
.ptrdata
/ ptrSize
2102 case ft
.typ
.kind
&kindGCProg
== 0 && ft
.typ
.ptrdata
!= 0:
2103 // Element is small with pointer mask; use as literal bits.
2105 // Emit 120-bit chunks of full bytes (max is 127 but we avoid using partial bytes).
2107 for n
:= elemPtrs
; n
> 120; n
-= 120 {
2108 prog
= append(prog
, 120)
2109 prog
= append(prog
, mask
[:15]...)
2112 prog
= append(prog
, byte(n
))
2113 prog
= append(prog
, mask
[:(n
+7)/8]...)
2114 case ft
.typ
.kind
&kindGCProg
!= 0:
2115 // Element has GC program; emit one element.
2116 elemProg
:= elemGC
[4 : 4+*(*uint32)(unsafe
.Pointer(&elemGC
[0]))-1]
2117 prog
= append(prog
, elemProg
...)
2119 // Pad from ptrdata to size.
2120 elemWords
:= ft
.typ
.size
/ ptrSize
2121 if elemPtrs
< elemWords
{
2122 // Emit literal 0 bit, then repeat as needed.
2123 prog
= append(prog
, 0x01, 0x00)
2124 if elemPtrs
+1 < elemWords
{
2125 prog
= append(prog
, 0x81)
2126 prog
= appendVarint(prog
, elemWords
-elemPtrs
-1)
2130 *(*uint32)(unsafe
.Pointer(&prog
[0])) = uint32(len(prog
) - 4)
2131 typ
.kind |
= kindGCProg
2132 typ
.gcdata
= &prog
[0]
2134 typ
.kind
&^= kindGCProg
2135 bv
:= new(bitVector
)
2136 addTypeBits(bv
, 0, typ
.common())
2137 if len(bv
.data
) > 0 {
2138 typ
.gcdata
= &bv
.data
[0]
2141 typ
.ptrdata
= typeptrdata(typ
.common())
2144 typ
.hashfn
= func(p unsafe
.Pointer
, seed
uintptr) uintptr {
2146 for _
, ft
:= range typ
.fields
{
2147 pi
:= add(p
, ft
.offset(), "&x.field safe")
2148 o
= ft
.typ
.hashfn(pi
, o
)
2157 typ
.equalfn
= func(p
, q unsafe
.Pointer
) bool {
2158 for _
, ft
:= range typ
.fields
{
2159 pi
:= add(p
, ft
.offset(), "&x.field safe")
2160 qi
:= add(q
, ft
.offset(), "&x.field safe")
2161 if !ft
.typ
.equalfn(pi
, qi
) {
2171 typ
.kind
&^= kindDirectIface
2172 typ
.uncommonType
= nil
2175 return addToCache(&typ
.rtype
)
2178 func runtimeStructField(field StructField
) structField
{
2179 if field
.PkgPath
!= "" {
2180 panic("reflect.StructOf: StructOf does not allow unexported fields")
2183 // Best-effort check for misuse.
2184 // Since PkgPath is empty, not much harm done if Unicode lowercase slips through.
2186 if 'a' <= c
&& c
<= 'z' || c
== '_' {
2187 panic("reflect.StructOf: field \"" + field
.Name
+ "\" is unexported but missing PkgPath")
2190 offsetAnon
:= uintptr(0)
2191 if field
.Anonymous
{
2199 if field
.Tag
!= "" {
2200 st
:= string(field
.Tag
)
2207 typ
: field
.Type
.common(),
2209 offsetAnon
: offsetAnon
,
2213 // typeptrdata returns the length in bytes of the prefix of t
2214 // containing pointer data. Anything after this offset is scalar data.
2215 // keep in sync with ../cmd/compile/internal/gc/reflect.go
2216 func typeptrdata(t
*rtype
) uintptr {
2222 st
:= (*structType
)(unsafe
.Pointer(t
))
2223 // find the last field that has pointers.
2225 for i
:= range st
.fields
{
2226 ft
:= st
.fields
[i
].typ
2231 f
:= st
.fields
[field
]
2232 return f
.offset() + f
.typ
.ptrdata
2235 panic("reflect.typeptrdata: unexpected type, " + t
.String())
2239 // See cmd/compile/internal/gc/reflect.go for derivation of constant.
2240 const maxPtrmaskBytes
= 2048
2242 // ArrayOf returns the array type with the given count and element type.
2243 // For example, if t represents int, ArrayOf(5, t) represents [5]int.
2245 // If the resulting type would be larger than the available address space,
2247 func ArrayOf(count
int, elem Type
) Type
{
2248 typ
:= elem
.(*rtype
)
2251 ckey
:= cacheKey
{Array
, typ
, nil, uintptr(count
)}
2252 if array
, ok
:= lookupCache
.Load(ckey
); ok
{
2256 // Look in known types.
2257 s
:= "[" + strconv
.Itoa(count
) + "]" + *typ
.string
2259 // Make an array type.
2260 var iarray
interface{} = [1]unsafe
.Pointer
{}
2261 prototype
:= *(**arrayType
)(unsafe
.Pointer(&iarray
))
2265 // gccgo uses a different hash.
2266 // array.hash = fnv1(typ.hash, '[')
2267 // for n := uint32(count); n > 0; n >>= 8 {
2268 // array.hash = fnv1(array.hash, byte(n))
2270 // array.hash = fnv1(array.hash, ']')
2271 array
.hash
= typ
.hash
+ 1 + 13
2274 array
.ptrToThis
= nil
2276 max
:= ^uintptr(0) / typ
.size
2277 if uintptr(count
) > max
{
2278 panic("reflect.ArrayOf: array size would exceed virtual address space")
2281 array
.size
= typ
.size
* uintptr(count
)
2282 if count
> 0 && typ
.ptrdata
!= 0 {
2283 array
.ptrdata
= typ
.size
*uintptr(count
-1) + typ
.ptrdata
2285 array
.align
= typ
.align
2286 array
.fieldAlign
= typ
.fieldAlign
2287 array
.uncommonType
= nil
2288 array
.len = uintptr(count
)
2289 array
.slice
= SliceOf(elem
).(*rtype
)
2291 array
.kind
&^= kindNoPointers
2293 case typ
.kind
&kindNoPointers
!= 0 || array
.size
== 0:
2295 array
.kind |
= kindNoPointers
2300 // In memory, 1-element array looks just like the element.
2301 array
.kind |
= typ
.kind
& kindGCProg
2302 array
.gcdata
= typ
.gcdata
2303 array
.ptrdata
= typ
.ptrdata
2305 case typ
.kind
&kindGCProg
== 0 && array
.size
<= maxPtrmaskBytes
*8*ptrSize
:
2306 // Element is small with pointer mask; array is still small.
2307 // Create direct pointer mask by turning each 1 bit in elem
2308 // into count 1 bits in larger mask.
2309 mask
:= make([]byte, (array
.ptrdata
/ptrSize
+7)/8)
2310 elemMask
:= (*[1 << 30]byte)(unsafe
.Pointer(typ
.gcdata
))[:]
2311 elemWords
:= typ
.size
/ ptrSize
2312 for j
:= uintptr(0); j
< typ
.ptrdata
/ptrSize
; j
++ {
2313 if (elemMask
[j
/8]>>(j%8
))&1 != 0 {
2314 for i
:= uintptr(0); i
< array
.len; i
++ {
2315 k
:= i
*elemWords
+ j
2316 mask
[k
/8] |
= 1 << (k
% 8)
2320 array
.gcdata
= &mask
[0]
2323 // Create program that emits one element
2324 // and then repeats to make the array.
2325 prog
:= []byte{0, 0, 0, 0} // will be length of prog
2326 elemGC
:= (*[1 << 30]byte)(unsafe
.Pointer(typ
.gcdata
))[:]
2327 elemPtrs
:= typ
.ptrdata
/ ptrSize
2328 if typ
.kind
&kindGCProg
== 0 {
2329 // Element is small with pointer mask; use as literal bits.
2331 // Emit 120-bit chunks of full bytes (max is 127 but we avoid using partial bytes).
2333 for n
= elemPtrs
; n
> 120; n
-= 120 {
2334 prog
= append(prog
, 120)
2335 prog
= append(prog
, mask
[:15]...)
2338 prog
= append(prog
, byte(n
))
2339 prog
= append(prog
, mask
[:(n
+7)/8]...)
2341 // Element has GC program; emit one element.
2342 elemProg
:= elemGC
[4 : 4+*(*uint32)(unsafe
.Pointer(&elemGC
[0]))-1]
2343 prog
= append(prog
, elemProg
...)
2345 // Pad from ptrdata to size.
2346 elemWords
:= typ
.size
/ ptrSize
2347 if elemPtrs
< elemWords
{
2348 // Emit literal 0 bit, then repeat as needed.
2349 prog
= append(prog
, 0x01, 0x00)
2350 if elemPtrs
+1 < elemWords
{
2351 prog
= append(prog
, 0x81)
2352 prog
= appendVarint(prog
, elemWords
-elemPtrs
-1)
2355 // Repeat count-1 times.
2356 if elemWords
< 0x80 {
2357 prog
= append(prog
, byte(elemWords|
0x80))
2359 prog
= append(prog
, 0x80)
2360 prog
= appendVarint(prog
, elemWords
)
2362 prog
= appendVarint(prog
, uintptr(count
)-1)
2363 prog
= append(prog
, 0)
2364 *(*uint32)(unsafe
.Pointer(&prog
[0])) = uint32(len(prog
) - 4)
2365 array
.kind |
= kindGCProg
2366 array
.gcdata
= &prog
[0]
2367 array
.ptrdata
= array
.size
// overestimate but ok; must match program
2370 array
.kind
&^= kindDirectIface
2374 if typ
.equalfn
== nil {
2377 eequal
:= typ
.equalfn
2378 array
.equalfn
= func(p
, q unsafe
.Pointer
) bool {
2379 for i
:= 0; i
< count
; i
++ {
2380 pi
:= arrayAt(p
, i
, esize
, "i < count")
2381 qi
:= arrayAt(q
, i
, esize
, "i < count")
2382 if !eequal(pi
, qi
) {
2390 if typ
.hashfn
== nil {
2394 array
.hashfn
= func(ptr unsafe
.Pointer
, seed
uintptr) uintptr {
2396 for i
:= 0; i
< count
; i
++ {
2397 o
= ehash(arrayAt(ptr
, i
, esize
, "i < count"), o
)
2403 ti
, _
:= lookupCache
.LoadOrStore(ckey
, &array
.rtype
)
// appendVarint appends v to x in the GC program's varint encoding:
// seven bits per byte, least-significant group first, with the high
// bit set on every byte except the last.
func appendVarint(x []byte, v uintptr) []byte {
	for v >= 0x80 {
		x = append(x, byte(v)|0x80)
		v >>= 7
	}
	return append(x, byte(v))
}
// toType converts from a *rtype to a Type that can be returned
// to the client of package reflect. In gc, the only concern is that
// a nil *rtype must be replaced by a nil Type, but in gccgo this
// function takes care of ensuring that multiple *rtype for the same
// type are coalesced into a single Type.

// canonicalType maps a type's string representation to the single
// Type value handed out for that type. Guarded by canonicalTypeLock.
var canonicalType = make(map[string]Type)

// canonicalTypeLock protects canonicalType; reads take RLock, the
// insert path upgrades to Lock.
var canonicalTypeLock sync.RWMutex
2424 func canonicalize(t Type
) Type
{
2429 canonicalTypeLock
.RLock()
2430 if r
, ok
:= canonicalType
[s
]; ok
{
2431 canonicalTypeLock
.RUnlock()
2434 canonicalTypeLock
.RUnlock()
2435 canonicalTypeLock
.Lock()
2436 if r
, ok
:= canonicalType
[s
]; ok
{
2437 canonicalTypeLock
.Unlock()
2440 canonicalType
[s
] = t
2441 canonicalTypeLock
.Unlock()
2445 func toType(p
*rtype
) Type
{
2449 return canonicalize(p
)
2452 // ifaceIndir reports whether t is stored indirectly in an interface value.
2453 func ifaceIndir(t
*rtype
) bool {
2454 return t
.kind
&kindDirectIface
== 0
// Layout matches runtime.gobitvector (well enough).
type bitVector struct {
	n    uint32 // number of bits
	data []byte // bits packed LSB-first, eight per byte
}

// append a bit to the bitmap.
func (bv *bitVector) append(bit uint8) {
	byteIdx, bitIdx := bv.n/8, bv.n%8
	if bitIdx == 0 {
		// Starting a fresh byte; extend the backing slice.
		bv.data = append(bv.data, 0)
	}
	bv.data[byteIdx] |= bit << bitIdx
	bv.n++
}
2472 func addTypeBits(bv
*bitVector
, offset
uintptr, t
*rtype
) {
2473 if t
.kind
&kindNoPointers
!= 0 {
2477 switch Kind(t
.kind
& kindMask
) {
2478 case Chan
, Func
, Map
, Ptr
, Slice
, String
, UnsafePointer
:
2479 // 1 pointer at start of representation
2480 for bv
.n
< uint32(offset
/uintptr(ptrSize
)) {
2487 for bv
.n
< uint32(offset
/uintptr(ptrSize
)) {
2494 // repeat inner type
2495 tt
:= (*arrayType
)(unsafe
.Pointer(t
))
2496 for i
:= 0; i
< int(tt
.len); i
++ {
2497 addTypeBits(bv
, offset
+uintptr(i
)*tt
.elem
.size
, tt
.elem
)
2502 tt
:= (*structType
)(unsafe
.Pointer(t
))
2503 for i
:= range tt
.fields
{
2505 addTypeBits(bv
, offset
+f
.offset(), f
.typ
)