1 // Copyright 2009 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
5 // Package reflect implements run-time reflection, allowing a program to
6 // manipulate objects with arbitrary types. The typical use is to take a value
7 // with static type interface{} and extract its dynamic type information by
8 // calling TypeOf, which returns a Type.
10 // A call to ValueOf returns a Value representing the run-time data.
11 // Zero takes a Type and returns a Value representing a zero value
14 // See "The Laws of Reflection" for an introduction to reflection in Go:
15 // https://golang.org/doc/articles/laws_of_reflection.html
26 // Type is the representation of a Go type.
28 // Not all methods apply to all kinds of types. Restrictions,
29 // if any, are noted in the documentation for each method.
30 // Use the Kind method to find out the kind of type before
31 // calling kind-specific methods. Calling a method
32 // inappropriate to the kind of type causes a run-time panic.
34 // Type values are comparable, such as with the == operator,
35 // so they can be used as map keys.
36 // Two Type values are equal if they represent identical types.
38 // Methods applicable to all types.
40 // Align returns the alignment in bytes of a value of
41 // this type when allocated in memory.
44 // FieldAlign returns the alignment in bytes of a value of
45 // this type when used as a field in a struct.
48 // Method returns the i'th method in the type's method set.
49 // It panics if i is not in the range [0, NumMethod()).
51 // For a non-interface type T or *T, the returned Method's Type and Func
52 // fields describe a function whose first argument is the receiver.
54 // For an interface type, the returned Method's Type field gives the
55 // method signature, without a receiver, and the Func field is nil.
58 // MethodByName returns the method with that name in the type's
59 // method set and a boolean indicating if the method was found.
61 // For a non-interface type T or *T, the returned Method's Type and Func
62 // fields describe a function whose first argument is the receiver.
64 // For an interface type, the returned Method's Type field gives the
65 // method signature, without a receiver, and the Func field is nil.
66 MethodByName(string) (Method
, bool)
68 // NumMethod returns the number of exported methods in the type's method set.
71 // Name returns the type's name within its package for a defined type.
72 // For other (non-defined) types it returns the empty string.
75 // PkgPath returns a defined type's package path, that is, the import path
76 // that uniquely identifies the package, such as "encoding/base64".
77 // If the type was predeclared (string, error) or not defined (*T, struct{},
78 // []int, or A where A is an alias for a non-defined type), the package path
79 // will be the empty string.
82 // Size returns the number of bytes needed to store
83 // a value of the given type; it is analogous to unsafe.Sizeof.
86 // String returns a string representation of the type.
87 // The string representation may use shortened package names
88 // (e.g., base64 instead of "encoding/base64") and is not
89 // guaranteed to be unique among types. To test for type identity,
90 // compare the Types directly.
93 // Used internally by gccgo--the string retaining quoting.
96 // Kind returns the specific kind of this type.
99 // Implements reports whether the type implements the interface type u.
100 Implements(u Type
) bool
102 // AssignableTo reports whether a value of the type is assignable to type u.
103 AssignableTo(u Type
) bool
105 // ConvertibleTo reports whether a value of the type is convertible to type u.
106 ConvertibleTo(u Type
) bool
108 // Comparable reports whether values of this type are comparable.
111 // Methods applicable only to some types, depending on Kind.
112 // The methods allowed for each kind are:
114 // Int*, Uint*, Float*, Complex*: Bits
116 // Chan: ChanDir, Elem
117 // Func: In, NumIn, Out, NumOut, IsVariadic.
121 // Struct: Field, FieldByIndex, FieldByName, FieldByNameFunc, NumField
123 // Bits returns the size of the type in bits.
124 // It panics if the type's Kind is not one of the
125 // sized or unsized Int, Uint, Float, or Complex kinds.
128 // ChanDir returns a channel type's direction.
129 // It panics if the type's Kind is not Chan.
132 // IsVariadic reports whether a function type's final input parameter
133 // is a "..." parameter. If so, t.In(t.NumIn() - 1) returns the parameter's
134 // implicit actual type []T.
136 // For concreteness, if t represents func(x int, y ... float64), then
139 // t.In(0) is the reflect.Type for "int"
140 // t.In(1) is the reflect.Type for "[]float64"
141 // t.IsVariadic() == true
143 // IsVariadic panics if the type's Kind is not Func.
146 // Elem returns a type's element type.
147 // It panics if the type's Kind is not Array, Chan, Map, Ptr, or Slice.
150 // Field returns a struct type's i'th field.
151 // It panics if the type's Kind is not Struct.
152 // It panics if i is not in the range [0, NumField()).
153 Field(i
int) StructField
155 // FieldByIndex returns the nested field corresponding
156 // to the index sequence. It is equivalent to calling Field
157 // successively for each index i.
158 // It panics if the type's Kind is not Struct.
159 FieldByIndex(index
[]int) StructField
161 // FieldByName returns the struct field with the given name
162 // and a boolean indicating if the field was found.
163 FieldByName(name
string) (StructField
, bool)
165 // FieldByNameFunc returns the struct field with a name
166 // that satisfies the match function and a boolean indicating if
167 // the field was found.
169 // FieldByNameFunc considers the fields in the struct itself
170 // and then the fields in any embedded structs, in breadth first order,
171 // stopping at the shallowest nesting depth containing one or more
172 // fields satisfying the match function. If multiple fields at that depth
173 // satisfy the match function, they cancel each other
174 // and FieldByNameFunc returns no match.
175 // This behavior mirrors Go's handling of name lookup in
176 // structs containing embedded fields.
177 FieldByNameFunc(match
func(string) bool) (StructField
, bool)
179 // In returns the type of a function type's i'th input parameter.
180 // It panics if the type's Kind is not Func.
181 // It panics if i is not in the range [0, NumIn()).
184 // Key returns a map type's key type.
185 // It panics if the type's Kind is not Map.
188 // Len returns an array type's length.
189 // It panics if the type's Kind is not Array.
192 // NumField returns a struct type's field count.
193 // It panics if the type's Kind is not Struct.
196 // NumIn returns a function type's input parameter count.
197 // It panics if the type's Kind is not Func.
200 // NumOut returns a function type's output parameter count.
201 // It panics if the type's Kind is not Func.
204 // Out returns the type of a function type's i'th output parameter.
205 // It panics if the type's Kind is not Func.
206 // It panics if i is not in the range [0, NumOut()).
210 uncommon() *uncommonType
213 // BUG(rsc): FieldByName and related functions consider struct field names to be equal
214 // if the names are equal, even if they are unexported names originating
215 // in different packages. The practical effect of this is that the result of
216 // t.FieldByName("x") is not well defined if the struct type t contains
217 // multiple fields named x (embedded from different packages).
218 // FieldByName may return one of the fields named x or may report that there are none.
219 // See https://golang.org/issue/4876 for more details.
222 * These data structures are known to the compiler (../../cmd/internal/gc/reflect.go).
223 * A few are known to ../runtime/type.go to convey to debuggers.
224 * They are also known to ../runtime/type.go.
227 // A Kind represents the specific kind of type that a Type represents.
228 // The zero Kind is not a valid kind.
261 // rtype is the common implementation of most values.
262 // It is embedded in other struct types.
264 // rtype must be kept in sync with ../runtime/type.go:/^type._type.
267 ptrdata
uintptr // size of memory prefix holding all pointers
268 hash
uint32 // hash of type; avoids computation in hash tables
269 kind
uint8 // enumeration for C
270 align
int8 // alignment of variable with this type
271 fieldAlign
uint8 // alignment of struct field with this type
272 _
uint8 // unused/padding
274 hashfn
func(unsafe
.Pointer
, uintptr) uintptr // hash function
275 equalfn
func(unsafe
.Pointer
, unsafe
.Pointer
) bool // equality function
277 gcdata
*byte // garbage collection data
278 string *string // string form; unnecessary but undeniably useful
279 *uncommonType
// (relatively) uncommon fields
280 ptrToThis
*rtype
// type for pointer to this type, if used in binary or has methods
283 // Method on non-interface type
285 name
*string // name of method
286 pkgPath
*string // nil for exported Names; otherwise import path
287 mtyp
*rtype
// method type (without receiver)
288 typ
*rtype
// .(*FuncType) underneath (with receiver)
289 tfn unsafe
.Pointer
// fn used for normal method call
292 // uncommonType is present only for defined types or types with methods
293 // (if T is a defined type, the uncommonTypes for T and *T have methods).
294 // Using a pointer to this struct reduces the overall size required
295 // to describe a non-defined type with no methods.
296 type uncommonType
struct {
297 name
*string // name of type
298 pkgPath
*string // import path; nil for built-in types like int, string
299 methods
[]method
// methods associated with type
// ChanDir represents a channel type's direction.
type ChanDir int

const (
	RecvDir ChanDir             = 1 << iota // <-chan
	SendDir                                 // chan<-
	BothDir = RecvDir | SendDir             // chan
)
311 // arrayType represents a fixed array type.
312 type arrayType
struct {
314 elem
*rtype
// array element type
315 slice
*rtype
// slice type
319 // chanType represents a channel type.
320 type chanType
struct {
322 elem
*rtype
// channel element type
323 dir
uintptr // channel direction (ChanDir)
326 // funcType represents a function type.
327 type funcType
struct {
329 dotdotdot
bool // last input parameter is ...
330 in
[]*rtype
// input parameter types
331 out
[]*rtype
// output parameter types
334 // imethod represents a method on an interface type
335 type imethod
struct {
336 name
*string // name of method
337 pkgPath
*string // nil for exported Names; otherwise import path
338 typ
*rtype
// .(*FuncType) underneath
341 // interfaceType represents an interface type.
342 type interfaceType
struct {
344 methods
[]imethod
// sorted by hash
347 // mapType represents a map type.
348 type mapType
struct {
350 key
*rtype
// map key type
351 elem
*rtype
// map element (value) type
352 bucket
*rtype
// internal bucket structure
353 keysize
uint8 // size of key slot
354 indirectkey
uint8 // store ptr to key instead of key itself
355 valuesize
uint8 // size of value slot
356 indirectvalue
uint8 // store ptr to value instead of value itself
357 bucketsize
uint16 // size of bucket
358 reflexivekey
bool // true if k==k for all keys
359 needkeyupdate
bool // true if we need to update key on an overwrite
362 // ptrType represents a pointer type.
363 type ptrType
struct {
365 elem
*rtype
// pointer element (pointed at) type
368 // sliceType represents a slice type.
369 type sliceType
struct {
371 elem
*rtype
// slice element type
375 type structField
struct {
376 name
*string // name is always non-empty
377 pkgPath
*string // nil for exported Names; otherwise import path
378 typ
*rtype
// type of field
379 tag
*string // nil if no tag
380 offsetEmbed
uintptr // byte offset of field<<1 | isAnonymous
383 func (f
*structField
) offset() uintptr {
384 return f
.offsetEmbed
>> 1
387 func (f
*structField
) embedded() bool {
388 return f
.offsetEmbed
&1 != 0
391 // structType represents a struct type.
392 type structType
struct {
394 fields
[]structField
// sorted by offset
398 * The compiler knows the exact layout of all the data structures above.
399 * The compiler does not know about the data structures and methods below.
402 // Method represents a single method.
404 // Name is the method name.
405 // PkgPath is the package path that qualifies a lower case (unexported)
406 // method name. It is empty for upper case (exported) method names.
407 // The combination of PkgPath and Name uniquely identifies a method
409 // See https://golang.org/ref/spec#Uniqueness_of_identifiers
413 Type Type
// method type
414 Func Value
// func with receiver as first argument
415 Index
int // index for Type.Method
const (
	kindDirectIface = 1 << 5
	kindGCProg      = 1 << 6 // Type.gc points to GC program
	kindNoPointers  = 1 << 7
	kindMask        = (1 << 5) - 1
)
425 func (k Kind
) String() string {
426 if int(k
) < len(kindNames
) {
429 return "kind" + strconv
.Itoa(int(k
))
432 var kindNames
= []string{
448 Complex64
: "complex64",
449 Complex128
: "complex128",
453 Interface
: "interface",
459 UnsafePointer
: "unsafe.Pointer",
462 func (t
*uncommonType
) uncommon() *uncommonType
{
466 func (t
*uncommonType
) PkgPath() string {
467 if t
== nil || t
.pkgPath
== nil {
473 func (t
*uncommonType
) Name() string {
474 if t
== nil || t
.name
== nil {
// methodCache caches the filtered exported-method slice per type.
var methodCache sync.Map // map[*uncommonType][]method
482 func (t
*uncommonType
) exportedMethods() []method
{
483 methodsi
, found
:= methodCache
.Load(t
)
485 return methodsi
.([]method
)
490 for _
, m
:= range allm
{
491 if m
.pkgPath
!= nil {
500 methods
= make([]method
, 0, len(allm
))
501 for _
, m
:= range allm
{
502 if m
.pkgPath
== nil {
503 methods
= append(methods
, m
)
506 methods
= methods
[:len(methods
):len(methods
)]
509 methodsi
, _
= methodCache
.LoadOrStore(t
, methods
)
510 return methodsi
.([]method
)
513 func (t
*rtype
) rawString() string { return *t
.string }
515 func (t
*rtype
) String() string {
516 // For gccgo, strip out quoted strings.
519 r
:= make([]byte, len(s
))
521 for i
:= 0; i
< len(s
); i
++ {
532 func (t
*rtype
) Size() uintptr { return t
.size
}
534 func (t
*rtype
) Bits() int {
536 panic("reflect: Bits of nil Type")
539 if k
< Int || k
> Complex128
{
540 panic("reflect: Bits of non-arithmetic Type " + t
.String())
542 return int(t
.size
) * 8
545 func (t
*rtype
) Align() int { return int(t
.align
) }
547 func (t
*rtype
) FieldAlign() int { return int(t
.fieldAlign
) }
549 func (t
*rtype
) Kind() Kind
{ return Kind(t
.kind
& kindMask
) }
551 func (t
*rtype
) pointers() bool { return t
.kind
&kindNoPointers
== 0 }
553 func (t
*rtype
) common() *rtype
{ return t
}
555 func (t
*rtype
) exportedMethods() []method
{
560 return ut
.exportedMethods()
563 func (t
*rtype
) NumMethod() int {
564 if t
.Kind() == Interface
{
565 tt
:= (*interfaceType
)(unsafe
.Pointer(t
))
566 return tt
.NumMethod()
568 return len(t
.exportedMethods())
571 func (t
*rtype
) Method(i
int) (m Method
) {
572 if t
.Kind() == Interface
{
573 tt
:= (*interfaceType
)(unsafe
.Pointer(t
))
576 methods
:= t
.exportedMethods()
577 if i
< 0 || i
>= len(methods
) {
578 panic("reflect: Method index out of range")
587 x
:= new(unsafe
.Pointer
)
588 *x
= unsafe
.Pointer(&p
.tfn
)
589 m
.Func
= Value
{mt
, unsafe
.Pointer(x
), fl | flagIndir | flagMethodFn
}
594 func (t
*rtype
) MethodByName(name
string) (m Method
, ok
bool) {
595 if t
.Kind() == Interface
{
596 tt
:= (*interfaceType
)(unsafe
.Pointer(t
))
597 return tt
.MethodByName(name
)
601 return Method
{}, false
603 utmethods
:= ut
.methods
605 for i
:= 0; i
< len(utmethods
); i
++ {
607 if p
.pkgPath
== nil {
608 if p
.name
!= nil && *p
.name
== name
{
609 return t
.Method(eidx
), true
614 return Method
{}, false
617 func (t
*rtype
) PkgPath() string {
618 return t
.uncommonType
.PkgPath()
621 func (t
*rtype
) Name() string {
622 return t
.uncommonType
.Name()
625 func (t
*rtype
) ChanDir() ChanDir
{
626 if t
.Kind() != Chan
{
627 panic("reflect: ChanDir of non-chan type")
629 tt
:= (*chanType
)(unsafe
.Pointer(t
))
630 return ChanDir(tt
.dir
)
633 func (t
*rtype
) IsVariadic() bool {
634 if t
.Kind() != Func
{
635 panic("reflect: IsVariadic of non-func type")
637 tt
:= (*funcType
)(unsafe
.Pointer(t
))
641 func (t
*rtype
) Elem() Type
{
644 tt
:= (*arrayType
)(unsafe
.Pointer(t
))
645 return toType(tt
.elem
)
647 tt
:= (*chanType
)(unsafe
.Pointer(t
))
648 return toType(tt
.elem
)
650 tt
:= (*mapType
)(unsafe
.Pointer(t
))
651 return toType(tt
.elem
)
653 tt
:= (*ptrType
)(unsafe
.Pointer(t
))
654 return toType(tt
.elem
)
656 tt
:= (*sliceType
)(unsafe
.Pointer(t
))
657 return toType(tt
.elem
)
659 panic("reflect: Elem of invalid type")
662 func (t
*rtype
) Field(i
int) StructField
{
663 if t
.Kind() != Struct
{
664 panic("reflect: Field of non-struct type")
666 tt
:= (*structType
)(unsafe
.Pointer(t
))
670 func (t
*rtype
) FieldByIndex(index
[]int) StructField
{
671 if t
.Kind() != Struct
{
672 panic("reflect: FieldByIndex of non-struct type")
674 tt
:= (*structType
)(unsafe
.Pointer(t
))
675 return tt
.FieldByIndex(index
)
678 func (t
*rtype
) FieldByName(name
string) (StructField
, bool) {
679 if t
.Kind() != Struct
{
680 panic("reflect: FieldByName of non-struct type")
682 tt
:= (*structType
)(unsafe
.Pointer(t
))
683 return tt
.FieldByName(name
)
686 func (t
*rtype
) FieldByNameFunc(match
func(string) bool) (StructField
, bool) {
687 if t
.Kind() != Struct
{
688 panic("reflect: FieldByNameFunc of non-struct type")
690 tt
:= (*structType
)(unsafe
.Pointer(t
))
691 return tt
.FieldByNameFunc(match
)
694 func (t
*rtype
) In(i
int) Type
{
695 if t
.Kind() != Func
{
696 panic("reflect: In of non-func type")
698 tt
:= (*funcType
)(unsafe
.Pointer(t
))
699 return toType(tt
.in
[i
])
702 func (t
*rtype
) Key() Type
{
704 panic("reflect: Key of non-map type")
706 tt
:= (*mapType
)(unsafe
.Pointer(t
))
707 return toType(tt
.key
)
710 func (t
*rtype
) Len() int {
711 if t
.Kind() != Array
{
712 panic("reflect: Len of non-array type")
714 tt
:= (*arrayType
)(unsafe
.Pointer(t
))
718 func (t
*rtype
) NumField() int {
719 if t
.Kind() != Struct
{
720 panic("reflect: NumField of non-struct type")
722 tt
:= (*structType
)(unsafe
.Pointer(t
))
723 return len(tt
.fields
)
726 func (t
*rtype
) NumIn() int {
727 if t
.Kind() != Func
{
728 panic("reflect: NumIn of non-func type")
730 tt
:= (*funcType
)(unsafe
.Pointer(t
))
734 func (t
*rtype
) NumOut() int {
735 if t
.Kind() != Func
{
736 panic("reflect: NumOut of non-func type")
738 tt
:= (*funcType
)(unsafe
.Pointer(t
))
742 func (t
*rtype
) Out(i
int) Type
{
743 if t
.Kind() != Func
{
744 panic("reflect: Out of non-func type")
746 tt
:= (*funcType
)(unsafe
.Pointer(t
))
747 return toType(tt
.out
[i
])
752 // The whySafe string is ignored, so that the function still inlines
753 // as efficiently as p+x, but all call sites should use the string to
754 // record why the addition is safe, which is to say why the addition
755 // does not cause x to advance to the very end of p's allocation
756 // and therefore point incorrectly at the next block in memory.
757 func add(p unsafe
.Pointer
, x
uintptr, whySafe
string) unsafe
.Pointer
{
758 return unsafe
.Pointer(uintptr(p
) + x
)
761 func (d ChanDir
) String() string {
770 return "ChanDir" + strconv
.Itoa(int(d
))
773 // Method returns the i'th method in the type's method set.
774 func (t
*interfaceType
) Method(i
int) (m Method
) {
775 if i
< 0 || i
>= len(t
.methods
) {
780 if p
.pkgPath
!= nil {
781 m
.PkgPath
= *p
.pkgPath
783 m
.Type
= toType(p
.typ
)
788 // NumMethod returns the number of interface methods in the type's method set.
789 func (t
*interfaceType
) NumMethod() int { return len(t
.methods
) }
791 // MethodByName method with the given name in the type's method set.
792 func (t
*interfaceType
) MethodByName(name
string) (m Method
, ok
bool) {
797 for i
:= range t
.methods
{
800 return t
.Method(i
), true
806 // A StructField describes a single field in a struct.
807 type StructField
struct {
808 // Name is the field name.
810 // PkgPath is the package path that qualifies a lower case (unexported)
811 // field name. It is empty for upper case (exported) field names.
812 // See https://golang.org/ref/spec#Uniqueness_of_identifiers
815 Type Type
// field type
816 Tag StructTag
// field tag string
817 Offset
uintptr // offset within struct, in bytes
818 Index
[]int // index sequence for Type.FieldByIndex
819 Anonymous
bool // is an embedded field
// A StructTag is the tag string in a struct field.
//
// By convention, tag strings are a concatenation of
// optionally space-separated key:"value" pairs.
// Each key is a non-empty string consisting of non-control
// characters other than space (U+0020 ' '), quote (U+0022 '"'),
// and colon (U+003A ':'). Each value is quoted using U+0022 '"'
// characters and Go string literal syntax.
type StructTag string
832 // Get returns the value associated with key in the tag string.
833 // If there is no such key in the tag, Get returns the empty string.
834 // If the tag does not have the conventional format, the value
835 // returned by Get is unspecified. To determine whether a tag is
836 // explicitly set to the empty string, use Lookup.
837 func (tag StructTag
) Get(key
string) string {
838 v
, _
:= tag
.Lookup(key
)
842 // Lookup returns the value associated with key in the tag string.
843 // If the key is present in the tag the value (which may be empty)
844 // is returned. Otherwise the returned value will be the empty string.
845 // The ok return value reports whether the value was explicitly set in
846 // the tag string. If the tag does not have the conventional format,
847 // the value returned by Lookup is unspecified.
848 func (tag StructTag
) Lookup(key
string) (value
string, ok
bool) {
849 // When modifying this code, also update the validateStructTag code
850 // in cmd/vet/structtag.go.
853 // Skip leading space.
855 for i
< len(tag
) && tag
[i
] == ' ' {
863 // Scan to colon. A space, a quote or a control character is a syntax error.
864 // Strictly speaking, control chars include the range [0x7f, 0x9f], not just
865 // [0x00, 0x1f], but in practice, we ignore the multi-byte control characters
866 // as it is simpler to inspect the tag's bytes than the tag's runes.
868 for i
< len(tag
) && tag
[i
] > ' ' && tag
[i
] != ':' && tag
[i
] != '"' && tag
[i
] != 0x7f {
871 if i
== 0 || i
+1 >= len(tag
) || tag
[i
] != ':' || tag
[i
+1] != '"' {
874 name
:= string(tag
[:i
])
877 // Scan quoted string to find value.
879 for i
< len(tag
) && tag
[i
] != '"' {
888 qvalue
:= string(tag
[:i
+1])
892 value
, err
:= strconv
.Unquote(qvalue
)
902 // Field returns the i'th struct field.
903 func (t
*structType
) Field(i
int) (f StructField
) {
904 if i
< 0 || i
>= len(t
.fields
) {
905 panic("reflect: Field index out of bounds")
908 f
.Type
= toType(p
.typ
)
910 f
.Anonymous
= p
.embedded()
911 if p
.pkgPath
!= nil {
912 f
.PkgPath
= *p
.pkgPath
915 f
.Tag
= StructTag(*p
.tag
)
917 f
.Offset
= p
.offset()
919 // NOTE(rsc): This is the only allocation in the interface
920 // presented by a reflect.Type. It would be nice to avoid,
921 // at least in the common cases, but we need to make sure
922 // that misbehaving clients of reflect cannot affect other
923 // uses of reflect. One possibility is CL 5371098, but we
924 // postponed that ugliness until there is a demonstrated
925 // need for the performance. This is issue 2320.
930 // TODO(gri): Should there be an error/bool indicator if the index
931 // is wrong for FieldByIndex?
933 // FieldByIndex returns the nested field corresponding to index.
934 func (t
*structType
) FieldByIndex(index
[]int) (f StructField
) {
935 f
.Type
= toType(&t
.rtype
)
936 for i
, x
:= range index
{
939 if ft
.Kind() == Ptr
&& ft
.Elem().Kind() == Struct
{
949 // A fieldScan represents an item on the fieldByNameFunc scan work list.
950 type fieldScan
struct {
955 // FieldByNameFunc returns the struct field with a name that satisfies the
956 // match function and a boolean to indicate if the field was found.
957 func (t
*structType
) FieldByNameFunc(match
func(string) bool) (result StructField
, ok
bool) {
958 // This uses the same condition that the Go language does: there must be a unique instance
959 // of the match at a given depth level. If there are multiple instances of a match at the
960 // same depth, they annihilate each other and inhibit any possible match at a lower level.
961 // The algorithm is breadth first search, one depth level at a time.
963 // The current and next slices are work queues:
964 // current lists the fields to visit on this depth level,
965 // and next lists the fields on the next lower level.
966 current
:= []fieldScan
{}
967 next
:= []fieldScan
{{typ
: t
}}
969 // nextCount records the number of times an embedded type has been
970 // encountered and considered for queueing in the 'next' slice.
971 // We only queue the first one, but we increment the count on each.
972 // If a struct type T can be reached more than once at a given depth level,
973 // then it annihilates itself and need not be considered at all when we
974 // process that next depth level.
975 var nextCount
map[*structType
]int
977 // visited records the structs that have been considered already.
978 // Embedded pointer fields can create cycles in the graph of
979 // reachable embedded types; visited avoids following those cycles.
980 // It also avoids duplicated effort: if we didn't find the field in an
981 // embedded type T at level 2, we won't find it in one at level 4 either.
982 visited
:= map[*structType
]bool{}
985 current
, next
= next
, current
[:0]
989 // Process all the fields at this depth, now listed in 'current'.
990 // The loop queues embedded fields found in 'next', for processing during the next
991 // iteration. The multiplicity of the 'current' field counts is recorded
992 // in 'count'; the multiplicity of the 'next' field counts is recorded in 'nextCount'.
993 for _
, scan
:= range current
{
996 // We've looked through this type before, at a higher level.
997 // That higher level would shadow the lower level we're now at,
998 // so this one can't be useful to us. Ignore it.
1002 for i
:= range t
.fields
{
1004 // Find name and (for embedded field) type for field f.
1008 // Embedded field of type T or *T.
1010 if ntyp
.Kind() == Ptr
{
1011 ntyp
= ntyp
.Elem().common()
1018 if count
[t
] > 1 || ok
{
1019 // Name appeared multiple times at this level: annihilate.
1020 return StructField
{}, false
1024 result
.Index
= append(result
.Index
, scan
.index
...)
1025 result
.Index
= append(result
.Index
, i
)
1030 // Queue embedded struct fields for processing with next level,
1031 // but only if we haven't seen a match yet at this level and only
1032 // if the embedded types haven't already been queued.
1033 if ok || ntyp
== nil || ntyp
.Kind() != Struct
{
1036 ntyp
= toType(ntyp
).common()
1037 styp
:= (*structType
)(unsafe
.Pointer(ntyp
))
1038 if nextCount
[styp
] > 0 {
1039 nextCount
[styp
] = 2 // exact multiple doesn't matter
1042 if nextCount
== nil {
1043 nextCount
= map[*structType
]int{}
1047 nextCount
[styp
] = 2 // exact multiple doesn't matter
1050 index
= append(index
, scan
.index
...)
1051 index
= append(index
, i
)
1052 next
= append(next
, fieldScan
{styp
, index
})
1062 // FieldByName returns the struct field with the given name
1063 // and a boolean to indicate if the field was found.
1064 func (t
*structType
) FieldByName(name
string) (f StructField
, present
bool) {
1065 // Quick check for top-level name, or struct without embedded fields.
1068 for i
:= range t
.fields
{
1070 if *tf
.name
== name
{
1071 return t
.Field(i
), true
1081 return t
.FieldByNameFunc(func(s
string) bool { return s
== name
})
1084 // TypeOf returns the reflection Type that represents the dynamic type of i.
1085 // If i is a nil interface value, TypeOf returns nil.
1086 func TypeOf(i
interface{}) Type
{
1087 eface
:= *(*emptyInterface
)(unsafe
.Pointer(&i
))
1088 return toType(eface
.typ
)
// ptrMap is the cache for PtrTo.
var ptrMap sync.Map // map[*rtype]*ptrType
1094 // PtrTo returns the pointer type with element t.
1095 // For example, if t represents type Foo, PtrTo(t) represents *Foo.
1096 func PtrTo(t Type
) Type
{
1097 return t
.(*rtype
).ptrTo()
1100 func (t
*rtype
) ptrTo() *rtype
{
1101 if p
:= t
.ptrToThis
; p
!= nil {
1106 if pi
, ok
:= ptrMap
.Load(t
); ok
{
1107 return &pi
.(*ptrType
).rtype
1110 s
:= "*" + *t
.string
1112 canonicalTypeLock
.RLock()
1113 r
, ok
:= canonicalType
[s
]
1114 canonicalTypeLock
.RUnlock()
1116 p
:= (*ptrType
)(unsafe
.Pointer(r
.(*rtype
)))
1117 pi
, _
:= ptrMap
.LoadOrStore(t
, p
)
1118 return &pi
.(*ptrType
).rtype
1121 // Create a new ptrType starting with the description
1122 // of an *unsafe.Pointer.
1123 var iptr
interface{} = (*unsafe
.Pointer
)(nil)
1124 prototype
:= *(**ptrType
)(unsafe
.Pointer(&iptr
))
1130 // For the type structures linked into the binary, the
1131 // compiler provides a good hash of the string.
1132 // Create a good hash for the new string by using
1133 // the FNV-1 hash's mixing function to combine the
1134 // old hash and the new "*".
1135 // p.hash = fnv1(t.hash, '*')
1136 // This is the gccgo version.
1137 pp
.hash
= (t
.hash
<< 4) + 9
1139 pp
.uncommonType
= nil
1143 q
:= canonicalize(&pp
.rtype
)
1144 p
:= (*ptrType
)(unsafe
.Pointer(q
.(*rtype
)))
1146 pi
, _
:= ptrMap
.LoadOrStore(t
, p
)
1147 return &pi
.(*ptrType
).rtype
1150 // fnv1 incorporates the list of bytes into the hash x using the FNV-1 hash function.
1151 func fnv1(x
uint32, list
...byte) uint32 {
1152 for _
, b
:= range list
{
1153 x
= x
*16777619 ^ uint32(b
)
1158 func (t
*rtype
) Implements(u Type
) bool {
1160 panic("reflect: nil type passed to Type.Implements")
1162 if u
.Kind() != Interface
{
1163 panic("reflect: non-interface type passed to Type.Implements")
1165 return implements(u
.(*rtype
), t
)
1168 func (t
*rtype
) AssignableTo(u Type
) bool {
1170 panic("reflect: nil type passed to Type.AssignableTo")
1173 return directlyAssignable(uu
, t
) ||
implements(uu
, t
)
1176 func (t
*rtype
) ConvertibleTo(u Type
) bool {
1178 panic("reflect: nil type passed to Type.ConvertibleTo")
1181 return convertOp(uu
, t
) != nil
1184 func (t
*rtype
) Comparable() bool {
1186 case Bool
, Int
, Int8
, Int16
, Int32
, Int64
,
1187 Uint
, Uint8
, Uint16
, Uint32
, Uint64
, Uintptr
,
1188 Float32
, Float64
, Complex64
, Complex128
,
1189 Chan
, Interface
, Ptr
, String
, UnsafePointer
:
1192 case Func
, Map
, Slice
:
1196 return (*arrayType
)(unsafe
.Pointer(t
)).elem
.Comparable()
1199 tt
:= (*structType
)(unsafe
.Pointer(t
))
1200 for i
:= range tt
.fields
{
1201 if !tt
.fields
[i
].typ
.Comparable() {
1208 panic("reflect: impossible")
1212 // implements reports whether the type V implements the interface type T.
1213 func implements(T
, V
*rtype
) bool {
1214 if T
.Kind() != Interface
{
1217 t
:= (*interfaceType
)(unsafe
.Pointer(T
))
1218 if len(t
.methods
) == 0 {
1222 // The same algorithm applies in both cases, but the
1223 // method tables for an interface type and a concrete type
1224 // are different, so the code is duplicated.
1225 // In both cases the algorithm is a linear scan over the two
1226 // lists - T's methods and V's methods - simultaneously.
1227 // Since method tables are stored in a unique sorted order
1228 // (alphabetical, with no duplicate method names), the scan
1229 // through V's methods must hit a match for each of T's
1230 // methods along the way, or else V does not implement T.
1231 // This lets us run the scan in overall linear time instead of
1232 // the quadratic time a naive search would require.
1233 // See also ../runtime/iface.go.
1234 if V
.Kind() == Interface
{
1235 v
:= (*interfaceType
)(unsafe
.Pointer(V
))
1237 for j
:= 0; j
< len(v
.methods
); j
++ {
1240 if *vm
.name
== *tm
.name
&& (vm
.pkgPath
== tm
.pkgPath ||
(vm
.pkgPath
!= nil && tm
.pkgPath
!= nil && *vm
.pkgPath
== *tm
.pkgPath
)) && toType(vm
.typ
).common() == toType(tm
.typ
).common() {
1241 if i
++; i
>= len(t
.methods
) {
1254 for j
:= 0; j
< len(v
.methods
); j
++ {
1257 if *vm
.name
== *tm
.name
&& (vm
.pkgPath
== tm
.pkgPath ||
(vm
.pkgPath
!= nil && tm
.pkgPath
!= nil && *vm
.pkgPath
== *tm
.pkgPath
)) && toType(vm
.mtyp
).common() == toType(tm
.typ
).common() {
1258 if i
++; i
>= len(t
.methods
) {
1266 // directlyAssignable reports whether a value x of type V can be directly
1267 // assigned (using memmove) to a value of type T.
1268 // https://golang.org/doc/go_spec.html#Assignability
1269 // Ignoring the interface rules (implemented elsewhere)
1270 // and the ideal constant rules (no ideal constants at run time).
1271 func directlyAssignable(T
, V
*rtype
) bool {
1272 // x's type V is identical to T?
1277 // Otherwise at least one of T and V must not be defined
1278 // and they must have the same kind.
1279 if T
.Name() != "" && V
.Name() != "" || T
.Kind() != V
.Kind() {
1283 // x's type T and V must have identical underlying types.
1284 return haveIdenticalUnderlyingType(T
, V
, true)
1287 func haveIdenticalType(T
, V Type
, cmpTags
bool) bool {
1292 if T
.Name() != V
.Name() || T
.Kind() != V
.Kind() {
1296 return haveIdenticalUnderlyingType(T
.common(), V
.common(), false)
1299 func haveIdenticalUnderlyingType(T
, V
*rtype
, cmpTags
bool) bool {
1305 if kind
!= V
.Kind() {
1309 // Non-composite types of equal kind have same underlying type
1310 // (the predefined instance of the type).
1311 if Bool
<= kind
&& kind
<= Complex128 || kind
== String || kind
== UnsafePointer
{
1318 return T
.Len() == V
.Len() && haveIdenticalType(T
.Elem(), V
.Elem(), cmpTags
)
1322 // x is a bidirectional channel value, T is a channel type,
1323 // and x's type V and T have identical element types.
1324 if V
.ChanDir() == BothDir
&& haveIdenticalType(T
.Elem(), V
.Elem(), cmpTags
) {
1328 // Otherwise continue test for identical underlying type.
1329 return V
.ChanDir() == T
.ChanDir() && haveIdenticalType(T
.Elem(), V
.Elem(), cmpTags
)
1332 t
:= (*funcType
)(unsafe
.Pointer(T
))
1333 v
:= (*funcType
)(unsafe
.Pointer(V
))
1334 if t
.dotdotdot
!= v
.dotdotdot ||
len(t
.in
) != len(v
.in
) ||
len(t
.out
) != len(v
.out
) {
1337 for i
, typ
:= range t
.in
{
1338 if !haveIdenticalType(typ
, v
.in
[i
], cmpTags
) {
1342 for i
, typ
:= range t
.out
{
1343 if !haveIdenticalType(typ
, v
.out
[i
], cmpTags
) {
1350 t
:= (*interfaceType
)(unsafe
.Pointer(T
))
1351 v
:= (*interfaceType
)(unsafe
.Pointer(V
))
1352 if len(t
.methods
) == 0 && len(v
.methods
) == 0 {
1355 // Might have the same methods but still
1356 // need a run time conversion.
1360 return haveIdenticalType(T
.Key(), V
.Key(), cmpTags
) && haveIdenticalType(T
.Elem(), V
.Elem(), cmpTags
)
1363 return haveIdenticalType(T
.Elem(), V
.Elem(), cmpTags
)
1366 t
:= (*structType
)(unsafe
.Pointer(T
))
1367 v
:= (*structType
)(unsafe
.Pointer(V
))
1368 if len(t
.fields
) != len(v
.fields
) {
1371 for i
:= range t
.fields
{
1374 if tf
.name
!= vf
.name
&& (tf
.name
== nil || vf
.name
== nil ||
*tf
.name
!= *vf
.name
) {
1377 if tf
.pkgPath
!= vf
.pkgPath
&& (tf
.pkgPath
== nil || vf
.pkgPath
== nil ||
*tf
.pkgPath
!= *vf
.pkgPath
) {
1380 if !haveIdenticalType(tf
.typ
, vf
.typ
, cmpTags
) {
1383 if cmpTags
&& tf
.tag
!= vf
.tag
&& (tf
.tag
== nil || vf
.tag
== nil ||
*tf
.tag
!= *vf
.tag
) {
1386 if tf
.offsetEmbed
!= vf
.offsetEmbed
{
// The lookupCache caches ArrayOf, ChanOf, MapOf and SliceOf lookups.
// It is keyed by cacheKey (kind, subtypes, extra integer) and stores the
// canonical *rtype for each constructed composite type.
var lookupCache sync.Map // map[cacheKey]*rtype
1399 // A cacheKey is the key for use in the lookupCache.
1400 // Four values describe any of the types we are looking for:
1401 // type kind, one or two subtypes, and an extra integer.
1402 type cacheKey
struct {
1409 // The funcLookupCache caches FuncOf lookups.
1410 // FuncOf does not share the common lookupCache since cacheKey is not
1411 // sufficient to represent functions unambiguously.
1412 var funcLookupCache
struct {
1413 sync
.Mutex
// Guards stores (but not loads) on m.
1415 // m is a map[uint32][]*rtype keyed by the hash calculated in FuncOf.
1416 // Elements of m are append-only and thus safe for concurrent reading.
1420 // ChanOf returns the channel type with the given direction and element type.
1421 // For example, if t represents int, ChanOf(RecvDir, t) represents <-chan int.
1423 // The gc runtime imposes a limit of 64 kB on channel element types.
1424 // If t's size is equal to or exceeds this limit, ChanOf panics.
1425 func ChanOf(dir ChanDir
, t Type
) Type
{
1429 ckey
:= cacheKey
{Chan
, typ
, nil, uintptr(dir
)}
1430 if ch
, ok
:= lookupCache
.Load(ckey
); ok
{
1434 // This restriction is imposed by the gc compiler and the runtime.
1435 if typ
.size
>= 1<<16 {
1436 panic("reflect.ChanOf: element size too large")
1439 // Look in known types.
1440 // TODO: Precedence when constructing string.
1444 panic("reflect.ChanOf: invalid dir")
1446 s
= "chan<- " + *typ
.string
1448 s
= "<-chan " + *typ
.string
1450 s
= "chan " + *typ
.string
1453 // Make a channel type.
1454 var ichan
interface{} = (chan unsafe
.Pointer
)(nil)
1455 prototype
:= *(**chanType
)(unsafe
.Pointer(&ichan
))
1457 ch
.dir
= uintptr(dir
)
1460 // gccgo uses a different hash.
1461 // ch.hash = fnv1(typ.hash, 'c', byte(dir))
1463 if dir
&SendDir
!= 0 {
1466 if dir
&RecvDir
!= 0 {
1469 ch
.hash
+= typ
.hash
<< 2
1474 ch
.uncommonType
= nil
1477 // Canonicalize before storing in lookupCache
1478 ti
:= toType(&ch
.rtype
)
1479 lookupCache
.Store(ckey
, ti
.(*rtype
))
// ismapkey reports whether values of type t support Go's == operator
// and therefore may be used as map keys. MapOf panics when this
// returns false for the requested key type.
func ismapkey(*rtype) bool // implemented in runtime
1485 // MapOf returns the map type with the given key and element types.
1486 // For example, if k represents int and e represents string,
1487 // MapOf(k, e) represents map[int]string.
1489 // If the key type is not a valid map key type (that is, if it does
1490 // not implement Go's == operator), MapOf panics.
1491 func MapOf(key
, elem Type
) Type
{
1492 ktyp
:= key
.(*rtype
)
1493 etyp
:= elem
.(*rtype
)
1495 if !ismapkey(ktyp
) {
1496 panic("reflect.MapOf: invalid key type " + ktyp
.String())
1500 ckey
:= cacheKey
{Map
, ktyp
, etyp
, 0}
1501 if mt
, ok
:= lookupCache
.Load(ckey
); ok
{
1505 // Look in known types.
1506 s
:= "map[" + *ktyp
.string + "]" + *etyp
.string
1509 var imap
interface{} = (map[unsafe
.Pointer
]unsafe
.Pointer
)(nil)
1510 mt
:= **(**mapType
)(unsafe
.Pointer(&imap
))
1513 // gccgo uses a different hash
1514 // mt.hash = fnv1(etyp.hash, 'm', byte(ktyp.hash>>24), byte(ktyp.hash>>16), byte(ktyp.hash>>8), byte(ktyp.hash))
1515 mt
.hash
= ktyp
.hash
+ etyp
.hash
+ 2 + 14
1519 mt
.uncommonType
= nil
1522 mt
.bucket
= bucketOf(ktyp
, etyp
)
1523 if ktyp
.size
> maxKeySize
{
1524 mt
.keysize
= uint8(ptrSize
)
1527 mt
.keysize
= uint8(ktyp
.size
)
1530 if etyp
.size
> maxValSize
{
1531 mt
.valuesize
= uint8(ptrSize
)
1532 mt
.indirectvalue
= 1
1534 mt
.valuesize
= uint8(etyp
.size
)
1535 mt
.indirectvalue
= 0
1537 mt
.bucketsize
= uint16(mt
.bucket
.size
)
1538 mt
.reflexivekey
= isReflexive(ktyp
)
1539 mt
.needkeyupdate
= needKeyUpdate(ktyp
)
1541 // Canonicalize before storing in lookupCache
1542 ti
:= toType(&mt
.rtype
)
1543 lookupCache
.Store(ckey
, ti
.(*rtype
))
1547 // FuncOf returns the function type with the given argument and result types.
1548 // For example if k represents int and e represents string,
1549 // FuncOf([]Type{k}, []Type{e}, false) represents func(int) string.
1551 // The variadic argument controls whether the function is variadic. FuncOf
1552 // panics if the in[len(in)-1] does not represent a slice and variadic is
1554 func FuncOf(in
, out
[]Type
, variadic
bool) Type
{
1555 if variadic
&& (len(in
) == 0 || in
[len(in
)-1].Kind() != Slice
) {
1556 panic("reflect.FuncOf: last arg of variadic func must be slice")
1559 // Make a func type.
1560 var ifunc
interface{} = (func())(nil)
1561 prototype
:= *(**funcType
)(unsafe
.Pointer(&ifunc
))
1565 // Build a hash and minimally populate ft.
1567 var fin
, fout
[]*rtype
1569 for _
, in
:= range in
{
1571 fin
= append(fin
, t
)
1572 hash
+= t
.hash
<< shift
1576 for _
, out
:= range out
{
1578 fout
= append(fout
, t
)
1579 hash
+= t
.hash
<< shift
1590 ft
.dotdotdot
= variadic
1593 if ts
, ok
:= funcLookupCache
.m
.Load(hash
); ok
{
1594 for _
, t
:= range ts
.([]*rtype
) {
1595 if haveIdenticalUnderlyingType(&ft
.rtype
, t
, true) {
1601 // Not in cache, lock and retry.
1602 funcLookupCache
.Lock()
1603 defer funcLookupCache
.Unlock()
1604 if ts
, ok
:= funcLookupCache
.m
.Load(hash
); ok
{
1605 for _
, t
:= range ts
.([]*rtype
) {
1606 if haveIdenticalUnderlyingType(&ft
.rtype
, t
, true) {
1612 addToCache
:= func(tt
*rtype
) Type
{
1614 if rti
, ok
:= funcLookupCache
.m
.Load(hash
); ok
{
1615 rts
= rti
.([]*rtype
)
1617 funcLookupCache
.m
.Store(hash
, append(rts
, tt
))
1623 // Populate the remaining fields of ft and store in cache.
1625 ft
.uncommonType
= nil
1628 // Canonicalize before storing in funcLookupCache
1629 tc
:= toType(&ft
.rtype
)
1630 return addToCache(tc
.(*rtype
))
1633 // funcStr builds a string representation of a funcType.
1634 func funcStr(ft
*funcType
) string {
1635 repr
:= make([]byte, 0, 64)
1636 repr
= append(repr
, "func("...)
1637 for i
, t
:= range ft
.in
{
1639 repr
= append(repr
, ", "...)
1641 if ft
.dotdotdot
&& i
== len(ft
.in
)-1 {
1642 repr
= append(repr
, "..."...)
1643 repr
= append(repr
, *(*sliceType
)(unsafe
.Pointer(t
)).elem
.string...)
1645 repr
= append(repr
, *t
.string...)
1648 repr
= append(repr
, ')')
1649 if l
:= len(ft
.out
); l
== 1 {
1650 repr
= append(repr
, ' ')
1652 repr
= append(repr
, " ("...)
1654 for i
, t
:= range ft
.out
{
1656 repr
= append(repr
, ", "...)
1658 repr
= append(repr
, *t
.string...)
1660 if len(ft
.out
) > 1 {
1661 repr
= append(repr
, ')')
1666 // isReflexive reports whether the == operation on the type is reflexive.
1667 // That is, x == x for all values x of type t.
1668 func isReflexive(t
*rtype
) bool {
1670 case Bool
, Int
, Int8
, Int16
, Int32
, Int64
, Uint
, Uint8
, Uint16
, Uint32
, Uint64
, Uintptr
, Chan
, Ptr
, String
, UnsafePointer
:
1672 case Float32
, Float64
, Complex64
, Complex128
, Interface
:
1675 tt
:= (*arrayType
)(unsafe
.Pointer(t
))
1676 return isReflexive(tt
.elem
)
1678 tt
:= (*structType
)(unsafe
.Pointer(t
))
1679 for _
, f
:= range tt
.fields
{
1680 if !isReflexive(f
.typ
) {
1686 // Func, Map, Slice, Invalid
1687 panic("isReflexive called on non-key type " + t
.String())
1691 // needKeyUpdate reports whether map overwrites require the key to be copied.
1692 func needKeyUpdate(t
*rtype
) bool {
1694 case Bool
, Int
, Int8
, Int16
, Int32
, Int64
, Uint
, Uint8
, Uint16
, Uint32
, Uint64
, Uintptr
, Chan
, Ptr
, UnsafePointer
:
1696 case Float32
, Float64
, Complex64
, Complex128
, Interface
, String
:
1697 // Float keys can be updated from +0 to -0.
1698 // String keys can be updated to use a smaller backing store.
1699 // Interfaces might have floats or strings in them.
1702 tt
:= (*arrayType
)(unsafe
.Pointer(t
))
1703 return needKeyUpdate(tt
.elem
)
1705 tt
:= (*structType
)(unsafe
.Pointer(t
))
1706 for _
, f
:= range tt
.fields
{
1707 if needKeyUpdate(f
.typ
) {
1713 // Func, Map, Slice, Invalid
1714 panic("needKeyUpdate called on non-key type " + t
.String())
1718 // Make sure these routines stay in sync with ../../runtime/map.go!
1719 // These types exist only for GC, so we only fill out GC relevant info.
1720 // Currently, that's just size and the GC program. We also fill in string
1721 // for possible debugging use.
1723 bucketSize
uintptr = 8
1724 maxKeySize
uintptr = 128
1725 maxValSize
uintptr = 128
1728 func bucketOf(ktyp
, etyp
*rtype
) *rtype
{
1729 // See comment on hmap.overflow in ../runtime/map.go.
1731 if ktyp
.kind
&kindNoPointers
!= 0 && etyp
.kind
&kindNoPointers
!= 0 &&
1732 ktyp
.size
<= maxKeySize
&& etyp
.size
<= maxValSize
{
1733 kind
= kindNoPointers
1736 if ktyp
.size
> maxKeySize
{
1737 ktyp
= PtrTo(ktyp
).(*rtype
)
1739 if etyp
.size
> maxValSize
{
1740 etyp
= PtrTo(etyp
).(*rtype
)
1743 // Prepare GC data if any.
1744 // A bucket is at most bucketSize*(1+maxKeySize+maxValSize)+2*ptrSize bytes,
1745 // or 2072 bytes, or 259 pointer-size words, or 33 bytes of pointer bitmap.
1746 // Note that since the key and value are known to be <= 128 bytes,
1747 // they're guaranteed to have bitmaps instead of GC programs.
1752 size
= align(size
, uintptr(ktyp
.fieldAlign
))
1753 size
+= bucketSize
* ktyp
.size
1754 size
= align(size
, uintptr(etyp
.fieldAlign
))
1755 size
+= bucketSize
* etyp
.size
1757 maxAlign
:= uintptr(ktyp
.fieldAlign
)
1758 if maxAlign
< uintptr(etyp
.fieldAlign
) {
1759 maxAlign
= uintptr(etyp
.fieldAlign
)
1761 if maxAlign
> ptrSize
{
1762 size
= align(size
, maxAlign
)
1763 size
+= align(ptrSize
, maxAlign
) - ptrSize
1764 } else if maxAlign
< ptrSize
{
1765 size
= align(size
, ptrSize
)
1772 if kind
!= kindNoPointers
{
1773 nptr
:= size
/ ptrSize
1774 mask
:= make([]byte, (nptr
+7)/8)
1776 psize
= align(psize
, uintptr(ktyp
.fieldAlign
))
1777 base
:= psize
/ ptrSize
1779 if ktyp
.kind
&kindNoPointers
== 0 {
1780 if ktyp
.kind
&kindGCProg
!= 0 {
1781 panic("reflect: unexpected GC program in MapOf")
1783 kmask
:= (*[16]byte)(unsafe
.Pointer(ktyp
.gcdata
))
1784 for i
:= uintptr(0); i
< ktyp
.ptrdata
/ptrSize
; i
++ {
1785 if (kmask
[i
/8]>>(i%8
))&1 != 0 {
1786 for j
:= uintptr(0); j
< bucketSize
; j
++ {
1787 word
:= base
+ j
*ktyp
.size
/ptrSize
+ i
1788 mask
[word
/8] |
= 1 << (word
% 8)
1793 psize
+= bucketSize
* ktyp
.size
1794 psize
= align(psize
, uintptr(etyp
.fieldAlign
))
1795 base
= psize
/ ptrSize
1797 if etyp
.kind
&kindNoPointers
== 0 {
1798 if etyp
.kind
&kindGCProg
!= 0 {
1799 panic("reflect: unexpected GC program in MapOf")
1801 emask
:= (*[16]byte)(unsafe
.Pointer(etyp
.gcdata
))
1802 for i
:= uintptr(0); i
< etyp
.ptrdata
/ptrSize
; i
++ {
1803 if (emask
[i
/8]>>(i%8
))&1 != 0 {
1804 for j
:= uintptr(0); j
< bucketSize
; j
++ {
1805 word
:= base
+ j
*etyp
.size
/ptrSize
+ i
1806 mask
[word
/8] |
= 1 << (word
% 8)
1812 word
:= ovoff
/ ptrSize
1813 mask
[word
/8] |
= 1 << (word
% 8)
1815 ptrdata
= (word
+ 1) * ptrSize
1817 // overflow word must be last
1818 if ptrdata
!= size
{
1819 panic("reflect: bad layout computation in MapOf")
1824 align
: int8(maxAlign
),
1825 fieldAlign
: uint8(maxAlign
),
1831 s
:= "bucket(" + *ktyp
.string + "," + *etyp
.string + ")"
1836 // SliceOf returns the slice type with element type t.
1837 // For example, if t represents int, SliceOf(t) represents []int.
1838 func SliceOf(t Type
) Type
{
1842 ckey
:= cacheKey
{Slice
, typ
, nil, 0}
1843 if slice
, ok
:= lookupCache
.Load(ckey
); ok
{
1847 // Look in known types.
1848 s
:= "[]" + *typ
.string
1850 // Make a slice type.
1851 var islice
interface{} = ([]unsafe
.Pointer
)(nil)
1852 prototype
:= *(**sliceType
)(unsafe
.Pointer(&islice
))
1856 // gccgo uses a different hash.
1857 // slice.hash = fnv1(typ.hash, '[')
1858 slice
.hash
= typ
.hash
+ 1 + 13
1861 slice
.uncommonType
= nil
1862 slice
.ptrToThis
= nil
1864 // Canonicalize before storing in lookupCache
1865 ti
:= toType(&slice
.rtype
)
1866 lookupCache
.Store(ckey
, ti
.(*rtype
))
1870 // The structLookupCache caches StructOf lookups.
1871 // StructOf does not share the common lookupCache since we need to pin
1872 // the memory associated with *structTypeFixedN.
1873 var structLookupCache
struct {
1874 sync
.Mutex
// Guards stores (but not loads) on m.
1876 // m is a map[uint32][]Type keyed by the hash calculated in StructOf.
1877 // Elements in m are append-only and thus safe for concurrent reading.
1881 // isLetter returns true if a given 'rune' is classified as a Letter.
1882 func isLetter(ch rune
) bool {
1883 return 'a' <= ch
&& ch
<= 'z' ||
'A' <= ch
&& ch
<= 'Z' || ch
== '_' || ch
>= utf8
.RuneSelf
&& unicode
.IsLetter(ch
)
1886 // isValidFieldName checks if a string is a valid (struct) field name or not.
1888 // According to the language spec, a field name should be an identifier.
1890 // identifier = letter { letter | unicode_digit } .
1891 // letter = unicode_letter | "_" .
1892 func isValidFieldName(fieldName
string) bool {
1893 for i
, c
:= range fieldName
{
1894 if i
== 0 && !isLetter(c
) {
1898 if !(isLetter(c
) || unicode
.IsDigit(c
)) {
1903 return len(fieldName
) > 0
1906 // StructOf returns the struct type containing fields.
1907 // The Offset and Index fields are ignored and computed as they would be
1910 // StructOf currently does not generate wrapper methods for embedded
1911 // fields and panics if passed unexported StructFields.
1912 // These limitations may be lifted in a future version.
1913 func StructOf(fields
[]StructField
) Type
{
1921 fs
= make([]structField
, len(fields
))
1922 repr
= make([]byte, 0, 64)
1923 fset
= map[string]struct{}{} // fields' names
1925 hasPtr
= false // records whether at least one struct-field is a pointer
1926 hasGCProg
= false // records whether a struct-field type has a GCProg
1929 lastzero
:= uintptr(0)
1930 repr
= append(repr
, "struct {"...)
1931 for i
, field
:= range fields
{
1932 if field
.Name
== "" {
1933 panic("reflect.StructOf: field " + strconv
.Itoa(i
) + " has no name")
1935 if !isValidFieldName(field
.Name
) {
1936 panic("reflect.StructOf: field " + strconv
.Itoa(i
) + " has invalid name")
1938 if field
.Type
== nil {
1939 panic("reflect.StructOf: field " + strconv
.Itoa(i
) + " has no type")
1941 f
:= runtimeStructField(field
)
1943 if ft
.kind
&kindGCProg
!= 0 {
1950 // Update string and hash
1952 hash
= (hash
<< 1) + ft
.hash
1954 repr
= append(repr
, (" " + name
)...)
1957 repr
= append(repr
, " ?"...)
1958 if f
.typ
.Kind() == Ptr
{
1959 // Embedded ** and *interface{} are illegal
1961 if k
:= elem
.Kind(); k
== Ptr || k
== Interface
{
1962 panic("reflect.StructOf: illegal embedded field type " + ft
.String())
1964 name
= elem
.String()
1969 switch f
.typ
.Kind() {
1971 ift
:= (*interfaceType
)(unsafe
.Pointer(ft
))
1972 if len(ift
.methods
) > 0 {
1973 panic("reflect.StructOf: embedded field with methods not implemented")
1976 ptr
:= (*ptrType
)(unsafe
.Pointer(ft
))
1977 if unt
:= ptr
.uncommon(); unt
!= nil {
1978 if len(unt
.methods
) > 0 {
1979 panic("reflect.StructOf: embedded field with methods not implemented")
1982 if unt
:= ptr
.elem
.uncommon(); unt
!= nil {
1983 if len(unt
.methods
) > 0 {
1984 panic("reflect.StructOf: embedded field with methods not implemented")
1988 if unt
:= ft
.uncommon(); unt
!= nil {
1989 if len(unt
.methods
) > 0 {
1990 panic("reflect.StructOf: embedded field with methods not implemented")
1995 if _
, dup
:= fset
[name
]; dup
{
1996 panic("reflect.StructOf: duplicate field " + name
)
1998 fset
[name
] = struct{}{}
2000 repr
= append(repr
, (" " + *ft
.string)...)
2002 repr
= append(repr
, (" " + strconv
.Quote(*f
.tag
))...)
2004 if i
< len(fields
)-1 {
2005 repr
= append(repr
, ';')
2008 comparable
= comparable
&& (ft
.equalfn
!= nil)
2009 hashable
= hashable
&& (ft
.hashfn
!= nil)
2011 offset
:= align(size
, uintptr(ft
.fieldAlign
))
2012 if int8(ft
.fieldAlign
) > typalign
{
2013 typalign
= int8(ft
.fieldAlign
)
2015 size
= offset
+ ft
.size
2016 f
.offsetEmbed |
= offset
<< 1
2025 if size
> 0 && lastzero
== size
{
2026 // This is a non-zero sized struct that ends in a
2027 // zero-sized field. We add an extra byte of padding,
2028 // to ensure that taking the address of the final
2029 // zero-sized field can't manufacture a pointer to the
2030 // next object in the heap. See issue 9401.
2035 repr
= append(repr
, ' ')
2037 repr
= append(repr
, '}')
2041 // Round the size up to be a multiple of the alignment.
2042 size
= align(size
, uintptr(typalign
))
2044 // Make the struct type.
2045 var istruct
interface{} = struct{}{}
2046 prototype
:= *(**structType
)(unsafe
.Pointer(&istruct
))
2047 typ
:= new(structType
)
2052 if ts
, ok
:= structLookupCache
.m
.Load(hash
); ok
{
2053 for _
, st
:= range ts
.([]Type
) {
2055 if haveIdenticalUnderlyingType(&typ
.rtype
, t
, true) {
2061 // Not in cache, lock and retry.
2062 structLookupCache
.Lock()
2063 defer structLookupCache
.Unlock()
2064 if ts
, ok
:= structLookupCache
.m
.Load(hash
); ok
{
2065 for _
, st
:= range ts
.([]Type
) {
2067 if haveIdenticalUnderlyingType(&typ
.rtype
, t
, true) {
2073 addToCache
:= func(t Type
) Type
{
2075 if ti
, ok
:= structLookupCache
.m
.Load(hash
); ok
{
2078 structLookupCache
.m
.Store(hash
, append(ts
, t
))
2085 typ
.align
= typalign
2086 typ
.fieldAlign
= uint8(typalign
)
2088 typ
.kind |
= kindNoPointers
2090 typ
.kind
&^= kindNoPointers
2095 for i
, ft
:= range fs
{
2096 if ft
.typ
.pointers() {
2100 prog
:= []byte{0, 0, 0, 0} // will be length of prog
2101 for i
, ft
:= range fs
{
2102 if i
> lastPtrField
{
2103 // gcprog should not include anything for any field after
2104 // the last field that contains pointer data
2107 // FIXME(sbinet) handle padding, fields smaller than a word
2108 elemGC
:= (*[1 << 30]byte)(unsafe
.Pointer(ft
.typ
.gcdata
))[:]
2109 elemPtrs
:= ft
.typ
.ptrdata
/ ptrSize
2111 case ft
.typ
.kind
&kindGCProg
== 0 && ft
.typ
.ptrdata
!= 0:
2112 // Element is small with pointer mask; use as literal bits.
2114 // Emit 120-bit chunks of full bytes (max is 127 but we avoid using partial bytes).
2116 for n
:= elemPtrs
; n
> 120; n
-= 120 {
2117 prog
= append(prog
, 120)
2118 prog
= append(prog
, mask
[:15]...)
2121 prog
= append(prog
, byte(n
))
2122 prog
= append(prog
, mask
[:(n
+7)/8]...)
2123 case ft
.typ
.kind
&kindGCProg
!= 0:
2124 // Element has GC program; emit one element.
2125 elemProg
:= elemGC
[4 : 4+*(*uint32)(unsafe
.Pointer(&elemGC
[0]))-1]
2126 prog
= append(prog
, elemProg
...)
2128 // Pad from ptrdata to size.
2129 elemWords
:= ft
.typ
.size
/ ptrSize
2130 if elemPtrs
< elemWords
{
2131 // Emit literal 0 bit, then repeat as needed.
2132 prog
= append(prog
, 0x01, 0x00)
2133 if elemPtrs
+1 < elemWords
{
2134 prog
= append(prog
, 0x81)
2135 prog
= appendVarint(prog
, elemWords
-elemPtrs
-1)
2139 *(*uint32)(unsafe
.Pointer(&prog
[0])) = uint32(len(prog
) - 4)
2140 typ
.kind |
= kindGCProg
2141 typ
.gcdata
= &prog
[0]
2143 typ
.kind
&^= kindGCProg
2144 bv
:= new(bitVector
)
2145 addTypeBits(bv
, 0, typ
.common())
2146 if len(bv
.data
) > 0 {
2147 typ
.gcdata
= &bv
.data
[0]
2150 typ
.ptrdata
= typeptrdata(typ
.common())
2153 typ
.hashfn
= func(p unsafe
.Pointer
, seed
uintptr) uintptr {
2155 for _
, ft
:= range typ
.fields
{
2156 pi
:= add(p
, ft
.offset(), "&x.field safe")
2157 o
= ft
.typ
.hashfn(pi
, o
)
2166 typ
.equalfn
= func(p
, q unsafe
.Pointer
) bool {
2167 for _
, ft
:= range typ
.fields
{
2168 pi
:= add(p
, ft
.offset(), "&x.field safe")
2169 qi
:= add(q
, ft
.offset(), "&x.field safe")
2170 if !ft
.typ
.equalfn(pi
, qi
) {
2180 typ
.kind
&^= kindDirectIface
2181 typ
.uncommonType
= nil
2184 // Canonicalize before storing in structLookupCache
2185 ti
:= toType(&typ
.rtype
)
2186 return addToCache(ti
.(*rtype
))
2189 func runtimeStructField(field StructField
) structField
{
2190 if field
.PkgPath
!= "" {
2191 panic("reflect.StructOf: StructOf does not allow unexported fields")
2194 // Best-effort check for misuse.
2195 // Since PkgPath is empty, not much harm done if Unicode lowercase slips through.
2197 if 'a' <= c
&& c
<= 'z' || c
== '_' {
2198 panic("reflect.StructOf: field \"" + field
.Name
+ "\" is unexported but missing PkgPath")
2201 offsetEmbed
:= uintptr(0)
2202 if field
.Anonymous
{
2210 if field
.Tag
!= "" {
2211 st
:= string(field
.Tag
)
2218 typ
: field
.Type
.common(),
2220 offsetEmbed
: offsetEmbed
,
2224 // typeptrdata returns the length in bytes of the prefix of t
2225 // containing pointer data. Anything after this offset is scalar data.
2226 // keep in sync with ../cmd/compile/internal/gc/reflect.go
2227 func typeptrdata(t
*rtype
) uintptr {
2233 st
:= (*structType
)(unsafe
.Pointer(t
))
2234 // find the last field that has pointers.
2236 for i
:= range st
.fields
{
2237 ft
:= st
.fields
[i
].typ
2242 f
:= st
.fields
[field
]
2243 return f
.offset() + f
.typ
.ptrdata
2246 panic("reflect.typeptrdata: unexpected type, " + t
.String())
// maxPtrmaskBytes is the largest pointer bitmap (in bytes) used to
// describe a type's pointers; types whose bitmap would exceed this
// are described with a GC program instead (see ArrayOf).
// See cmd/compile/internal/gc/reflect.go for derivation of constant.
const maxPtrmaskBytes = 2048
2253 // ArrayOf returns the array type with the given count and element type.
2254 // For example, if t represents int, ArrayOf(5, t) represents [5]int.
2256 // If the resulting type would be larger than the available address space,
2258 func ArrayOf(count
int, elem Type
) Type
{
2259 typ
:= elem
.(*rtype
)
2262 ckey
:= cacheKey
{Array
, typ
, nil, uintptr(count
)}
2263 if array
, ok
:= lookupCache
.Load(ckey
); ok
{
2267 // Look in known types.
2268 s
:= "[" + strconv
.Itoa(count
) + "]" + *typ
.string
2270 // Make an array type.
2271 var iarray
interface{} = [1]unsafe
.Pointer
{}
2272 prototype
:= *(**arrayType
)(unsafe
.Pointer(&iarray
))
2276 // gccgo uses a different hash.
2277 // array.hash = fnv1(typ.hash, '[')
2278 // for n := uint32(count); n > 0; n >>= 8 {
2279 // array.hash = fnv1(array.hash, byte(n))
2281 // array.hash = fnv1(array.hash, ']')
2282 array
.hash
= typ
.hash
+ 1 + 13
2285 array
.ptrToThis
= nil
2287 max
:= ^uintptr(0) / typ
.size
2288 if uintptr(count
) > max
{
2289 panic("reflect.ArrayOf: array size would exceed virtual address space")
2292 array
.size
= typ
.size
* uintptr(count
)
2293 if count
> 0 && typ
.ptrdata
!= 0 {
2294 array
.ptrdata
= typ
.size
*uintptr(count
-1) + typ
.ptrdata
2296 array
.align
= typ
.align
2297 array
.fieldAlign
= typ
.fieldAlign
2298 array
.uncommonType
= nil
2299 array
.len = uintptr(count
)
2300 array
.slice
= SliceOf(elem
).(*rtype
)
2302 array
.kind
&^= kindNoPointers
2304 case typ
.kind
&kindNoPointers
!= 0 || array
.size
== 0:
2306 array
.kind |
= kindNoPointers
2311 // In memory, 1-element array looks just like the element.
2312 array
.kind |
= typ
.kind
& kindGCProg
2313 array
.gcdata
= typ
.gcdata
2314 array
.ptrdata
= typ
.ptrdata
2316 case typ
.kind
&kindGCProg
== 0 && array
.size
<= maxPtrmaskBytes
*8*ptrSize
:
2317 // Element is small with pointer mask; array is still small.
2318 // Create direct pointer mask by turning each 1 bit in elem
2319 // into count 1 bits in larger mask.
2320 mask
:= make([]byte, (array
.ptrdata
/ptrSize
+7)/8)
2321 elemMask
:= (*[1 << 30]byte)(unsafe
.Pointer(typ
.gcdata
))[:]
2322 elemWords
:= typ
.size
/ ptrSize
2323 for j
:= uintptr(0); j
< typ
.ptrdata
/ptrSize
; j
++ {
2324 if (elemMask
[j
/8]>>(j%8
))&1 != 0 {
2325 for i
:= uintptr(0); i
< array
.len; i
++ {
2326 k
:= i
*elemWords
+ j
2327 mask
[k
/8] |
= 1 << (k
% 8)
2331 array
.gcdata
= &mask
[0]
2334 // Create program that emits one element
2335 // and then repeats to make the array.
2336 prog
:= []byte{0, 0, 0, 0} // will be length of prog
2337 elemGC
:= (*[1 << 30]byte)(unsafe
.Pointer(typ
.gcdata
))[:]
2338 elemPtrs
:= typ
.ptrdata
/ ptrSize
2339 if typ
.kind
&kindGCProg
== 0 {
2340 // Element is small with pointer mask; use as literal bits.
2342 // Emit 120-bit chunks of full bytes (max is 127 but we avoid using partial bytes).
2344 for n
= elemPtrs
; n
> 120; n
-= 120 {
2345 prog
= append(prog
, 120)
2346 prog
= append(prog
, mask
[:15]...)
2349 prog
= append(prog
, byte(n
))
2350 prog
= append(prog
, mask
[:(n
+7)/8]...)
2352 // Element has GC program; emit one element.
2353 elemProg
:= elemGC
[4 : 4+*(*uint32)(unsafe
.Pointer(&elemGC
[0]))-1]
2354 prog
= append(prog
, elemProg
...)
2356 // Pad from ptrdata to size.
2357 elemWords
:= typ
.size
/ ptrSize
2358 if elemPtrs
< elemWords
{
2359 // Emit literal 0 bit, then repeat as needed.
2360 prog
= append(prog
, 0x01, 0x00)
2361 if elemPtrs
+1 < elemWords
{
2362 prog
= append(prog
, 0x81)
2363 prog
= appendVarint(prog
, elemWords
-elemPtrs
-1)
2366 // Repeat count-1 times.
2367 if elemWords
< 0x80 {
2368 prog
= append(prog
, byte(elemWords|
0x80))
2370 prog
= append(prog
, 0x80)
2371 prog
= appendVarint(prog
, elemWords
)
2373 prog
= appendVarint(prog
, uintptr(count
)-1)
2374 prog
= append(prog
, 0)
2375 *(*uint32)(unsafe
.Pointer(&prog
[0])) = uint32(len(prog
) - 4)
2376 array
.kind |
= kindGCProg
2377 array
.gcdata
= &prog
[0]
2378 array
.ptrdata
= array
.size
// overestimate but ok; must match program
2381 array
.kind
&^= kindDirectIface
2385 if typ
.equalfn
== nil {
2388 eequal
:= typ
.equalfn
2389 array
.equalfn
= func(p
, q unsafe
.Pointer
) bool {
2390 for i
:= 0; i
< count
; i
++ {
2391 pi
:= arrayAt(p
, i
, esize
, "i < count")
2392 qi
:= arrayAt(q
, i
, esize
, "i < count")
2393 if !eequal(pi
, qi
) {
2401 if typ
.hashfn
== nil {
2405 array
.hashfn
= func(ptr unsafe
.Pointer
, seed
uintptr) uintptr {
2407 for i
:= 0; i
< count
; i
++ {
2408 o
= ehash(arrayAt(ptr
, i
, esize
, "i < count"), o
)
2414 // Canonicalize before storing in lookupCache
2415 ti
:= toType(&array
.rtype
)
2416 lookupCache
.Store(ckey
, ti
.(*rtype
))
// appendVarint appends v to x in the GC program's varint encoding
// (base 128: seven value bits per byte, least-significant group first,
// high bit set on every byte except the last) and returns the
// extended slice.
func appendVarint(x []byte, v uintptr) []byte {
	// Emit 7-bit groups while more-significant bits remain.
	for ; v >= 0x80; v >>= 7 {
		x = append(x, byte(v|0x80))
	}
	// Final group: a clear high bit terminates the varint.
	x = append(x, byte(v))
	return x
}
2428 // toType converts from a *rtype to a Type that can be returned
2429 // to the client of package reflect. In gc, the only concern is that
2430 // a nil *rtype must be replaced by a nil Type, but in gccgo this
2431 // function takes care of ensuring that multiple *rtype for the same
2432 // type are coalesced into a single Type.
2433 var canonicalType
= make(map[string]Type
)
2435 var canonicalTypeLock sync
.RWMutex
2437 func canonicalize(t Type
) Type
{
2442 canonicalTypeLock
.RLock()
2443 if r
, ok
:= canonicalType
[s
]; ok
{
2444 canonicalTypeLock
.RUnlock()
2447 canonicalTypeLock
.RUnlock()
2448 canonicalTypeLock
.Lock()
2449 if r
, ok
:= canonicalType
[s
]; ok
{
2450 canonicalTypeLock
.Unlock()
2453 canonicalType
[s
] = t
2454 canonicalTypeLock
.Unlock()
2458 func toType(p
*rtype
) Type
{
2462 return canonicalize(p
)
2465 // ifaceIndir reports whether t is stored indirectly in an interface value.
2466 func ifaceIndir(t
*rtype
) bool {
2467 return t
.kind
&kindDirectIface
== 0
// Layout matches runtime.gobitvector (well enough).
type bitVector struct {
	n    uint32 // number of bits in use
	data []byte // bit i is stored in data[i/8] at bit position i%8
}

// append adds one bit to the bitmap, growing data by a fresh zero
// byte each time a new 8-bit chunk is started.
func (bv *bitVector) append(bit uint8) {
	if bv.n%8 == 0 {
		bv.data = append(bv.data, 0)
	}
	bv.data[bv.n/8] |= bit << (bv.n % 8)
	bv.n++
}
2485 func addTypeBits(bv
*bitVector
, offset
uintptr, t
*rtype
) {
2486 if t
.kind
&kindNoPointers
!= 0 {
2490 switch Kind(t
.kind
& kindMask
) {
2491 case Chan
, Func
, Map
, Ptr
, Slice
, String
, UnsafePointer
:
2492 // 1 pointer at start of representation
2493 for bv
.n
< uint32(offset
/uintptr(ptrSize
)) {
2500 for bv
.n
< uint32(offset
/uintptr(ptrSize
)) {
2507 // repeat inner type
2508 tt
:= (*arrayType
)(unsafe
.Pointer(t
))
2509 for i
:= 0; i
< int(tt
.len); i
++ {
2510 addTypeBits(bv
, offset
+uintptr(i
)*tt
.elem
.size
, tt
.elem
)
2515 tt
:= (*structType
)(unsafe
.Pointer(t
))
2516 for i
:= range tt
.fields
{
2518 addTypeBits(bv
, offset
+f
.offset(), f
.typ
)